Diffstat (limited to 'src/core')
-rw-r--r--  src/core/file_sys/program_metadata.cpp |   4
-rw-r--r--  src/core/file_sys/program_metadata.h   |   4
-rw-r--r--  src/core/hle/kernel/process.cpp        |  16
-rw-r--r--  src/core/hle/kernel/process.h          |  34
-rw-r--r--  src/core/hle/kernel/svc.cpp            | 117
-rw-r--r--  src/core/hle/kernel/svc_wrap.h         |   5
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp     | 290
-rw-r--r--  src/core/hle/kernel/vm_manager.h       |  48
8 files changed, 477 insertions, 41 deletions
diff --git a/src/core/file_sys/program_metadata.cpp b/src/core/file_sys/program_metadata.cpp
index eb76174c5..7310b3602 100644
--- a/src/core/file_sys/program_metadata.cpp
+++ b/src/core/file_sys/program_metadata.cpp
@@ -94,6 +94,10 @@ u64 ProgramMetadata::GetFilesystemPermissions() const {
     return aci_file_access.permissions;
 }
 
+u32 ProgramMetadata::GetSystemResourceSize() const {
+    return npdm_header.system_resource_size;
+}
+
 const ProgramMetadata::KernelCapabilityDescriptors& ProgramMetadata::GetKernelCapabilities() const {
     return aci_kernel_capabilities;
 }
diff --git a/src/core/file_sys/program_metadata.h b/src/core/file_sys/program_metadata.h
index 43bf2820a..88ec97d85 100644
--- a/src/core/file_sys/program_metadata.h
+++ b/src/core/file_sys/program_metadata.h
@@ -58,6 +58,7 @@ public:
     u32 GetMainThreadStackSize() const;
     u64 GetTitleID() const;
     u64 GetFilesystemPermissions() const;
+    u32 GetSystemResourceSize() const;
     const KernelCapabilityDescriptors& GetKernelCapabilities() const;
 
     void Print() const;
@@ -76,7 +77,8 @@ private:
     u8 reserved_3;
     u8 main_thread_priority;
     u8 main_thread_cpu;
-    std::array<u8, 8> reserved_4;
+    std::array<u8, 4> reserved_4;
+    u32_le system_resource_size;
     u32_le process_category;
     u32_le main_stack_size;
     std::array<u8, 0x10> application_name;
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index f45ef05f6..db3ab14ce 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -129,20 +129,17 @@ u64 Process::GetTotalPhysicalMemoryAvailable() const {
     return vm_manager.GetTotalPhysicalMemoryAvailable();
 }
 
-u64 Process::GetTotalPhysicalMemoryAvailableWithoutMmHeap() const {
-    // TODO: Subtract the personal heap size from this when the
-    // personal heap is implemented.
-    return GetTotalPhysicalMemoryAvailable();
+u64 Process::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
+    return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize();
 }
 
 u64 Process::GetTotalPhysicalMemoryUsed() const {
-    return vm_manager.GetCurrentHeapSize() + main_thread_stack_size + code_memory_size;
+    return vm_manager.GetCurrentHeapSize() + main_thread_stack_size + code_memory_size +
+           GetSystemResourceUsage();
 }
 
-u64 Process::GetTotalPhysicalMemoryUsedWithoutMmHeap() const {
-    // TODO: Subtract the personal heap size from this when the
-    // personal heap is implemented.
-    return GetTotalPhysicalMemoryUsed();
+u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
+    return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
 }
 
 void Process::RegisterThread(const Thread* thread) {
@@ -172,6 +169,7 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) {
     program_id = metadata.GetTitleID();
     ideal_core = metadata.GetMainThreadCore();
     is_64bit_process = metadata.Is64BitProgram();
+    system_resource_size = metadata.GetSystemResourceSize();
 
     vm_manager.Reset(metadata.GetAddressSpaceType());
 
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 83ea02bee..3196014da 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -168,8 +168,24 @@ public:
         return capabilities.GetPriorityMask();
     }
 
-    u32 IsVirtualMemoryEnabled() const {
-        return is_virtual_address_memory_enabled;
+    /// Gets the amount of secure memory to allocate for memory management.
+    u32 GetSystemResourceSize() const {
+        return system_resource_size;
+    }
+
+    /// Gets the amount of secure memory currently in use for memory management.
+    u32 GetSystemResourceUsage() const {
+        // On hardware, this returns the amount of system resource memory that has
+        // been used by the kernel. This is problematic for Yuzu to emulate, because
+        // system resource memory is used for page tables -- and yuzu doesn't really
+        // have a way to calculate how much memory is required for page tables for
+        // the current process at any given time.
+        // TODO: Is this even worth implementing? Games may retrieve this value via
+        // an SDK function that gets used + available system resource size for debug
+        // or diagnostic purposes. However, it seems unlikely that a game would make
+        // decisions based on how much system memory is dedicated to its page tables.
+        // Is returning a value other than zero wise?
+        return 0;
     }
 
     /// Whether this process is an AArch64 or AArch32 process.
@@ -196,15 +212,15 @@ public:
     u64 GetTotalPhysicalMemoryAvailable() const;
 
     /// Retrieves the total physical memory available to this process in bytes,
-    /// without the size of the personal heap added to it.
-    u64 GetTotalPhysicalMemoryAvailableWithoutMmHeap() const;
+    /// without the size of the personal system resource heap added to it.
+    u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const;
 
     /// Retrieves the total physical memory used by this process in bytes.
     u64 GetTotalPhysicalMemoryUsed() const;
 
     /// Retrieves the total physical memory used by this process in bytes,
-    /// without the size of the personal heap added to it.
-    u64 GetTotalPhysicalMemoryUsedWithoutMmHeap() const;
+    /// without the size of the personal system resource heap added to it.
+    u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;
 
     /// Gets the list of all threads created with this process as their owner.
     const std::list<const Thread*>& GetThreadList() const {
@@ -298,12 +314,16 @@ private:
     /// Title ID corresponding to the process
     u64 program_id = 0;
 
+    /// Specifies additional memory to be reserved for the process's memory management by the
+    /// system. When this is non-zero, secure memory is allocated and used for page table allocation
+    /// instead of using the normal global page tables/memory block management.
+    u32 system_resource_size = 0;
+
     /// Resource limit descriptor for this process
     SharedPtr<ResourceLimit> resource_limit;
 
     /// The ideal CPU core for this process, threads are scheduled on this core by default.
     u8 ideal_core = 0;
-    u32 is_virtual_address_memory_enabled = 0;
 
     /// The Thread Local Storage area is allocated as processes create threads,
     /// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part
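
The WithoutSystemResource accessors above are plain subtraction over figures the process already tracks. Below is a minimal standalone sketch of that arithmetic; it is illustrative only and not part of this commit. The heap, stack, code, and 16 MiB system resource sizes are hypothetical, 0xF8000000 is the stubbed total returned by VMManager::GetTotalPhysicalMemoryAvailable, and the usage of 0 matches the stubbed GetSystemResourceUsage above.

// Illustrative sketch, not part of this commit: the arithmetic behind the
// "WithoutSystemResource" accessors, with hypothetical sizes.
#include <cstdint>
#include <iostream>

int main() {
    const std::uint64_t total_available = 0xF8000000;     // stubbed VMManager total
    const std::uint64_t system_resource_size = 0x1000000; // hypothetical NPDM value (16 MiB)
    const std::uint64_t system_resource_usage = 0;        // GetSystemResourceUsage() is stubbed to 0
    const std::uint64_t heap = 0x6000000, stack = 0x100000, code = 0x2000000; // hypothetical

    const std::uint64_t used = heap + stack + code + system_resource_usage;

    std::cout << std::hex << "available w/o system resource: 0x"
              << (total_available - system_resource_size) << '\n'
              << "used w/o system resource:      0x" << (used - system_resource_usage) << '\n';
}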
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 58374f829..a46eed3da 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -736,16 +736,16 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
         StackRegionBaseAddr = 14,
         StackRegionSize = 15,
         // 3.0.0+
-        IsVirtualAddressMemoryEnabled = 16,
-        PersonalMmHeapUsage = 17,
+        SystemResourceSize = 16,
+        SystemResourceUsage = 17,
         TitleId = 18,
         // 4.0.0+
         PrivilegedProcessId = 19,
         // 5.0.0+
         UserExceptionContextAddr = 20,
         // 6.0.0+
-        TotalPhysicalMemoryAvailableWithoutMmHeap = 21,
-        TotalPhysicalMemoryUsedWithoutMmHeap = 22,
+        TotalPhysicalMemoryAvailableWithoutSystemResource = 21,
+        TotalPhysicalMemoryUsedWithoutSystemResource = 22,
     };
 
     const auto info_id_type = static_cast<GetInfoType>(info_id);
@@ -763,12 +763,12 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
     case GetInfoType::StackRegionSize:
     case GetInfoType::TotalPhysicalMemoryAvailable:
     case GetInfoType::TotalPhysicalMemoryUsed:
-    case GetInfoType::IsVirtualAddressMemoryEnabled:
-    case GetInfoType::PersonalMmHeapUsage:
+    case GetInfoType::SystemResourceSize:
+    case GetInfoType::SystemResourceUsage:
     case GetInfoType::TitleId:
     case GetInfoType::UserExceptionContextAddr:
-    case GetInfoType::TotalPhysicalMemoryAvailableWithoutMmHeap:
-    case GetInfoType::TotalPhysicalMemoryUsedWithoutMmHeap: {
+    case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
+    case GetInfoType::TotalPhysicalMemoryUsedWithoutSystemResource: {
         if (info_sub_id != 0) {
             return ERR_INVALID_ENUM_VALUE;
         }
@@ -829,8 +829,13 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
         *result = process->GetTotalPhysicalMemoryUsed();
         return RESULT_SUCCESS;
 
-    case GetInfoType::IsVirtualAddressMemoryEnabled:
-        *result = process->IsVirtualMemoryEnabled();
+    case GetInfoType::SystemResourceSize:
+        *result = process->GetSystemResourceSize();
+        return RESULT_SUCCESS;
+
+    case GetInfoType::SystemResourceUsage:
+        LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage");
+        *result = process->GetSystemResourceUsage();
         return RESULT_SUCCESS;
 
     case GetInfoType::TitleId:
@@ -843,12 +848,12 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
         *result = 0;
         return RESULT_SUCCESS;
 
-    case GetInfoType::TotalPhysicalMemoryAvailableWithoutMmHeap:
-        *result = process->GetTotalPhysicalMemoryAvailable();
+    case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
+        *result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource();
         return RESULT_SUCCESS;
 
-    case GetInfoType::TotalPhysicalMemoryUsedWithoutMmHeap:
-        *result = process->GetTotalPhysicalMemoryUsedWithoutMmHeap();
+    case GetInfoType::TotalPhysicalMemoryUsedWithoutSystemResource:
+        *result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource();
         return RESULT_SUCCESS;
 
     default:
@@ -953,6 +958,86 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
     }
 }
 
+/// Maps memory at a desired address
+static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
+    LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
+
+    if (!Common::Is4KBAligned(addr)) {
+        LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
+        return ERR_INVALID_ADDRESS;
+    }
+
+    if (!Common::Is4KBAligned(size)) {
+        LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
+        return ERR_INVALID_SIZE;
+    }
+
+    if (size == 0) {
+        LOG_ERROR(Kernel_SVC, "Size is zero");
+        return ERR_INVALID_SIZE;
+    }
+
+    if (!(addr < addr + size)) {
+        LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
+        return ERR_INVALID_MEMORY_RANGE;
+    }
+
+    Process* const current_process = system.Kernel().CurrentProcess();
+    auto& vm_manager = current_process->VMManager();
+
+    if (current_process->GetSystemResourceSize() == 0) {
+        LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
+        return ERR_INVALID_STATE;
+    }
+
+    if (!vm_manager.IsWithinMapRegion(addr, size)) {
+        LOG_ERROR(Kernel_SVC, "Range not within map region");
+        return ERR_INVALID_MEMORY_RANGE;
+    }
+
+    return vm_manager.MapPhysicalMemory(addr, size);
+}
+
+/// Unmaps memory previously mapped via MapPhysicalMemory
+static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) {
+    LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size);
+
+    if (!Common::Is4KBAligned(addr)) {
+        LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, 0x{:016X}", addr);
+        return ERR_INVALID_ADDRESS;
+    }
+
+    if (!Common::Is4KBAligned(size)) {
+        LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:X}", size);
+        return ERR_INVALID_SIZE;
+    }
+
+    if (size == 0) {
+        LOG_ERROR(Kernel_SVC, "Size is zero");
+        return ERR_INVALID_SIZE;
+    }
+
+    if (!(addr < addr + size)) {
+        LOG_ERROR(Kernel_SVC, "Size causes 64-bit overflow of address");
+        return ERR_INVALID_MEMORY_RANGE;
+    }
+
+    Process* const current_process = system.Kernel().CurrentProcess();
+    auto& vm_manager = current_process->VMManager();
+
+    if (current_process->GetSystemResourceSize() == 0) {
+        LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
+        return ERR_INVALID_STATE;
+    }
+
+    if (!vm_manager.IsWithinMapRegion(addr, size)) {
+        LOG_ERROR(Kernel_SVC, "Range not within map region");
+        return ERR_INVALID_MEMORY_RANGE;
+    }
+
+    return vm_manager.UnmapPhysicalMemory(addr, size);
+}
+
 /// Sets the thread activity
 static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 activity) {
     LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", handle, activity);
@@ -2310,8 +2395,8 @@ static const FunctionDef SVC_Table[] = {
     {0x29, SvcWrap<GetInfo>, "GetInfo"},
     {0x2A, nullptr, "FlushEntireDataCache"},
     {0x2B, nullptr, "FlushDataCache"},
-    {0x2C, nullptr, "MapPhysicalMemory"},
-    {0x2D, nullptr, "UnmapPhysicalMemory"},
+    {0x2C, SvcWrap<MapPhysicalMemory>, "MapPhysicalMemory"},
+    {0x2D, SvcWrap<UnmapPhysicalMemory>, "UnmapPhysicalMemory"},
     {0x2E, nullptr, "GetFutureThreadInfo"},
     {0x2F, nullptr, "GetLastThreadInfo"},
     {0x30, SvcWrap<GetResourceLimitLimitValue>, "GetResourceLimitLimitValue"},
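
Both new handlers above run the same guard clauses before delegating to the VMManager. Below is a condensed, self-contained sketch of that check order (alignment, size, overflow, system-resource presence, map-region containment); it is illustrative only and not part of this commit, and SvcError plus the map-region parameters are stand-ins for yuzu's error codes and VMManager queries.

// Illustrative sketch, not part of this commit: the validation order shared by the
// MapPhysicalMemory/UnmapPhysicalMemory handlers, with stand-in error codes.
#include <cstdint>

enum class SvcError { Success, InvalidAddress, InvalidSize, InvalidMemoryRange, InvalidState };

constexpr bool Is4KBAligned(std::uint64_t value) {
    return (value & 0xFFF) == 0;
}

SvcError ValidatePhysicalMemoryArgs(std::uint64_t addr, std::uint64_t size,
                                    std::uint32_t system_resource_size,
                                    std::uint64_t map_base, std::uint64_t map_size) {
    if (!Is4KBAligned(addr)) {
        return SvcError::InvalidAddress;
    }
    if (!Is4KBAligned(size) || size == 0) {
        return SvcError::InvalidSize;
    }
    if (!(addr < addr + size)) {
        return SvcError::InvalidMemoryRange; // 64-bit overflow of addr + size
    }
    if (system_resource_size == 0) {
        return SvcError::InvalidState; // no personal heap was reserved in the NPDM
    }
    const bool in_map_region = addr >= map_base && addr + size <= map_base + map_size;
    return in_map_region ? SvcError::Success : SvcError::InvalidMemoryRange;
}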
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 865473c6f..c2d8d0dc3 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -32,6 +32,11 @@ void SvcWrap(Core::System& system) {
     FuncReturn(system, func(system, Param(system, 0)).raw);
 }
 
+template <ResultCode func(Core::System&, u64, u64)>
+void SvcWrap(Core::System& system) {
+    FuncReturn(system, func(system, Param(system, 0), Param(system, 1)).raw);
+}
+
 template <ResultCode func(Core::System&, u32)>
 void SvcWrap(Core::System& system) {
     FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw);
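
The added overload follows the same idiom as the rest of svc_wrap.h: the wrapped SVC is passed as a non-type template parameter, and its signature selects which SvcWrap overload gets instantiated, so the dispatch table can simply name SvcWrap<MapPhysicalMemory>. The following is a self-contained sketch of that idiom, illustrative only and not part of this commit; System, Param, and Wrap are simplified stand-ins rather than yuzu's actual types.

// Illustrative sketch, not part of this commit: overload selection of a wrapper by the
// signature of a function supplied as a non-type template parameter.
#include <cstdint>
#include <iostream>

struct System {
    std::uint64_t regs[2]{0x100000, 0x2000}; // stand-in for guest argument registers
};

std::uint64_t Param(const System& system, int index) {
    return system.regs[index];
}

// Chosen when the wrapped function takes two u64 parameters, mirroring the new overload above.
template <int func(System&, std::uint64_t, std::uint64_t)>
void Wrap(System& system) {
    std::cout << "result=" << func(system, Param(system, 0), Param(system, 1)) << '\n';
}

// Toy SVC with the (System&, u64, u64) shape; the real handlers return a ResultCode.
int MapPhysicalMemoryStub(System&, std::uint64_t addr, std::uint64_t size) {
    return (addr % 0x1000 == 0 && size % 0x1000 == 0) ? 0 : -1;
}

int main() {
    System system;
    Wrap<MapPhysicalMemoryStub>(system); // the SVC table stores a pointer to this instantiation
}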
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 7bc925a5f..4f45fb03b 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -11,6 +11,8 @@
 #include "core/core.h"
 #include "core/file_sys/program_metadata.h"
 #include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/resource_limit.h"
 #include "core/hle/kernel/vm_manager.h"
 #include "core/memory.h"
 #include "core/memory_setup.h"
@@ -48,10 +50,14 @@ bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
         type != next.type) {
         return false;
     }
-    if (type == VMAType::AllocatedMemoryBlock &&
-        (backing_block != next.backing_block || offset + size != next.offset)) {
+    if ((attribute & MemoryAttribute::DeviceMapped) == MemoryAttribute::DeviceMapped) {
+        // TODO: Can device mapped memory be merged sanely?
+        // Not merging it may cause inaccuracies versus hardware when memory layout is queried.
         return false;
     }
+    if (type == VMAType::AllocatedMemoryBlock) {
+        return true;
+    }
     if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) {
         return false;
     }
@@ -99,7 +105,7 @@ bool VMManager::IsValidHandle(VMAHandle handle) const {
 ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
                                                           std::shared_ptr<std::vector<u8>> block,
                                                           std::size_t offset, u64 size,
-                                                          MemoryState state) {
+                                                          MemoryState state, VMAPermission perm) {
     ASSERT(block != nullptr);
     ASSERT(offset + size <= block->size());
 
@@ -109,7 +115,7 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
     ASSERT(final_vma.size == size);
 
     final_vma.type = VMAType::AllocatedMemoryBlock;
-    final_vma.permissions = VMAPermission::ReadWrite;
+    final_vma.permissions = perm;
     final_vma.state = state;
     final_vma.backing_block = std::move(block);
     final_vma.offset = offset;
@@ -288,6 +294,166 @@ ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
     return MakeResult<VAddr>(heap_region_base);
 }
 
+ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
+    const auto end_addr = target + size;
+    const auto last_addr = end_addr - 1;
+    VAddr cur_addr = target;
+
+    ResultCode result = RESULT_SUCCESS;
+
+    // Check how much memory we've already mapped.
+    const auto mapped_size_result = SizeOfAllocatedVMAsInRange(target, size);
+    if (mapped_size_result.Failed()) {
+        return mapped_size_result.Code();
+    }
+
+    // If we've already mapped the desired amount, return early.
+    const std::size_t mapped_size = *mapped_size_result;
+    if (mapped_size == size) {
+        return RESULT_SUCCESS;
+    }
+
+    // Check that we can map the memory we want.
+    const auto res_limit = system.CurrentProcess()->GetResourceLimit();
+    const u64 physmem_remaining = res_limit->GetMaxResourceValue(ResourceType::PhysicalMemory) -
+                                  res_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory);
+    if (physmem_remaining < (size - mapped_size)) {
+        return ERR_RESOURCE_LIMIT_EXCEEDED;
+    }
+
+    // Keep track of the memory regions we unmap.
+    std::vector<std::pair<u64, u64>> mapped_regions;
+
+    // Iterate, trying to map memory.
+    {
+        cur_addr = target;
+
+        auto iter = FindVMA(target);
+        ASSERT_MSG(iter != vma_map.end(), "MapPhysicalMemory iter != end");
+
+        while (true) {
+            const auto& vma = iter->second;
+            const auto vma_start = vma.base;
+            const auto vma_end = vma_start + vma.size;
+            const auto vma_last = vma_end - 1;
+
+            // Map the memory block
+            const auto map_size = std::min(end_addr - cur_addr, vma_end - cur_addr);
+            if (vma.state == MemoryState::Unmapped) {
+                const auto map_res =
+                    MapMemoryBlock(cur_addr, std::make_shared<std::vector<u8>>(map_size, 0), 0,
+                                   map_size, MemoryState::Heap, VMAPermission::ReadWrite);
+                result = map_res.Code();
+                if (result.IsError()) {
+                    break;
+                }
+
+                mapped_regions.emplace_back(cur_addr, map_size);
+            }
+
+            // Break once we hit the end of the range.
+            if (last_addr <= vma_last) {
+                break;
+            }
+
+            // Advance to the next block.
+            cur_addr = vma_end;
+            iter = FindVMA(cur_addr);
+            ASSERT_MSG(iter != vma_map.end(), "MapPhysicalMemory iter != end");
+        }
+    }
+
+    // If we failed, unmap memory.
+    if (result.IsError()) {
+        for (const auto [unmap_address, unmap_size] : mapped_regions) {
+            ASSERT_MSG(UnmapRange(unmap_address, unmap_size).IsSuccess(),
+                       "MapPhysicalMemory un-map on error");
+        }
+
+        return result;
+    }
+
+    // Update amount of mapped physical memory.
+    physical_memory_mapped += size - mapped_size;
+
+    return RESULT_SUCCESS;
+}
+
+ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) {
+    const auto end_addr = target + size;
+    const auto last_addr = end_addr - 1;
+    VAddr cur_addr = target;
+
+    ResultCode result = RESULT_SUCCESS;
+
+    // Check how much memory is currently mapped.
+    const auto mapped_size_result = SizeOfUnmappablePhysicalMemoryInRange(target, size);
+    if (mapped_size_result.Failed()) {
+        return mapped_size_result.Code();
+    }
+
+    // If we've already unmapped all the memory, return early.
+    const std::size_t mapped_size = *mapped_size_result;
+    if (mapped_size == 0) {
+        return RESULT_SUCCESS;
+    }
+
+    // Keep track of the memory regions we unmap.
+    std::vector<std::pair<u64, u64>> unmapped_regions;
+
+    // Try to unmap regions.
+    {
+        cur_addr = target;
+
+        auto iter = FindVMA(target);
+        ASSERT_MSG(iter != vma_map.end(), "UnmapPhysicalMemory iter != end");
+
+        while (true) {
+            const auto& vma = iter->second;
+            const auto vma_start = vma.base;
+            const auto vma_end = vma_start + vma.size;
+            const auto vma_last = vma_end - 1;
+
+            // Unmap the memory block
+            const auto unmap_size = std::min(end_addr - cur_addr, vma_end - cur_addr);
+            if (vma.state == MemoryState::Heap) {
+                result = UnmapRange(cur_addr, unmap_size);
+                if (result.IsError()) {
+                    break;
+                }
+
+                unmapped_regions.emplace_back(cur_addr, unmap_size);
+            }
+
+            // Break once we hit the end of the range.
+            if (last_addr <= vma_last) {
+                break;
+            }
+
+            // Advance to the next block.
+            cur_addr = vma_end;
+            iter = FindVMA(cur_addr);
+            ASSERT_MSG(iter != vma_map.end(), "UnmapPhysicalMemory iter != end");
+        }
+    }
+
+    // If we failed, re-map regions.
+    // TODO: Preserve memory contents?
+    if (result.IsError()) {
+        for (const auto [map_address, map_size] : unmapped_regions) {
+            const auto remap_res =
+                MapMemoryBlock(map_address, std::make_shared<std::vector<u8>>(map_size, 0), 0,
+                               map_size, MemoryState::Heap, VMAPermission::None);
+            ASSERT_MSG(remap_res.Succeeded(), "UnmapPhysicalMemory re-map on error");
+        }
+    }
+
+    // Update mapped amount
+    physical_memory_mapped -= mapped_size;
+
+    return RESULT_SUCCESS;
+}
+
 ResultCode VMManager::MapCodeMemory(VAddr dst_address, VAddr src_address, u64 size) {
     constexpr auto ignore_attribute = MemoryAttribute::LockedForIPC | MemoryAttribute::DeviceMapped;
     const auto src_check_result = CheckRangeState(
@@ -435,7 +601,7 @@ ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, Mem
     // Protect mirror with permissions from old region
     Reprotect(new_vma, vma->second.permissions);
     // Remove permissions from old region
-    Reprotect(vma, VMAPermission::None);
+    ReprotectRange(src_addr, size, VMAPermission::None);
 
     return RESULT_SUCCESS;
 }
@@ -568,14 +734,14 @@ VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) {
 VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
     const VMAIter next_vma = std::next(iter);
     if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
-        iter->second.size += next_vma->second.size;
+        MergeAdjacentVMA(iter->second, next_vma->second);
         vma_map.erase(next_vma);
     }
 
     if (iter != vma_map.begin()) {
         VMAIter prev_vma = std::prev(iter);
         if (prev_vma->second.CanBeMergedWith(iter->second)) {
-            prev_vma->second.size += iter->second.size;
+            MergeAdjacentVMA(prev_vma->second, iter->second);
             vma_map.erase(iter);
             iter = prev_vma;
         }
@@ -584,6 +750,38 @@ VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
     return iter;
 }
 
+void VMManager::MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryArea& right) {
+    ASSERT(left.CanBeMergedWith(right));
+
+    // Always merge allocated memory blocks, even when they don't share the same backing block.
+    if (left.type == VMAType::AllocatedMemoryBlock &&
+        (left.backing_block != right.backing_block || left.offset + left.size != right.offset)) {
+        // Check if we can save work.
+        if (left.offset == 0 && left.size == left.backing_block->size()) {
+            // Fast case: left is an entire backing block.
+            left.backing_block->insert(left.backing_block->end(),
+                                       right.backing_block->begin() + right.offset,
+                                       right.backing_block->begin() + right.offset + right.size);
+        } else {
+            // Slow case: make a new memory block for left and right.
+            auto new_memory = std::make_shared<std::vector<u8>>();
+            new_memory->insert(new_memory->end(), left.backing_block->begin() + left.offset,
+                               left.backing_block->begin() + left.offset + left.size);
+            new_memory->insert(new_memory->end(), right.backing_block->begin() + right.offset,
+                               right.backing_block->begin() + right.offset + right.size);
+            left.backing_block = new_memory;
+            left.offset = 0;
+        }
+
+        // Page table update is needed, because backing memory changed.
+        left.size += right.size;
+        UpdatePageTableForVMA(left);
+    } else {
+        // Just update the size.
+        left.size += right.size;
+    }
+}
+
 void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
     switch (vma.type) {
     case VMAType::Free:
@@ -758,6 +956,84 @@ VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, Memo
         std::make_tuple(initial_state, initial_permissions, initial_attributes & ~ignore_mask));
 }
 
+ResultVal<std::size_t> VMManager::SizeOfAllocatedVMAsInRange(VAddr address,
+                                                             std::size_t size) const {
+    const VAddr end_addr = address + size;
+    const VAddr last_addr = end_addr - 1;
+    std::size_t mapped_size = 0;
+
+    VAddr cur_addr = address;
+    auto iter = FindVMA(cur_addr);
+    ASSERT_MSG(iter != vma_map.end(), "SizeOfAllocatedVMAsInRange iter != end");
+
+    while (true) {
+        const auto& vma = iter->second;
+        const VAddr vma_start = vma.base;
+        const VAddr vma_end = vma_start + vma.size;
+        const VAddr vma_last = vma_end - 1;
+
+        // Add size if relevant.
+        if (vma.state != MemoryState::Unmapped) {
+            mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr);
+        }
+
+        // Break once we hit the end of the range.
+        if (last_addr <= vma_last) {
+            break;
+        }
+
+        // Advance to the next block.
+        cur_addr = vma_end;
+        iter = std::next(iter);
+        ASSERT_MSG(iter != vma_map.end(), "SizeOfAllocatedVMAsInRange iter != end");
+    }
+
+    return MakeResult(mapped_size);
+}
+
+ResultVal<std::size_t> VMManager::SizeOfUnmappablePhysicalMemoryInRange(VAddr address,
+                                                                        std::size_t size) const {
+    const VAddr end_addr = address + size;
+    const VAddr last_addr = end_addr - 1;
+    std::size_t mapped_size = 0;
+
+    VAddr cur_addr = address;
+    auto iter = FindVMA(cur_addr);
+    ASSERT_MSG(iter != vma_map.end(), "SizeOfUnmappablePhysicalMemoryInRange iter != end");
+
+    while (true) {
+        const auto& vma = iter->second;
+        const auto vma_start = vma.base;
+        const auto vma_end = vma_start + vma.size;
+        const auto vma_last = vma_end - 1;
+        const auto state = vma.state;
+        const auto attr = vma.attribute;
+
+        // Memory within region must be free or mapped heap.
+        if (!((state == MemoryState::Heap && attr == MemoryAttribute::None) ||
+              (state == MemoryState::Unmapped))) {
+            return ERR_INVALID_ADDRESS_STATE;
+        }
+
+        // Add size if relevant.
+        if (state != MemoryState::Unmapped) {
+            mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr);
+        }
+
+        // Break once we hit the end of the range.
+        if (last_addr <= vma_last) {
+            break;
+        }
+
+        // Advance to the next block.
+        cur_addr = vma_end;
+        iter = std::next(iter);
+        ASSERT_MSG(iter != vma_map.end(), "SizeOfUnmappablePhysicalMemoryInRange iter != end");
+    }
+
+    return MakeResult(mapped_size);
+}
+
 u64 VMManager::GetTotalPhysicalMemoryAvailable() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return 0xF8000000;
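
SizeOfAllocatedVMAsInRange and SizeOfUnmappablePhysicalMemoryInRange both walk the VMA map block by block, clamp each block against the queried range, and sum the mapped portion. Below is a standalone sketch of that clamp-and-sum walk over a simplified address-keyed map; it is illustrative only and not part of this commit, Block and MappedBytesInRange are stand-ins for yuzu's types, and the map is assumed to cover the queried range with contiguous blocks.

// Illustrative sketch, not part of this commit: the clamp-and-sum interval walk used by
// SizeOfAllocatedVMAsInRange, over a simplified block map keyed by base address.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <map>

struct Block {
    std::uint64_t size;
    bool mapped;
};

std::uint64_t MappedBytesInRange(const std::map<std::uint64_t, Block>& blocks,
                                 std::uint64_t address, std::uint64_t size) {
    const std::uint64_t end_addr = address + size;
    std::uint64_t mapped = 0;
    std::uint64_t cur_addr = address;

    // First block whose base is <= address (assumes such a block exists).
    auto iter = std::prev(blocks.upper_bound(address));

    while (iter != blocks.end()) {
        const std::uint64_t block_end = iter->first + iter->second.size;
        if (iter->second.mapped) {
            mapped += std::min(end_addr, block_end) - cur_addr; // clamp the block to the query
        }
        if (end_addr <= block_end) {
            break; // reached the end of the queried range
        }
        cur_addr = block_end;
        ++iter;
    }
    return mapped;
}

int main() {
    const std::map<std::uint64_t, Block> blocks{
        {0x0000, {0x1000, false}}, {0x1000, {0x2000, true}}, {0x3000, {0x1000, false}}};
    // Query [0x800, 0x3800): only [0x1000, 0x3000) is mapped, so this prints 0x2000.
    std::cout << std::hex << "0x" << MappedBytesInRange(blocks, 0x800, 0x3000) << '\n';
}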
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 9fe6ac3f4..0aecb7499 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -349,7 +349,8 @@ public:
      * @param state MemoryState tag to attach to the VMA.
      */
     ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block,
-                                        std::size_t offset, u64 size, MemoryState state);
+                                        std::size_t offset, u64 size, MemoryState state,
+                                        VMAPermission perm = VMAPermission::ReadWrite);
 
     /**
      * Maps an unmanaged host memory pointer at a given address.
@@ -450,6 +451,34 @@ public:
     ///
     ResultVal<VAddr> SetHeapSize(u64 size);
 
+    /// Maps memory at a given address.
+    ///
+    /// @param addr The virtual address to map memory at.
+    /// @param size The amount of memory to map.
+    ///
+    /// @note The destination address must lie within the Map region.
+    ///
+    /// @note This function requires that SystemResourceSize be non-zero,
+    ///       however, this is just because if it were not then the
+    ///       resulting page tables could be exploited on hardware by
+    ///       a malicious program. SystemResource usage does not need
+    ///       to be explicitly checked or updated here.
+    ResultCode MapPhysicalMemory(VAddr target, u64 size);
+
+    /// Unmaps memory at a given address.
+    ///
+    /// @param addr The virtual address to unmap memory at.
+    /// @param size The amount of memory to unmap.
+    ///
+    /// @note The destination address must lie within the Map region.
+    ///
+    /// @note This function requires that SystemResourceSize be non-zero,
+    ///       however, this is just because if it were not then the
+    ///       resulting page tables could be exploited on hardware by
+    ///       a malicious program. SystemResource usage does not need
+    ///       to be explicitly checked or updated here.
+    ResultCode UnmapPhysicalMemory(VAddr target, u64 size);
+
     /// Maps a region of memory as code memory.
     ///
     /// @param dst_address The base address of the region to create the aliasing memory region.
@@ -657,6 +686,11 @@ private:
      */
     VMAIter MergeAdjacent(VMAIter vma);
 
+    /**
+     * Merges two adjacent VMAs.
+     */
+    void MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryArea& right);
+
     /// Updates the pages corresponding to this VMA so they match the VMA's attributes.
     void UpdatePageTableForVMA(const VirtualMemoryArea& vma);
 
@@ -701,6 +735,13 @@ private:
                                  MemoryAttribute attribute_mask, MemoryAttribute attribute,
                                  MemoryAttribute ignore_mask) const;
 
+    /// Gets the amount of memory currently mapped (state != Unmapped) in a range.
+    ResultVal<std::size_t> SizeOfAllocatedVMAsInRange(VAddr address, std::size_t size) const;
+
+    /// Gets the amount of memory unmappable by UnmapPhysicalMemory in a range.
+    ResultVal<std::size_t> SizeOfUnmappablePhysicalMemoryInRange(VAddr address,
+                                                                 std::size_t size) const;
+
     /**
      * A map covering the entirety of the managed address space, keyed by the `base` field of each
      * VMA. It must always be modified by splitting or merging VMAs, so that the invariant
@@ -742,6 +783,11 @@ private:
     // end of the range. This is essentially 'base_address + current_size'.
     VAddr heap_end = 0;
 
+    // The current amount of memory mapped via MapPhysicalMemory.
+    // This is used here (and in Nintendo's kernel) only for debugging, and does not impact
+    // any behavior.
+    u64 physical_memory_mapped = 0;
+
     Core::System& system;
 };
 } // namespace Kernel