 src/core/CMakeLists.txt            |    2 -
 src/core/hle/kernel/vm_manager.cpp | 1175 ----------------------------
 src/core/hle/kernel/vm_manager.h   |  796 -------------------
 3 files changed, 0 insertions, 1973 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 4ca68a309..8546d3602 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -209,8 +209,6 @@ add_library(core STATIC
     hle/kernel/time_manager.h
     hle/kernel/transfer_memory.cpp
     hle/kernel/transfer_memory.h
-    hle/kernel/vm_manager.cpp
-    hle/kernel/vm_manager.h
     hle/kernel/writable_event.cpp
     hle/kernel/writable_event.h
     hle/lock.cpp
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
deleted file mode 100644
index 024c22901..000000000
--- a/src/core/hle/kernel/vm_manager.cpp
+++ /dev/null
@@ -1,1175 +0,0 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <cstring>
#include <iterator>
#include <utility>
#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/memory_hook.h"
#include "core/core.h"
#include "core/file_sys/program_metadata.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/memory.h"

namespace Kernel {
namespace {
const char* GetMemoryStateName(MemoryState state) {
    static constexpr const char* names[] = {
        "Unmapped",       "Io",
        "Normal",         "Code",
        "CodeData",       "Heap",
        "Shared",         "Unknown1",
        "ModuleCode",     "ModuleCodeData",
        "IpcBuffer0",     "Stack",
        "ThreadLocal",    "TransferMemoryIsolated",
        "TransferMemory", "ProcessMemory",
        "Inaccessible",   "IpcBuffer1",
        "IpcBuffer3",     "KernelStack",
    };

    return names[ToSvcMemoryState(state)];
}

// Checks if a given address range lies within a larger address range.
constexpr bool IsInsideAddressRange(VAddr address, u64 size, VAddr address_range_begin,
                                    VAddr address_range_end) {
    const VAddr end_address = address + size - 1;
    return address_range_begin <= address && end_address <= address_range_end - 1;
}
} // Anonymous namespace

bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
    ASSERT(base + size == next.base);
    if (permissions != next.permissions || state != next.state || attribute != next.attribute ||
        type != next.type) {
        return false;
    }
    if ((attribute & MemoryAttribute::DeviceMapped) == MemoryAttribute::DeviceMapped) {
        // TODO: Can device mapped memory be merged sanely?
        // Not merging it may cause inaccuracies versus hardware when memory layout is queried.
        return false;
    }
    if (type == VMAType::AllocatedMemoryBlock) {
        return true;
    }
    if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) {
        return false;
    }
    if (type == VMAType::MMIO && paddr + size != next.paddr) {
        return false;
    }
    return true;
}

VMManager::VMManager(Core::System& system) : system{system} {
    // Default to assuming a 39-bit address space. This way we have a sane
    // starting point with executables that don't provide metadata.
    Reset(FileSys::ProgramAddressSpaceType::Is39Bit);
}

VMManager::~VMManager() = default;

void VMManager::Reset(FileSys::ProgramAddressSpaceType type) {
    Clear();

    InitializeMemoryRegionRanges(type);

    page_table.Resize(address_space_width);

    // Initialize the map with a single free region covering the entire managed space.
    VirtualMemoryArea initial_vma;
    initial_vma.size = address_space_end;
    vma_map.emplace(initial_vma.base, initial_vma);

    UpdatePageTableForVMA(initial_vma);
}

VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
    if (target >= address_space_end) {
        return vma_map.end();
    } else {
        return std::prev(vma_map.upper_bound(target));
    }
}

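FindVMA relies on the usual ordered-map interval lookup: the VMAs are contiguous and non-overlapping, keyed by base address, so std::prev(vma_map.upper_bound(target)) yields the region containing target. A minimal standalone sketch of the same lookup, with a hypothetical Region type standing in for VirtualMemoryArea:

#include <cassert>
#include <cstdint>
#include <map>

struct Region {
    std::uint64_t base;
    std::uint64_t size;
};

// Contiguous, non-overlapping regions keyed by base address.
// upper_bound(target) returns the first region starting *after* target,
// so the region immediately before it is the one containing target.
const Region& FindRegion(const std::map<std::uint64_t, Region>& regions, std::uint64_t target) {
    auto it = regions.upper_bound(target);
    assert(it != regions.begin()); // holds as long as a region starting at 0 always exists
    return std::prev(it)->second;
}

int main() {
    std::map<std::uint64_t, Region> regions;
    regions.emplace(0x0000, Region{0x0000, 0x1000});
    regions.emplace(0x1000, Region{0x1000, 0x3000});
    assert(FindRegion(regions, 0x0FFF).base == 0x0000);
    assert(FindRegion(regions, 0x2FFF).base == 0x1000);
}

The std::prev is safe in VMManager because Reset seeds the map with a region starting at address 0, so upper_bound can never return begin() for an in-range target.
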
bool VMManager::IsValidHandle(VMAHandle handle) const {
    return handle != vma_map.cend();
}

ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
                                                          std::shared_ptr<PhysicalMemory> block,
                                                          std::size_t offset, u64 size,
                                                          MemoryState state, VMAPermission perm) {
    ASSERT(block != nullptr);
    ASSERT(offset + size <= block->size());

    // This is the appropriately sized VMA that will turn into our allocation.
    CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
    VirtualMemoryArea& final_vma = vma_handle->second;
    ASSERT(final_vma.size == size);

    final_vma.type = VMAType::AllocatedMemoryBlock;
    final_vma.permissions = perm;
    final_vma.state = state;
    final_vma.backing_block = std::move(block);
    final_vma.offset = offset;
    UpdatePageTableForVMA(final_vma);

    return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
}

ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* memory, u64 size,
                                                            MemoryState state) {
    ASSERT(memory != nullptr);

    // This is the appropriately sized VMA that will turn into our allocation.
    CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
    VirtualMemoryArea& final_vma = vma_handle->second;
    ASSERT(final_vma.size == size);

    final_vma.type = VMAType::BackingMemory;
    final_vma.permissions = VMAPermission::ReadWrite;
    final_vma.state = state;
    final_vma.backing_memory = memory;
    UpdatePageTableForVMA(final_vma);

    return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
}

ResultVal<VAddr> VMManager::FindFreeRegion(u64 size) const {
    return FindFreeRegion(GetASLRRegionBaseAddress(), GetASLRRegionEndAddress(), size);
}

ResultVal<VAddr> VMManager::FindFreeRegion(VAddr begin, VAddr end, u64 size) const {
    ASSERT(begin < end);
    ASSERT(size <= end - begin);

    const VMAHandle vma_handle =
        std::find_if(vma_map.begin(), vma_map.end(), [begin, end, size](const auto& vma) {
            if (vma.second.type != VMAType::Free) {
                return false;
            }
            const VAddr vma_base = vma.second.base;
            const VAddr vma_end = vma_base + vma.second.size;
            const VAddr assumed_base = (begin < vma_base) ? vma_base : begin;
            const VAddr used_range = assumed_base + size;

            return vma_base <= assumed_base && assumed_base < used_range && used_range < end &&
                   used_range <= vma_end;
        });

    if (vma_handle == vma_map.cend()) {
        // TODO(Subv): Find the correct error code here.
        return RESULT_UNKNOWN;
    }

    const VAddr target = std::max(begin, vma_handle->second.base);
    return MakeResult<VAddr>(target);
}

ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size,
                                                   MemoryState state,
                                                   Common::MemoryHookPointer mmio_handler) {
    // This is the appropriately sized VMA that will turn into our allocation.
    CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
    VirtualMemoryArea& final_vma = vma_handle->second;
    ASSERT(final_vma.size == size);

    final_vma.type = VMAType::MMIO;
    final_vma.permissions = VMAPermission::ReadWrite;
    final_vma.state = state;
    final_vma.paddr = paddr;
    final_vma.mmio_handler = std::move(mmio_handler);
    UpdatePageTableForVMA(final_vma);

    return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
}

VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) {
    VirtualMemoryArea& vma = vma_handle->second;
    vma.type = VMAType::Free;
    vma.permissions = VMAPermission::None;
    vma.state = MemoryState::Unmapped;
    vma.attribute = MemoryAttribute::None;

    vma.backing_block = nullptr;
    vma.offset = 0;
    vma.backing_memory = nullptr;
    vma.paddr = 0;

    UpdatePageTableForVMA(vma);

    return MergeAdjacent(vma_handle);
}

ResultCode VMManager::UnmapRange(VAddr target, u64 size) {
    CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
    const VAddr target_end = target + size;

    const VMAIter end = vma_map.end();
    // The comparison against the end of the range must be done using addresses since VMAs can be
    // merged during this process, causing invalidation of the iterators.
    while (vma != end && vma->second.base < target_end) {
        vma = std::next(Unmap(vma));
    }

    ASSERT(FindVMA(target)->second.size >= size);

    return RESULT_SUCCESS;
}

VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) {
    VMAIter iter = StripIterConstness(vma_handle);

    VirtualMemoryArea& vma = iter->second;
    vma.permissions = new_perms;
    UpdatePageTableForVMA(vma);

    return MergeAdjacent(iter);
}

ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_perms) {
    CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
    const VAddr target_end = target + size;

    const VMAIter end = vma_map.end();
    // The comparison against the end of the range must be done using addresses since VMAs can be
    // merged during this process, causing invalidation of the iterators.
    while (vma != end && vma->second.base < target_end) {
        vma = std::next(StripIterConstness(Reprotect(vma, new_perms)));
    }

    return RESULT_SUCCESS;
}

ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
    if (size > GetHeapRegionSize()) {
        return ERR_OUT_OF_MEMORY;
    }

    // No need to do any additional work if the heap is already the given size.
    if (size == GetCurrentHeapSize()) {
        return MakeResult(heap_region_base);
    }

    if (heap_memory == nullptr) {
        // Initialize heap
        heap_memory = std::make_shared<PhysicalMemory>(size);
        heap_end = heap_region_base + size;
    } else {
        UnmapRange(heap_region_base, GetCurrentHeapSize());
    }

    // If necessary, expand the backing vector to cover the new heap extents when
    // allocating. Otherwise, shrink the backing memory if a smaller heap has been
    // requested.
    heap_memory->resize(size);
    heap_memory->shrink_to_fit();
    RefreshMemoryBlockMappings(heap_memory.get());

    heap_end = heap_region_base + size;
    ASSERT(GetCurrentHeapSize() == heap_memory->size());

    const auto mapping_result =
        MapMemoryBlock(heap_region_base, heap_memory, 0, size, MemoryState::Heap);
    if (mapping_result.Failed()) {
        return mapping_result.Code();
    }

    return MakeResult<VAddr>(heap_region_base);
}

ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
    // Check how much memory we've already mapped.
    const auto mapped_size_result = SizeOfAllocatedVMAsInRange(target, size);
    if (mapped_size_result.Failed()) {
        return mapped_size_result.Code();
    }

    // If we've already mapped the desired amount, return early.
    const std::size_t mapped_size = *mapped_size_result;
    if (mapped_size == size) {
        return RESULT_SUCCESS;
    }

    // Check that we can map the memory we want.
    const auto res_limit = system.CurrentProcess()->GetResourceLimit();
    const u64 physmem_remaining = res_limit->GetMaxResourceValue(ResourceType::PhysicalMemory) -
                                  res_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory);
    if (physmem_remaining < (size - mapped_size)) {
        return ERR_RESOURCE_LIMIT_EXCEEDED;
    }

    // Keep track of the memory regions we map, so they can be unmapped on failure.
    std::vector<std::pair<u64, u64>> mapped_regions;
    ResultCode result = RESULT_SUCCESS;

    // Iterate, trying to map memory.
    {
        const auto end_addr = target + size;
        const auto last_addr = end_addr - 1;
        VAddr cur_addr = target;

        auto iter = FindVMA(target);
        ASSERT(iter != vma_map.end());

        while (true) {
            const auto& vma = iter->second;
            const auto vma_start = vma.base;
            const auto vma_end = vma_start + vma.size;
            const auto vma_last = vma_end - 1;

            // Map the memory block
            const auto map_size = std::min(end_addr - cur_addr, vma_end - cur_addr);
            if (vma.state == MemoryState::Unmapped) {
                const auto map_res =
                    MapMemoryBlock(cur_addr, std::make_shared<PhysicalMemory>(map_size), 0,
                                   map_size, MemoryState::Heap, VMAPermission::ReadWrite);
                result = map_res.Code();
                if (result.IsError()) {
                    break;
                }

                mapped_regions.emplace_back(cur_addr, map_size);
            }

            // Break once we hit the end of the range.
            if (last_addr <= vma_last) {
                break;
            }

            // Advance to the next block.
            cur_addr = vma_end;
            iter = FindVMA(cur_addr);
            ASSERT(iter != vma_map.end());
        }
    }

    // If we failed, unmap memory.
    if (result.IsError()) {
        for (const auto [unmap_address, unmap_size] : mapped_regions) {
            ASSERT_MSG(UnmapRange(unmap_address, unmap_size).IsSuccess(),
                       "Failed to unmap memory range.");
        }

        return result;
    }

    // Update amount of mapped physical memory.
    physical_memory_mapped += size - mapped_size;

    return RESULT_SUCCESS;
}

ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) {
    // Check how much memory is currently mapped.
    const auto mapped_size_result = SizeOfUnmappablePhysicalMemoryInRange(target, size);
    if (mapped_size_result.Failed()) {
        return mapped_size_result.Code();
    }

    // If we've already unmapped all the memory, return early.
    const std::size_t mapped_size = *mapped_size_result;
    if (mapped_size == 0) {
        return RESULT_SUCCESS;
    }

    // Keep track of the memory regions we unmap.
    std::vector<std::pair<u64, u64>> unmapped_regions;
    ResultCode result = RESULT_SUCCESS;

    // Try to unmap regions.
    {
        const auto end_addr = target + size;
        const auto last_addr = end_addr - 1;
        VAddr cur_addr = target;

        auto iter = FindVMA(target);
        ASSERT(iter != vma_map.end());

        while (true) {
            const auto& vma = iter->second;
            const auto vma_start = vma.base;
            const auto vma_end = vma_start + vma.size;
            const auto vma_last = vma_end - 1;

            // Unmap the memory block
            const auto unmap_size = std::min(end_addr - cur_addr, vma_end - cur_addr);
            if (vma.state == MemoryState::Heap) {
                result = UnmapRange(cur_addr, unmap_size);
                if (result.IsError()) {
                    break;
                }

                unmapped_regions.emplace_back(cur_addr, unmap_size);
            }

            // Break once we hit the end of the range.
            if (last_addr <= vma_last) {
                break;
            }

            // Advance to the next block.
            cur_addr = vma_end;
            iter = FindVMA(cur_addr);
            ASSERT(iter != vma_map.end());
        }
    }

    // If we failed, re-map regions.
    // TODO: Preserve memory contents?
    if (result.IsError()) {
        for (const auto [map_address, map_size] : unmapped_regions) {
            const auto remap_res =
                MapMemoryBlock(map_address, std::make_shared<PhysicalMemory>(map_size), 0,
                               map_size, MemoryState::Heap, VMAPermission::None);
            ASSERT_MSG(remap_res.Succeeded(), "Failed to remap a memory block.");
        }

        return result;
    }

    // Update mapped amount
    physical_memory_mapped -= mapped_size;

    return RESULT_SUCCESS;
}

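Both MapPhysicalMemory and UnmapPhysicalMemory above follow the same all-or-nothing pattern: walk the affected VMAs block by block, record each successful operation, and replay the records in reverse on failure. A simplified sketch of that pattern, with hypothetical try_map/unmap callbacks standing in for the kernel's mapping calls:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <utility>
#include <vector>

using MapFn = std::function<bool(std::uint64_t, std::uint64_t)>;
using UnmapFn = std::function<void(std::uint64_t, std::uint64_t)>;

// Walks [target, target + size) in block-sized steps, recording each successful
// mapping so that a failure partway through undoes everything already done.
bool MapAllOrNothing(std::uint64_t target, std::uint64_t size, std::uint64_t block_size,
                     const MapFn& try_map, const UnmapFn& unmap) {
    std::vector<std::pair<std::uint64_t, std::uint64_t>> mapped_regions;
    for (std::uint64_t cur = target; cur < target + size; cur += block_size) {
        const std::uint64_t chunk = std::min(block_size, target + size - cur);
        if (!try_map(cur, chunk)) {
            // Roll back in reverse order so the address space returns to its prior state.
            for (auto it = mapped_regions.rbegin(); it != mapped_regions.rend(); ++it) {
                unmap(it->first, it->second);
            }
            return false;
        }
        mapped_regions.emplace_back(cur, chunk);
    }
    return true;
}

int main() {
    int calls = 0;
    const bool ok = MapAllOrNothing(
        0x10000, 0x4000, 0x1000,
        [&](std::uint64_t, std::uint64_t) { return ++calls < 3; }, // fail on the third block
        [](std::uint64_t addr, std::uint64_t) {
            std::printf("rolled back %#llx\n", static_cast<unsigned long long>(addr));
        });
    std::printf("ok = %d\n", ok);
}
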
ResultCode VMManager::MapCodeMemory(VAddr dst_address, VAddr src_address, u64 size) {
    constexpr auto ignore_attribute = MemoryAttribute::LockedForIPC | MemoryAttribute::DeviceMapped;
    const auto src_check_result = CheckRangeState(
        src_address, size, MemoryState::All, MemoryState::Heap, VMAPermission::All,
        VMAPermission::ReadWrite, MemoryAttribute::Mask, MemoryAttribute::None, ignore_attribute);

    if (src_check_result.Failed()) {
        return src_check_result.Code();
    }

    const auto mirror_result =
        MirrorMemory(dst_address, src_address, size, MemoryState::ModuleCode);
    if (mirror_result.IsError()) {
        return mirror_result;
    }

    // Ensure we lock the source memory region.
    const auto src_vma_result = CarveVMARange(src_address, size);
    if (src_vma_result.Failed()) {
        return src_vma_result.Code();
    }
    auto src_vma_iter = *src_vma_result;
    src_vma_iter->second.attribute = MemoryAttribute::Locked;
    Reprotect(src_vma_iter, VMAPermission::Read);

    // The destination memory region is fine as is, however we need to make it read-only.
    return ReprotectRange(dst_address, size, VMAPermission::Read);
}

ResultCode VMManager::UnmapCodeMemory(VAddr dst_address, VAddr src_address, u64 size) {
    constexpr auto ignore_attribute = MemoryAttribute::LockedForIPC | MemoryAttribute::DeviceMapped;
    const auto src_check_result = CheckRangeState(
        src_address, size, MemoryState::All, MemoryState::Heap, VMAPermission::None,
        VMAPermission::None, MemoryAttribute::Mask, MemoryAttribute::Locked, ignore_attribute);

    if (src_check_result.Failed()) {
        return src_check_result.Code();
    }

    // Yes, the kernel only checks the first page of the region.
    const auto dst_check_result =
        CheckRangeState(dst_address, Memory::PAGE_SIZE, MemoryState::FlagModule,
                        MemoryState::FlagModule, VMAPermission::None, VMAPermission::None,
                        MemoryAttribute::Mask, MemoryAttribute::None, ignore_attribute);

    if (dst_check_result.Failed()) {
        return dst_check_result.Code();
    }

    const auto dst_memory_state = std::get<MemoryState>(*dst_check_result);
    const auto dst_contiguous_check_result = CheckRangeState(
        dst_address, size, MemoryState::All, dst_memory_state, VMAPermission::None,
        VMAPermission::None, MemoryAttribute::Mask, MemoryAttribute::None, ignore_attribute);

    if (dst_contiguous_check_result.Failed()) {
        return dst_contiguous_check_result.Code();
    }

    const auto unmap_result = UnmapRange(dst_address, size);
    if (unmap_result.IsError()) {
        return unmap_result;
    }

    // With the mirrored portion unmapped, restore the original region's traits.
    const auto src_vma_result = CarveVMARange(src_address, size);
    if (src_vma_result.Failed()) {
        return src_vma_result.Code();
    }
    auto src_vma_iter = *src_vma_result;
    src_vma_iter->second.state = MemoryState::Heap;
    src_vma_iter->second.attribute = MemoryAttribute::None;
    Reprotect(src_vma_iter, VMAPermission::ReadWrite);

    if (dst_memory_state == MemoryState::ModuleCode) {
        system.InvalidateCpuInstructionCaches();
    }

    return unmap_result;
}

MemoryInfo VMManager::QueryMemory(VAddr address) const {
    const auto vma = FindVMA(address);
    MemoryInfo memory_info{};

    if (IsValidHandle(vma)) {
        memory_info.base_address = vma->second.base;
        memory_info.attributes = ToSvcMemoryAttribute(vma->second.attribute);
        memory_info.permission = static_cast<u32>(vma->second.permissions);
        memory_info.size = vma->second.size;
        memory_info.state = ToSvcMemoryState(vma->second.state);
    } else {
        memory_info.base_address = address_space_end;
        memory_info.permission = static_cast<u32>(VMAPermission::None);
        memory_info.size = 0 - address_space_end;
        memory_info.state = static_cast<u32>(MemoryState::Inaccessible);
    }

    return memory_info;
}

ResultCode VMManager::SetMemoryAttribute(VAddr address, u64 size, MemoryAttribute mask,
                                         MemoryAttribute attribute) {
    constexpr auto ignore_mask =
        MemoryAttribute::Uncached | MemoryAttribute::DeviceMapped | MemoryAttribute::Locked;
    constexpr auto attribute_mask = ~ignore_mask;

    const auto result = CheckRangeState(
        address, size, MemoryState::FlagUncached, MemoryState::FlagUncached, VMAPermission::None,
        VMAPermission::None, attribute_mask, MemoryAttribute::None, ignore_mask);

    if (result.Failed()) {
        return result.Code();
    }

    const auto [prev_state, prev_permissions, prev_attributes] = *result;
    const auto new_attribute = (prev_attributes & ~mask) | (mask & attribute);

    const auto carve_result = CarveVMARange(address, size);
    if (carve_result.Failed()) {
        return carve_result.Code();
    }

    auto vma_iter = *carve_result;
    vma_iter->second.attribute = new_attribute;

    MergeAdjacent(vma_iter);
    return RESULT_SUCCESS;
}

ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state) {
    const auto vma = FindVMA(src_addr);

    ASSERT_MSG(vma != vma_map.end(), "Invalid memory address");
    ASSERT_MSG(vma->second.backing_block, "Backing block doesn't exist for address");

    // The returned VMA might be a bigger one encompassing the desired address.
    const auto vma_offset = src_addr - vma->first;
    ASSERT_MSG(vma_offset + size <= vma->second.size,
               "Shared memory exceeds bounds of mapped block");

    const std::shared_ptr<PhysicalMemory>& backing_block = vma->second.backing_block;
    const std::size_t backing_block_offset = vma->second.offset + vma_offset;

    CASCADE_RESULT(auto new_vma,
                   MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size, state));
    // Protect mirror with permissions from old region
    Reprotect(new_vma, vma->second.permissions);
    // Remove permissions from old region
    ReprotectRange(src_addr, size, VMAPermission::None);

    return RESULT_SUCCESS;
}

void VMManager::RefreshMemoryBlockMappings(const PhysicalMemory* block) {
    // If this ever proves to have a noticeable performance impact, allow users of the function to
    // specify a specific range of addresses to limit the scan to.
    for (const auto& p : vma_map) {
        const VirtualMemoryArea& vma = p.second;
        if (block == vma.backing_block.get()) {
            UpdatePageTableForVMA(vma);
        }
    }
}

void VMManager::LogLayout() const {
    for (const auto& p : vma_map) {
        const VirtualMemoryArea& vma = p.second;
        LOG_DEBUG(Kernel, "{:016X} - {:016X} size: {:016X} {}{}{} {}", vma.base,
                  vma.base + vma.size, vma.size,
                  (u8)vma.permissions & (u8)VMAPermission::Read ? 'R' : '-',
                  (u8)vma.permissions & (u8)VMAPermission::Write ? 'W' : '-',
                  (u8)vma.permissions & (u8)VMAPermission::Execute ? 'X' : '-',
                  GetMemoryStateName(vma.state));
    }
}

VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) {
    // This uses a neat C++ trick to convert a const_iterator to a regular iterator, given
    // non-const access to its container.
    return vma_map.erase(iter, iter); // Erases an empty range of elements
}

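The trick in StripIterConstness is that, since C++11, the associative containers' erase(first, last) overload takes const_iterators but returns a mutable iterator; erasing an empty range therefore converts the handle in constant time without modifying the map. A standalone sketch:

#include <cassert>
#include <map>

int main() {
    std::map<int, int> m{{1, 10}, {2, 20}};

    std::map<int, int>::const_iterator cit = m.find(2);

    // erase(cit, cit) removes nothing, but returns a non-const iterator
    // positioned at the same element, given non-const access to the map.
    std::map<int, int>::iterator it = m.erase(cit, cit);

    it->second = 42; // now mutable
    assert(m.at(2) == 42);
    assert(m.size() == 2);
}
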
ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u64 size) {
    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x{:016X}", size);
    ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x{:016X}", base);

    VMAIter vma_handle = StripIterConstness(FindVMA(base));
    if (vma_handle == vma_map.end()) {
        // Target address is outside the range managed by the kernel
        return ERR_INVALID_ADDRESS;
    }

    const VirtualMemoryArea& vma = vma_handle->second;
    if (vma.type != VMAType::Free) {
        // Region is already allocated
        return ERR_INVALID_ADDRESS_STATE;
    }

    const VAddr start_in_vma = base - vma.base;
    const VAddr end_in_vma = start_in_vma + size;

    if (end_in_vma > vma.size) {
        // Requested allocation doesn't fit inside VMA
        return ERR_INVALID_ADDRESS_STATE;
    }

    if (end_in_vma != vma.size) {
        // Split VMA at the end of the allocated region
        SplitVMA(vma_handle, end_in_vma);
    }
    if (start_in_vma != 0) {
        // Split VMA at the start of the allocated region
        vma_handle = SplitVMA(vma_handle, start_in_vma);
    }

    return MakeResult<VMAIter>(vma_handle);
}

ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u64 size) {
    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x{:016X}", size);
    ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x{:016X}", target);

    const VAddr target_end = target + size;
    ASSERT(target_end >= target);
    ASSERT(target_end <= address_space_end);
    ASSERT(size > 0);

    VMAIter begin_vma = StripIterConstness(FindVMA(target));
    const VMAIter i_end = vma_map.lower_bound(target_end);
    if (std::any_of(begin_vma, i_end,
                    [](const auto& entry) { return entry.second.type == VMAType::Free; })) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    if (target != begin_vma->second.base) {
        begin_vma = SplitVMA(begin_vma, target - begin_vma->second.base);
    }

    VMAIter end_vma = StripIterConstness(FindVMA(target_end));
    if (end_vma != vma_map.end() && target_end != end_vma->second.base) {
        end_vma = SplitVMA(end_vma, target_end - end_vma->second.base);
    }

    return MakeResult<VMAIter>(begin_vma);
}

VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) {
    VirtualMemoryArea& old_vma = vma_handle->second;
    VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA

    // For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably
    // a bug. This restriction might be removed later.
    ASSERT(offset_in_vma < old_vma.size);
    ASSERT(offset_in_vma > 0);

    old_vma.size = offset_in_vma;
    new_vma.base += offset_in_vma;
    new_vma.size -= offset_in_vma;

    switch (new_vma.type) {
    case VMAType::Free:
        break;
    case VMAType::AllocatedMemoryBlock:
        new_vma.offset += offset_in_vma;
        break;
    case VMAType::BackingMemory:
        new_vma.backing_memory += offset_in_vma;
        break;
    case VMAType::MMIO:
        new_vma.paddr += offset_in_vma;
        break;
    }

    ASSERT(old_vma.CanBeMergedWith(new_vma));

    return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
}

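SplitVMA's bookkeeping is easier to see in isolation: the right-hand piece keeps the same properties but shifts its base, size, and per-type backing offset by the split point. A minimal sketch with a hypothetical Span type covering only the offset case:

#include <cassert>
#include <cstdint>

struct Span {
    std::uint64_t base;
    std::uint64_t size;
    std::uint64_t offset; // offset into a backing allocation
};

// Splits span into [base, base + offset_in_span) and [base + offset_in_span, end),
// keeping the backing-offset bookkeeping consistent, as SplitVMA does for
// AllocatedMemoryBlock VMAs.
Span SplitAt(Span& left, std::uint64_t offset_in_span) {
    assert(offset_in_span > 0 && offset_in_span < left.size);
    Span right = left;
    left.size = offset_in_span;
    right.base += offset_in_span;
    right.size -= offset_in_span;
    right.offset += offset_in_span;
    return right;
}

int main() {
    Span s{0x1000, 0x3000, 0};
    const Span right = SplitAt(s, 0x1000);
    assert(s.base == 0x1000 && s.size == 0x1000);
    assert(right.base == 0x2000 && right.size == 0x2000 && right.offset == 0x1000);
}
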
VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
    const VMAIter next_vma = std::next(iter);
    if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
        MergeAdjacentVMA(iter->second, next_vma->second);
        vma_map.erase(next_vma);
    }

    if (iter != vma_map.begin()) {
        VMAIter prev_vma = std::prev(iter);
        if (prev_vma->second.CanBeMergedWith(iter->second)) {
            MergeAdjacentVMA(prev_vma->second, iter->second);
            vma_map.erase(iter);
            iter = prev_vma;
        }
    }

    return iter;
}

void VMManager::MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryArea& right) {
    ASSERT(left.CanBeMergedWith(right));

    // Always merge allocated memory blocks, even when they don't share the same backing block.
    if (left.type == VMAType::AllocatedMemoryBlock &&
        (left.backing_block != right.backing_block || left.offset + left.size != right.offset)) {

        // Check if we can save work.
        if (left.offset == 0 && left.size == left.backing_block->size()) {
            // Fast case: left is an entire backing block.
            left.backing_block->resize(left.size + right.size);
            std::memcpy(left.backing_block->data() + left.size,
                        right.backing_block->data() + right.offset, right.size);
        } else {
            // Slow case: make a new memory block for left and right.
            auto new_memory = std::make_shared<PhysicalMemory>();
            new_memory->resize(left.size + right.size);
            std::memcpy(new_memory->data(), left.backing_block->data() + left.offset, left.size);
            std::memcpy(new_memory->data() + left.size, right.backing_block->data() + right.offset,
                        right.size);

            left.backing_block = std::move(new_memory);
            left.offset = 0;
        }

        // Page table update is needed, because backing memory changed.
        left.size += right.size;
        UpdatePageTableForVMA(left);
    } else {
        // Just update the size.
        left.size += right.size;
    }
}

void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
    auto& memory = system.Memory();

    switch (vma.type) {
    case VMAType::Free:
        memory.UnmapRegion(page_table, vma.base, vma.size);
        break;
    case VMAType::AllocatedMemoryBlock:
        memory.MapMemoryRegion(page_table, vma.base, vma.size, *vma.backing_block, vma.offset);
        break;
    case VMAType::BackingMemory:
        memory.MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory);
        break;
    case VMAType::MMIO:
        memory.MapIoRegion(page_table, vma.base, vma.size, vma.mmio_handler);
        break;
    }
}

void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType type) {
    u64 map_region_size = 0;
    u64 heap_region_size = 0;
    u64 stack_region_size = 0;
    u64 tls_io_region_size = 0;

    u64 stack_and_tls_io_end = 0;

    switch (type) {
    case FileSys::ProgramAddressSpaceType::Is32Bit:
    case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
        address_space_width = 32;
        code_region_base = 0x200000;
        code_region_end = code_region_base + 0x3FE00000;
        aslr_region_base = 0x200000;
        aslr_region_end = aslr_region_base + 0xFFE00000;
        if (type == FileSys::ProgramAddressSpaceType::Is32Bit) {
            map_region_size = 0x40000000;
            heap_region_size = 0x40000000;
        } else {
            map_region_size = 0;
            heap_region_size = 0x80000000;
        }
        stack_and_tls_io_end = 0x40000000;
        break;
    case FileSys::ProgramAddressSpaceType::Is36Bit:
        address_space_width = 36;
        code_region_base = 0x8000000;
        code_region_end = code_region_base + 0x78000000;
        aslr_region_base = 0x8000000;
        aslr_region_end = aslr_region_base + 0xFF8000000;
        map_region_size = 0x180000000;
        heap_region_size = 0x180000000;
        stack_and_tls_io_end = 0x80000000;
        break;
    case FileSys::ProgramAddressSpaceType::Is39Bit:
        address_space_width = 39;
        code_region_base = 0x8000000;
        code_region_end = code_region_base + 0x80000000;
        aslr_region_base = 0x8000000;
        aslr_region_end = aslr_region_base + 0x7FF8000000;
        map_region_size = 0x1000000000;
        heap_region_size = 0x180000000;
        stack_region_size = 0x80000000;
        tls_io_region_size = 0x1000000000;
        break;
    default:
        UNREACHABLE_MSG("Invalid address space type specified: {}", static_cast<u32>(type));
        return;
    }

    const u64 stack_and_tls_io_begin = aslr_region_base;

    address_space_base = 0;
    address_space_end = 1ULL << address_space_width;

    map_region_base = code_region_end;
    map_region_end = map_region_base + map_region_size;

    heap_region_base = map_region_end;
    heap_region_end = heap_region_base + heap_region_size;
    heap_end = heap_region_base;

    stack_region_base = heap_region_end;
    stack_region_end = stack_region_base + stack_region_size;

    tls_io_region_base = stack_region_end;
    tls_io_region_end = tls_io_region_base + tls_io_region_size;

    if (stack_region_size == 0) {
        stack_region_base = stack_and_tls_io_begin;
        stack_region_end = stack_and_tls_io_end;
    }

    if (tls_io_region_size == 0) {
        tls_io_region_base = stack_and_tls_io_begin;
        tls_io_region_end = stack_and_tls_io_end;
    }
}

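Since the map, heap, stack, and TLS/IO regions are stacked contiguously after the code region, the concrete layout follows directly from the constants in each switch branch. A small sketch that derives the 39-bit layout from the Is39Bit values above (illustrative local names only):

#include <cstdint>
#include <cstdio>

int main() {
    // Constants from the Is39Bit branch above.
    const std::uint64_t code_region_base = 0x8000000;
    const std::uint64_t code_region_end = code_region_base + 0x80000000;
    const std::uint64_t map_region_size = 0x1000000000;
    const std::uint64_t heap_region_size = 0x180000000;

    const std::uint64_t address_space_end = 1ULL << 39;

    // Regions are stacked contiguously after the code region.
    const std::uint64_t map_region_base = code_region_end;
    const std::uint64_t map_region_end = map_region_base + map_region_size;
    const std::uint64_t heap_region_base = map_region_end;
    const std::uint64_t heap_region_end = heap_region_base + heap_region_size;

    std::printf("address space: [0, %#llx)\n", (unsigned long long)address_space_end);
    std::printf("map:  [%#llx, %#llx)\n", (unsigned long long)map_region_base,
                (unsigned long long)map_region_end);
    std::printf("heap: [%#llx, %#llx)\n", (unsigned long long)heap_region_base,
                (unsigned long long)heap_region_end);
}
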
void VMManager::Clear() {
    ClearVMAMap();
    ClearPageTable();
}

void VMManager::ClearVMAMap() {
    vma_map.clear();
}

void VMManager::ClearPageTable() {
    std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr);
    page_table.special_regions.clear();
    std::fill(page_table.attributes.begin(), page_table.attributes.end(),
              Common::PageType::Unmapped);
}

VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, MemoryState state_mask,
                                                   MemoryState state, VMAPermission permission_mask,
                                                   VMAPermission permissions,
                                                   MemoryAttribute attribute_mask,
                                                   MemoryAttribute attribute,
                                                   MemoryAttribute ignore_mask) const {
    auto iter = FindVMA(address);

    // If we don't have a valid VMA handle at this point, then it means this is
    // being called with an address outside of the address space, which is definitely
    // indicative of a bug, as this function only operates on mapped memory regions.
    DEBUG_ASSERT(IsValidHandle(iter));

    const VAddr end_address = address + size - 1;
    const MemoryAttribute initial_attributes = iter->second.attribute;
    const VMAPermission initial_permissions = iter->second.permissions;
    const MemoryState initial_state = iter->second.state;

    while (true) {
        // The iterator should be valid throughout the traversal. Hitting the end of
        // the mapped VMA regions is unquestionably indicative of a bug.
        DEBUG_ASSERT(IsValidHandle(iter));

        const auto& vma = iter->second;

        if (vma.state != initial_state) {
            return ERR_INVALID_ADDRESS_STATE;
        }

        if ((vma.state & state_mask) != state) {
            return ERR_INVALID_ADDRESS_STATE;
        }

        if (vma.permissions != initial_permissions) {
            return ERR_INVALID_ADDRESS_STATE;
        }

        if ((vma.permissions & permission_mask) != permissions) {
            return ERR_INVALID_ADDRESS_STATE;
        }

        if ((vma.attribute | ignore_mask) != (initial_attributes | ignore_mask)) {
            return ERR_INVALID_ADDRESS_STATE;
        }

        if ((vma.attribute & attribute_mask) != attribute) {
            return ERR_INVALID_ADDRESS_STATE;
        }

        if (end_address <= vma.EndAddress()) {
            break;
        }

        ++iter;
    }

    return MakeResult(
        std::make_tuple(initial_state, initial_permissions, initial_attributes & ~ignore_mask));
}

ResultVal<std::size_t> VMManager::SizeOfAllocatedVMAsInRange(VAddr address,
                                                             std::size_t size) const {
    const VAddr end_addr = address + size;
    const VAddr last_addr = end_addr - 1;
    std::size_t mapped_size = 0;

    VAddr cur_addr = address;
    auto iter = FindVMA(cur_addr);
    ASSERT(iter != vma_map.end());

    while (true) {
        const auto& vma = iter->second;
        const VAddr vma_start = vma.base;
        const VAddr vma_end = vma_start + vma.size;
        const VAddr vma_last = vma_end - 1;

        // Add size if relevant.
        if (vma.state != MemoryState::Unmapped) {
            mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr);
        }

        // Break once we hit the end of the range.
        if (last_addr <= vma_last) {
            break;
        }

        // Advance to the next block.
        cur_addr = vma_end;
        iter = std::next(iter);
        ASSERT(iter != vma_map.end());
    }

    return MakeResult(mapped_size);
}

ResultVal<std::size_t> VMManager::SizeOfUnmappablePhysicalMemoryInRange(VAddr address,
                                                                        std::size_t size) const {
    const VAddr end_addr = address + size;
    const VAddr last_addr = end_addr - 1;
    std::size_t mapped_size = 0;

    VAddr cur_addr = address;
    auto iter = FindVMA(cur_addr);
    ASSERT(iter != vma_map.end());

    while (true) {
        const auto& vma = iter->second;
        const auto vma_start = vma.base;
        const auto vma_end = vma_start + vma.size;
        const auto vma_last = vma_end - 1;
        const auto state = vma.state;
        const auto attr = vma.attribute;

        // Memory within region must be free or mapped heap.
        if (!((state == MemoryState::Heap && attr == MemoryAttribute::None) ||
              (state == MemoryState::Unmapped))) {
            return ERR_INVALID_ADDRESS_STATE;
        }

        // Add size if relevant.
        if (state != MemoryState::Unmapped) {
            mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr);
        }

        // Break once we hit the end of the range.
        if (last_addr <= vma_last) {
            break;
        }

        // Advance to the next block.
        cur_addr = vma_end;
        iter = std::next(iter);
        ASSERT(iter != vma_map.end());
    }

    return MakeResult(mapped_size);
}

u64 VMManager::GetTotalPhysicalMemoryAvailable() const {
    LOG_WARNING(Kernel, "(STUBBED) called");
    return 0xF8000000;
}

VAddr VMManager::GetAddressSpaceBaseAddress() const {
    return address_space_base;
}

VAddr VMManager::GetAddressSpaceEndAddress() const {
    return address_space_end;
}

u64 VMManager::GetAddressSpaceSize() const {
    return address_space_end - address_space_base;
}

u64 VMManager::GetAddressSpaceWidth() const {
    return address_space_width;
}

bool VMManager::IsWithinAddressSpace(VAddr address, u64 size) const {
    return IsInsideAddressRange(address, size, GetAddressSpaceBaseAddress(),
                                GetAddressSpaceEndAddress());
}

VAddr VMManager::GetASLRRegionBaseAddress() const {
    return aslr_region_base;
}

VAddr VMManager::GetASLRRegionEndAddress() const {
    return aslr_region_end;
}

u64 VMManager::GetASLRRegionSize() const {
    return aslr_region_end - aslr_region_base;
}

bool VMManager::IsWithinASLRRegion(VAddr begin, u64 size) const {
    const VAddr range_end = begin + size;
    const VAddr aslr_start = GetASLRRegionBaseAddress();
    const VAddr aslr_end = GetASLRRegionEndAddress();

    if (aslr_start > begin || begin > range_end || range_end - 1 > aslr_end - 1) {
        return false;
    }

    if (range_end > heap_region_base && heap_region_end > begin) {
        return false;
    }

    if (range_end > map_region_base && map_region_end > begin) {
        return false;
    }

    return true;
}

VAddr VMManager::GetCodeRegionBaseAddress() const {
    return code_region_base;
}

VAddr VMManager::GetCodeRegionEndAddress() const {
    return code_region_end;
}

u64 VMManager::GetCodeRegionSize() const {
    return code_region_end - code_region_base;
}

bool VMManager::IsWithinCodeRegion(VAddr address, u64 size) const {
    return IsInsideAddressRange(address, size, GetCodeRegionBaseAddress(),
                                GetCodeRegionEndAddress());
}

VAddr VMManager::GetHeapRegionBaseAddress() const {
    return heap_region_base;
}

VAddr VMManager::GetHeapRegionEndAddress() const {
    return heap_region_end;
}

u64 VMManager::GetHeapRegionSize() const {
    return heap_region_end - heap_region_base;
}

u64 VMManager::GetCurrentHeapSize() const {
    return heap_end - heap_region_base;
}

bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const {
    return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(),
                                GetHeapRegionEndAddress());
}

VAddr VMManager::GetMapRegionBaseAddress() const {
    return map_region_base;
}

VAddr VMManager::GetMapRegionEndAddress() const {
    return map_region_end;
}

u64 VMManager::GetMapRegionSize() const {
    return map_region_end - map_region_base;
}

bool VMManager::IsWithinMapRegion(VAddr address, u64 size) const {
    return IsInsideAddressRange(address, size, GetMapRegionBaseAddress(), GetMapRegionEndAddress());
}

VAddr VMManager::GetStackRegionBaseAddress() const {
    return stack_region_base;
}

VAddr VMManager::GetStackRegionEndAddress() const {
    return stack_region_end;
}

u64 VMManager::GetStackRegionSize() const {
    return stack_region_end - stack_region_base;
}

bool VMManager::IsWithinStackRegion(VAddr address, u64 size) const {
    return IsInsideAddressRange(address, size, GetStackRegionBaseAddress(),
                                GetStackRegionEndAddress());
}

VAddr VMManager::GetTLSIORegionBaseAddress() const {
    return tls_io_region_base;
}

VAddr VMManager::GetTLSIORegionEndAddress() const {
    return tls_io_region_end;
}

u64 VMManager::GetTLSIORegionSize() const {
    return tls_io_region_end - tls_io_region_base;
}

bool VMManager::IsWithinTLSIORegion(VAddr address, u64 size) const {
    return IsInsideAddressRange(address, size, GetTLSIORegionBaseAddress(),
                                GetTLSIORegionEndAddress());
}

} // namespace Kernel
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
deleted file mode 100644
index 90b4b006a..000000000
--- a/src/core/hle/kernel/vm_manager.h
+++ /dev/null
@@ -1,796 +0,0 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <map>
#include <memory>
#include <tuple>
#include <vector>
#include "common/common_types.h"
#include "common/memory_hook.h"
#include "common/page_table.h"
#include "core/hle/kernel/physical_memory.h"
#include "core/hle/result.h"
#include "core/memory.h"

namespace Core {
class System;
}

namespace FileSys {
enum class ProgramAddressSpaceType : u8;
}

namespace Kernel {

enum class VMAType : u8 {
    /// VMA represents an unmapped region of the address space.
    Free,
    /// VMA is backed by a ref-counted allocated memory block.
    AllocatedMemoryBlock,
    /// VMA is backed by a raw, unmanaged pointer.
    BackingMemory,
    /// VMA is mapped to MMIO registers at a fixed PAddr.
    MMIO,
    // TODO(yuriks): Implement MemoryAlias to support MAP/UNMAP
};

/// Permissions for mapped memory blocks
enum class VMAPermission : u8 {
    None = 0,
    Read = 1,
    Write = 2,
    Execute = 4,

    ReadWrite = Read | Write,
    ReadExecute = Read | Execute,
    WriteExecute = Write | Execute,
    ReadWriteExecute = Read | Write | Execute,

    // Used as a wildcard when checking permissions across memory ranges
    All = 0xFF,
};

constexpr VMAPermission operator|(VMAPermission lhs, VMAPermission rhs) {
    return static_cast<VMAPermission>(u32(lhs) | u32(rhs));
}

constexpr VMAPermission operator&(VMAPermission lhs, VMAPermission rhs) {
    return static_cast<VMAPermission>(u32(lhs) & u32(rhs));
}

constexpr VMAPermission operator^(VMAPermission lhs, VMAPermission rhs) {
    return static_cast<VMAPermission>(u32(lhs) ^ u32(rhs));
}

constexpr VMAPermission operator~(VMAPermission permission) {
    return static_cast<VMAPermission>(~u32(permission));
}

constexpr VMAPermission& operator|=(VMAPermission& lhs, VMAPermission rhs) {
    lhs = lhs | rhs;
    return lhs;
}

constexpr VMAPermission& operator&=(VMAPermission& lhs, VMAPermission rhs) {
    lhs = lhs & rhs;
    return lhs;
}

constexpr VMAPermission& operator^=(VMAPermission& lhs, VMAPermission rhs) {
    lhs = lhs ^ rhs;
    return lhs;
}

/// Attribute flags that can be applied to a VMA
enum class MemoryAttribute : u32 {
    Mask = 0xFF,

    /// No particular qualities
    None = 0,
    /// Memory locked/borrowed for use. e.g. This would be used by transfer memory.
    Locked = 1,
    /// Memory locked for use by IPC-related internals.
    LockedForIPC = 2,
    /// Mapped as part of the device address space.
    DeviceMapped = 4,
    /// Uncached memory
    Uncached = 8,

    IpcAndDeviceMapped = LockedForIPC | DeviceMapped,
};

constexpr MemoryAttribute operator|(MemoryAttribute lhs, MemoryAttribute rhs) {
    return static_cast<MemoryAttribute>(u32(lhs) | u32(rhs));
}

constexpr MemoryAttribute operator&(MemoryAttribute lhs, MemoryAttribute rhs) {
    return static_cast<MemoryAttribute>(u32(lhs) & u32(rhs));
}

constexpr MemoryAttribute operator^(MemoryAttribute lhs, MemoryAttribute rhs) {
    return static_cast<MemoryAttribute>(u32(lhs) ^ u32(rhs));
}

constexpr MemoryAttribute operator~(MemoryAttribute attribute) {
    return static_cast<MemoryAttribute>(~u32(attribute));
}

constexpr MemoryAttribute& operator|=(MemoryAttribute& lhs, MemoryAttribute rhs) {
    lhs = lhs | rhs;
    return lhs;
}

constexpr MemoryAttribute& operator&=(MemoryAttribute& lhs, MemoryAttribute rhs) {
    lhs = lhs & rhs;
    return lhs;
}

constexpr MemoryAttribute& operator^=(MemoryAttribute& lhs, MemoryAttribute rhs) {
    lhs = lhs ^ rhs;
    return lhs;
}

constexpr u32 ToSvcMemoryAttribute(MemoryAttribute attribute) {
    return static_cast<u32>(attribute & MemoryAttribute::Mask);
}

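The operator boilerplate above is the standard pattern for using a scoped enum as type-safe bit flags: each bitwise operator casts to the underlying integer, combines, and casts back, keeping flag math out of plain u32s. A minimal sketch of the pattern and how the resulting flags are tested:

#include <cassert>
#include <cstdint>

enum class Flags : std::uint32_t {
    None = 0,
    Read = 1,
    Write = 2,
};

constexpr Flags operator|(Flags lhs, Flags rhs) {
    return static_cast<Flags>(static_cast<std::uint32_t>(lhs) | static_cast<std::uint32_t>(rhs));
}

constexpr Flags operator&(Flags lhs, Flags rhs) {
    return static_cast<Flags>(static_cast<std::uint32_t>(lhs) & static_cast<std::uint32_t>(rhs));
}

int main() {
    constexpr Flags rw = Flags::Read | Flags::Write;
    // Scoped enums have no implicit conversion to bool, so flag tests compare
    // the masked value against the flag explicitly.
    static_assert((rw & Flags::Read) == Flags::Read, "Read must be set");
    assert((rw & Flags::Write) == Flags::Write);
}
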
// clang-format off
/// Represents memory states and any relevant flags, as used by the kernel.
/// svcQueryMemory interprets these by masking away all but the first eight
/// bits when storing memory state into a MemoryInfo instance.
enum class MemoryState : u32 {
    Mask = 0xFF,
    FlagProtect = 1U << 8,
    FlagDebug = 1U << 9,
    FlagIPC0 = 1U << 10,
    FlagIPC3 = 1U << 11,
    FlagIPC1 = 1U << 12,
    FlagMapped = 1U << 13,
    FlagCode = 1U << 14,
    FlagAlias = 1U << 15,
    FlagModule = 1U << 16,
    FlagTransfer = 1U << 17,
    FlagQueryPhysicalAddressAllowed = 1U << 18,
    FlagSharedDevice = 1U << 19,
    FlagSharedDeviceAligned = 1U << 20,
    FlagIPCBuffer = 1U << 21,
    FlagMemoryPoolAllocated = 1U << 22,
    FlagMapProcess = 1U << 23,
    FlagUncached = 1U << 24,
    FlagCodeMemory = 1U << 25,

    // Wildcard used in range checking to indicate all states.
    All = 0xFFFFFFFF,

    // Convenience flag sets to reduce repetition
    IPCFlags = FlagIPC0 | FlagIPC3 | FlagIPC1,

    CodeFlags = FlagDebug | IPCFlags | FlagMapped | FlagCode | FlagQueryPhysicalAddressAllowed |
                FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,

    DataFlags = FlagProtect | IPCFlags | FlagMapped | FlagAlias | FlagTransfer |
                FlagQueryPhysicalAddressAllowed | FlagSharedDevice | FlagSharedDeviceAligned |
                FlagMemoryPoolAllocated | FlagIPCBuffer | FlagUncached,

    Unmapped = 0x00,
    Io = 0x01 | FlagMapped,
    Normal = 0x02 | FlagMapped | FlagQueryPhysicalAddressAllowed,
    Code = 0x03 | CodeFlags | FlagMapProcess,
    CodeData = 0x04 | DataFlags | FlagMapProcess | FlagCodeMemory,
    Heap = 0x05 | DataFlags | FlagCodeMemory,
    Shared = 0x06 | FlagMapped | FlagMemoryPoolAllocated,
    ModuleCode = 0x08 | CodeFlags | FlagModule | FlagMapProcess,
    ModuleCodeData = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory,

    IpcBuffer0 = 0x0A | FlagMapped | FlagQueryPhysicalAddressAllowed | FlagMemoryPoolAllocated |
                 IPCFlags | FlagSharedDevice | FlagSharedDeviceAligned,

    Stack = 0x0B | FlagMapped | IPCFlags | FlagQueryPhysicalAddressAllowed |
            FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,

    ThreadLocal = 0x0C | FlagMapped | FlagMemoryPoolAllocated,

    TransferMemoryIsolated = 0x0D | IPCFlags | FlagMapped | FlagQueryPhysicalAddressAllowed |
                             FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated |
                             FlagUncached,

    TransferMemory = 0x0E | FlagIPC3 | FlagIPC1 | FlagMapped | FlagQueryPhysicalAddressAllowed |
                     FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated,

    ProcessMemory = 0x0F | FlagIPC3 | FlagIPC1 | FlagMapped | FlagMemoryPoolAllocated,

| 205 | // Used to signify an inaccessible or invalid memory region in memory queries | ||
| 206 | Inaccessible = 0x10, | ||
| 207 | |||
| 208 | IpcBuffer1 = 0x11 | FlagIPC3 | FlagIPC1 | FlagMapped | FlagQueryPhysicalAddressAllowed | | ||
| 209 | FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated, | ||
| 210 | |||
| 211 | IpcBuffer3 = 0x12 | FlagIPC3 | FlagMapped | FlagQueryPhysicalAddressAllowed | | ||
| 212 | FlagSharedDeviceAligned | FlagMemoryPoolAllocated, | ||
| 213 | |||
| 214 | KernelStack = 0x13 | FlagMapped, | ||
| 215 | }; | ||
| 216 | // clang-format on | ||
| 217 | |||
| 218 | constexpr MemoryState operator|(MemoryState lhs, MemoryState rhs) { | ||
| 219 | return static_cast<MemoryState>(u32(lhs) | u32(rhs)); | ||
| 220 | } | ||
| 221 | |||
| 222 | constexpr MemoryState operator&(MemoryState lhs, MemoryState rhs) { | ||
| 223 | return static_cast<MemoryState>(u32(lhs) & u32(rhs)); | ||
| 224 | } | ||
| 225 | |||
| 226 | constexpr MemoryState operator^(MemoryState lhs, MemoryState rhs) { | ||
| 227 | return static_cast<MemoryState>(u32(lhs) ^ u32(rhs)); | ||
| 228 | } | ||
| 229 | |||
| 230 | constexpr MemoryState operator~(MemoryState lhs) { | ||
| 231 | return static_cast<MemoryState>(~u32(lhs)); | ||
| 232 | } | ||
| 233 | |||
| 234 | constexpr MemoryState& operator|=(MemoryState& lhs, MemoryState rhs) { | ||
| 235 | lhs = lhs | rhs; | ||
| 236 | return lhs; | ||
| 237 | } | ||
| 238 | |||
| 239 | constexpr MemoryState& operator&=(MemoryState& lhs, MemoryState rhs) { | ||
| 240 | lhs = lhs & rhs; | ||
| 241 | return lhs; | ||
| 242 | } | ||
| 243 | |||
| 244 | constexpr MemoryState& operator^=(MemoryState& lhs, MemoryState rhs) { | ||
| 245 | lhs = lhs ^ rhs; | ||
| 246 | return lhs; | ||
| 247 | } | ||
| 248 | |||
| 249 | constexpr u32 ToSvcMemoryState(MemoryState state) { | ||
| 250 | return static_cast<u32>(state & MemoryState::Mask); | ||
| 251 | } | ||
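The low byte of each MemoryState value is the type code surfaced through svcQueryMemory; the bits above it are internal bookkeeping. A compile-time sketch of both views, using the definitions above:

    // The flag bits are stripped, leaving the 8-bit type code...
    static_assert(ToSvcMemoryState(MemoryState::Heap) == 0x05);
    static_assert(ToSvcMemoryState(MemoryState::KernelStack) == 0x13);
    // ...while internal flags remain testable on the full value.
    static_assert((MemoryState::Heap & MemoryState::FlagCodeMemory) ==
                  MemoryState::FlagCodeMemory);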
| 252 | |||
| 253 | struct MemoryInfo { | ||
| 254 | u64 base_address; | ||
| 255 | u64 size; | ||
| 256 | u32 state; | ||
| 257 | u32 attributes; | ||
| 258 | u32 permission; | ||
| 259 | u32 ipc_ref_count; | ||
| 260 | u32 device_ref_count; | ||
| 261 | }; | ||
| 262 | static_assert(sizeof(MemoryInfo) == 0x28, "MemoryInfo has incorrect size."); | ||
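The static_assert above pins the overall size that guest code expects. On a typical 64-bit ABI the field offsets follow as two u64s, then five u32s, then four bytes of tail padding; a hypothetical companion check (requires <cstddef> for offsetof):

    static_assert(offsetof(MemoryInfo, size) == 0x08);
    static_assert(offsetof(MemoryInfo, state) == 0x10);
    static_assert(offsetof(MemoryInfo, device_ref_count) == 0x20);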
| 263 | |||
| 264 | struct PageInfo { | ||
| 265 | u32 flags; | ||
| 266 | }; | ||
| 267 | |||
| 268 | /** | ||
| 269 | * Represents a VMA in an address space. A VMA is a contiguous region of virtual addressing space | ||
| 270 | * with homogeneous attributes across its extents. In this particular implementation each VMA is | ||
| 271 | * also backed by a single host memory allocation. | ||
| 272 | */ | ||
| 273 | struct VirtualMemoryArea { | ||
| 274 | /// Gets the starting (base) address of this VMA. | ||
| 275 | VAddr StartAddress() const { | ||
| 276 | return base; | ||
| 277 | } | ||
| 278 | |||
| 279 | /// Gets the ending address of this VMA. | ||
| 280 | VAddr EndAddress() const { | ||
| 281 | return base + size - 1; | ||
| 282 | } | ||
| 283 | |||
| 284 | /// Virtual base address of the region. | ||
| 285 | VAddr base = 0; | ||
| 286 | /// Size of the region. | ||
| 287 | u64 size = 0; | ||
| 288 | |||
| 289 | VMAType type = VMAType::Free; | ||
| 290 | VMAPermission permissions = VMAPermission::None; | ||
| 291 | MemoryState state = MemoryState::Unmapped; | ||
| 292 | MemoryAttribute attribute = MemoryAttribute::None; | ||
| 293 | |||
| 294 | // Settings for type = AllocatedMemoryBlock | ||
| 295 | /// Memory block backing this VMA. | ||
| 296 | std::shared_ptr<PhysicalMemory> backing_block = nullptr; | ||
| 298 | /// Offset into the backing block that the mapping starts from. | ||
| 298 | std::size_t offset = 0; | ||
| 299 | |||
| 300 | // Settings for type = BackingMemory | ||
| 301 | /// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed. | ||
| 302 | u8* backing_memory = nullptr; | ||
| 303 | |||
| 304 | // Settings for type = MMIO | ||
| 305 | /// Physical address of the register area this VMA maps to. | ||
| 306 | PAddr paddr = 0; | ||
| 307 | Common::MemoryHookPointer mmio_handler = nullptr; | ||
| 308 | |||
| 309 | /// Tests if this area can be merged to the right with `next`. | ||
| 310 | bool CanBeMergedWith(const VirtualMemoryArea& next) const; | ||
| 311 | }; | ||
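Note that EndAddress() returns an inclusive bound. A small sketch, assuming the struct above and the project's ASSERT macro:

    // A 0x2000-byte VMA based at 0x1000 covers [0x1000, 0x2FFF].
    VirtualMemoryArea vma;
    vma.base = 0x1000;
    vma.size = 0x2000;
    ASSERT(vma.EndAddress() == 0x2FFF);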
| 312 | |||
| 313 | /** | ||
| 314 | * Manages a process' virtual addressing space. This class maintains a list of allocated and free | ||
| 315 | * regions in the address space, along with their attributes, and allows kernel clients to | ||
| 316 | * manipulate it, adjusting the page table to match. | ||
| 317 | * | ||
| 318 | * This is similar in idea and purpose to the VM manager present in operating system kernels, with | ||
| 319 | * the main difference being that it doesn't have to support swapping or memory mapping of files. | ||
| 320 | * The implementation is also simplified by not having to allocate page frames. See these articles | ||
| 321 | * about the Linux kernel for an explanation of the concept and implementation: | ||
| 322 | * - http://duartes.org/gustavo/blog/post/how-the-kernel-manages-your-memory/ | ||
| 323 | * - http://duartes.org/gustavo/blog/post/page-cache-the-affair-between-memory-and-files/ | ||
| 324 | */ | ||
| 325 | class VMManager final { | ||
| 326 | using VMAMap = std::map<VAddr, VirtualMemoryArea>; | ||
| 327 | |||
| 328 | public: | ||
| 329 | using VMAHandle = VMAMap::const_iterator; | ||
| 330 | |||
| 331 | explicit VMManager(Core::System& system); | ||
| 332 | ~VMManager(); | ||
| 333 | |||
| 334 | /// Clears the address space map, re-initializing with a single free area. | ||
| 335 | void Reset(FileSys::ProgramAddressSpaceType type); | ||
| 336 | |||
| 337 | /// Finds the VMA that contains the given address, or `vma_map.end()` if none does. | ||
| 338 | VMAHandle FindVMA(VAddr target) const; | ||
| 339 | |||
| 340 | /// Indicates whether or not the given handle is within the VMA map. | ||
| 341 | bool IsValidHandle(VMAHandle handle) const; | ||
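FindVMA returns a map iterator rather than a pointer, so callers are expected to validate the handle before dereferencing it. A hypothetical caller sketch (vm_manager, address, and the error code are placeholders):

    const VMManager::VMAHandle handle = vm_manager.FindVMA(address);
    if (!vm_manager.IsValidHandle(handle)) {
        return ERR_INVALID_ADDRESS; // placeholder error for the sketch
    }
    const VirtualMemoryArea& vma = handle->second; // the map key is the base address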
| 342 | |||
| 343 | // TODO(yuriks): Should these functions actually return the handle? | ||
| 344 | |||
| 345 | /** | ||
| 346 | * Maps part of a ref-counted block of memory at a given address. | ||
| 347 | * | ||
| 348 | * @param target The guest address to start the mapping at. | ||
| 349 | * @param block The block to be mapped. | ||
| 350 | * @param offset Offset into `block` to map from. | ||
| 351 | * @param size Size of the mapping. | ||
| 352 | * @param state MemoryState tag to attach to the VMA. | ||
| 353 | */ | ||
| 354 | ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<PhysicalMemory> block, | ||
| 355 | std::size_t offset, u64 size, MemoryState state, | ||
| 356 | VMAPermission perm = VMAPermission::ReadWrite); | ||
| 357 | |||
| 358 | /** | ||
| 359 | * Maps an unmanaged host memory pointer at a given address. | ||
| 360 | * | ||
| 361 | * @param target The guest address to start the mapping at. | ||
| 362 | * @param memory The memory to be mapped. | ||
| 363 | * @param size Size of the mapping. | ||
| 364 | * @param state MemoryState tag to attach to the VMA. | ||
| 365 | */ | ||
| 366 | ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u64 size, MemoryState state); | ||
| 367 | |||
| 368 | /** | ||
| 369 | * Finds the first free memory region of the given size within | ||
| 370 | * the user-addressable ASLR memory region. | ||
| 371 | * | ||
| 372 | * @param size The size of the desired region in bytes. | ||
| 373 | * | ||
| 374 | * @returns If successful, the base address of the free region with | ||
| 375 | * the given size. | ||
| 376 | */ | ||
| 377 | ResultVal<VAddr> FindFreeRegion(u64 size) const; | ||
| 378 | |||
| 379 | /** | ||
| 380 | * Finds the first free address range that can hold a region of the desired size | ||
| 381 | * | ||
| 382 | * @param begin The starting address of the range. | ||
| 383 | * This is treated as an inclusive beginning address. | ||
| 384 | * | ||
| 385 | * @param end The ending address of the range. | ||
| 386 | * This is treated as an exclusive ending address. | ||
| 387 | * | ||
| 388 | * @param size The size of the free region to attempt to locate, | ||
| 389 | * in bytes. | ||
| 390 | * | ||
| 391 | * @returns If successful, the base address of the free region with | ||
| 392 | * the given size. | ||
| 393 | * | ||
| 394 | * @returns If unsuccessful, a result containing an error code. | ||
| 395 | * | ||
| 396 | * @pre The starting address must be less than the ending address. | ||
| 397 | * @pre The size must not exceed the address range itself. | ||
| 398 | */ | ||
| 399 | ResultVal<VAddr> FindFreeRegion(VAddr begin, VAddr end, u64 size) const; | ||
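The overload above amounts to a first-fit search over the free space within [begin, end). A simplified, self-contained sketch of that strategy (not the actual implementation, which walks the VMA map directly):

    #include <algorithm>
    #include <cstdint>
    #include <map>
    #include <optional>

    using VAddr = std::uint64_t;

    // free_regions maps each free base address to its size in bytes.
    std::optional<VAddr> FindFreeFirstFit(const std::map<VAddr, std::uint64_t>& free_regions,
                                          VAddr begin, VAddr end, std::uint64_t size) {
        for (const auto& [base, region_size] : free_regions) {
            const VAddr candidate = std::max(base, begin);         // clamp to the search range
            const VAddr limit = std::min(base + region_size, end); // exclusive upper bound
            if (candidate < limit && limit - candidate >= size) {
                return candidate; // first gap that can hold `size` bytes wins
            }
        }
        return std::nullopt; // no suitable gap
    }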
| 400 | |||
| 401 | /** | ||
| 402 | * Maps a memory-mapped IO region at a given address. | ||
| 403 | * | ||
| 404 | * @param target The guest address to start the mapping at. | ||
| 405 | * @param paddr The physical address where the registers are present. | ||
| 406 | * @param size Size of the mapping. | ||
| 407 | * @param state MemoryState tag to attach to the VMA. | ||
| 408 | * @param mmio_handler The handler that will implement read and write for this MMIO region. | ||
| 409 | */ | ||
| 410 | ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state, | ||
| 411 | Common::MemoryHookPointer mmio_handler); | ||
| 412 | |||
| 413 | /// Unmaps a range of addresses, splitting VMAs as necessary. | ||
| 414 | ResultCode UnmapRange(VAddr target, u64 size); | ||
| 415 | |||
| 416 | /// Changes the permissions of the given VMA. | ||
| 417 | VMAHandle Reprotect(VMAHandle vma, VMAPermission new_perms); | ||
| 418 | |||
| 419 | /// Changes the permissions of a range of addresses, splitting VMAs as necessary. | ||
| 420 | ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms); | ||
| 421 | |||
| 422 | ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state); | ||
| 423 | |||
| 424 | /// Attempts to allocate a heap with the given size. | ||
| 425 | /// | ||
| 426 | /// @param size The size of the heap to allocate in bytes. | ||
| 427 | /// | ||
| 428 | /// @note If a heap is currently allocated, and this is called | ||
| 429 | /// with a size that is equal to the size of the current heap, | ||
| 430 | /// then this function will do nothing and return the current | ||
| 431 | /// heap's starting address, as there's no need to perform | ||
| 432 | /// any additional heap allocation work. | ||
| 433 | /// | ||
| 434 | /// @note If a heap is currently allocated, and this is called | ||
| 435 | /// with a size less than the current heap's size, then | ||
| 436 | /// this function will attempt to shrink the heap. | ||
| 437 | /// | ||
| 438 | /// @note If a heap is currently allocated, and this is called | ||
| 439 | /// with a size larger than the current heap's size, then | ||
| 440 | /// this function will attempt to extend the size of the heap. | ||
| 441 | /// | ||
| 442 | /// @returns A result indicating either success or failure. | ||
| 443 | /// <p> | ||
| 444 | /// If successful, this function will return a result | ||
| 445 | /// containing the starting address to the allocated heap. | ||
| 446 | /// <p> | ||
| 447 | /// If unsuccessful, this function will return a result | ||
| 448 | /// containing an error code. | ||
| 449 | /// | ||
| 450 | /// @pre The given size must lie within the allowable heap | ||
| 451 | /// memory region managed by this VMManager instance. | ||
| 452 | /// Failure to abide by this will result in ERR_OUT_OF_MEMORY | ||
| 453 | /// being returned as the result. | ||
| 454 | /// | ||
| 455 | ResultVal<VAddr> SetHeapSize(u64 size); | ||
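The notes above reduce to: an equal size is a no-op, a smaller size shrinks the heap, a larger size extends it. A hypothetical usage sketch (sizes chosen arbitrarily, error handling assumed to follow the ResultVal interface used throughout this header):

    const ResultVal<VAddr> grown = vm_manager.SetHeapSize(0x600000); // extend
    if (grown.Failed()) {
        return grown.Code();
    }
    const ResultVal<VAddr> shrunk = vm_manager.SetHeapSize(0x200000); // shrink back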
| 456 | |||
| 457 | /// Maps memory at a given address. | ||
| 458 | /// | ||
| 459 | /// @param target The virtual address to map memory at. | ||
| 460 | /// @param size The amount of memory to map. | ||
| 461 | /// | ||
| 462 | /// @note The destination address must lie within the Map region. | ||
| 463 | /// | ||
| 464 | /// @note This function requires that SystemResourceSize be non-zero, | ||
| 465 | /// but only because, if it were not, the resulting page | ||
| 466 | /// tables could be exploited on hardware by a malicious | ||
| 467 | /// program. SystemResource usage does not need to be | ||
| 468 | /// explicitly checked or updated here. | ||
| 469 | ResultCode MapPhysicalMemory(VAddr target, u64 size); | ||
| 470 | |||
| 471 | /// Unmaps memory at a given address. | ||
| 472 | /// | ||
| 473 | /// @param target The virtual address to unmap memory at. | ||
| 474 | /// @param size The amount of memory to unmap. | ||
| 475 | /// | ||
| 476 | /// @note The destination address must lie within the Map region. | ||
| 477 | /// | ||
| 478 | /// @note This function requires that SystemResourceSize be non-zero, | ||
| 479 | /// but only because, if it were not, the resulting page | ||
| 480 | /// tables could be exploited on hardware by a malicious | ||
| 481 | /// program. SystemResource usage does not need to be | ||
| 482 | /// explicitly checked or updated here. | ||
| 483 | ResultCode UnmapPhysicalMemory(VAddr target, u64 size); | ||
| 484 | |||
| 485 | /// Maps a region of memory as code memory. | ||
| 486 | /// | ||
| 487 | /// @param dst_address The base address at which to create the aliasing memory region. | ||
| 488 | /// @param src_address The base address of the region to be aliased. | ||
| 489 | /// @param size The total amount of memory to map in bytes. | ||
| 490 | /// | ||
| 491 | /// @pre Both memory regions lie within the actual addressable address space. | ||
| 492 | /// | ||
| 493 | /// @post After this function finishes execution, assuming success, then the address range | ||
| 494 | /// [dst_address, dst_address+size) will alias the memory region, | ||
| 495 | /// [src_address, src_address+size). | ||
| 496 | /// <p> | ||
| 497 | /// What this also entails is as follows: | ||
| 498 | /// 1. The aliased region gains the Locked memory attribute. | ||
| 499 | /// 2. The aliased region becomes read-only. | ||
| 500 | /// 3. The aliasing region becomes read-only. | ||
| 501 | /// 4. The aliasing region is created with a memory state of MemoryState::ModuleCode. | ||
| 502 | /// | ||
| 503 | ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, u64 size); | ||
| 504 | |||
| 505 | /// Unmaps a region of memory designated as code module memory. | ||
| 506 | /// | ||
| 507 | /// @param dst_address The base address of the memory region aliasing the source memory region. | ||
| 508 | /// @param src_address The base address of the memory region being aliased. | ||
| 509 | /// @param size The size of the memory region to unmap in bytes. | ||
| 510 | /// | ||
| 511 | /// @pre Both memory ranges lie within the actual addressable address space. | ||
| 512 | /// | ||
| 513 | /// @pre The memory region being unmapped has previously been mapped | ||
| 514 | /// by a call to MapCodeMemory. | ||
| 515 | /// | ||
| 516 | /// @post After execution of the function, if successful, the aliasing memory region | ||
| 517 | /// will be unmapped and the aliased region will have various traits about it | ||
| 518 | /// restored to what they were prior to the original mapping call preceding | ||
| 519 | /// this function call. | ||
| 520 | /// <p> | ||
| 521 | /// What this also entails is as follows: | ||
| 522 | /// 1. The state of the memory region will now indicate a general heap region. | ||
| 523 | /// 2. All memory attributes for the memory region are cleared. | ||
| 524 | /// 3. Memory permissions for the region are restored to user read/write. | ||
| 525 | /// | ||
| 526 | ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, u64 size); | ||
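MapCodeMemory and UnmapCodeMemory are intended to bracket a module's lifetime. A hypothetical flow (names are placeholders):

    // Alias heap-backed memory as module code, then restore it on unload.
    const ResultCode map_result = vm_manager.MapCodeMemory(dst_address, src_address, size);
    if (map_result.IsError()) {
        return map_result;
    }
    // ... the module executes; the aliased source range is read-only ...
    const ResultCode unmap_result = vm_manager.UnmapCodeMemory(dst_address, src_address, size);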
| 527 | |||
| 528 | /// Queries the memory manager for information about the given address. | ||
| 529 | /// | ||
| 530 | /// @param address The address to query the memory manager about for information. | ||
| 531 | /// | ||
| 532 | /// @return A MemoryInfo instance containing information about the given address. | ||
| 533 | /// | ||
| 534 | MemoryInfo QueryMemory(VAddr address) const; | ||
| 535 | |||
| 536 | /// Sets an attribute across the given address range. | ||
| 537 | /// | ||
| 538 | /// @param address The starting address | ||
| 539 | /// @param size The size of the range to set the attribute on. | ||
| 540 | /// @param mask The attribute mask | ||
| 541 | /// @param attribute The attribute to set across the given address range | ||
| 542 | /// | ||
| 543 | /// @returns RESULT_SUCCESS if successful | ||
| 544 | /// @returns ERR_INVALID_ADDRESS_STATE if the attribute could not be set. | ||
| 545 | /// | ||
| 546 | ResultCode SetMemoryAttribute(VAddr address, u64 size, MemoryAttribute mask, | ||
| 547 | MemoryAttribute attribute); | ||
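The mask selects which attribute bits the call may change; bits outside it are left untouched. A hypothetical usage marking a range uncached (addr and size are placeholders):

    // Only the Uncached bit is eligible for update, and it is set;
    // passing MemoryAttribute::None as the attribute would presumably clear it.
    const ResultCode result = vm_manager.SetMemoryAttribute(
        addr, size, MemoryAttribute::Uncached, MemoryAttribute::Uncached);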
| 548 | |||
| 549 | /** | ||
| 550 | * Scans all VMAs and updates the page table range of any that use the given vector as backing | ||
| 551 | * memory. This should be called after any operation that causes reallocation of the vector. | ||
| 552 | */ | ||
| 553 | void RefreshMemoryBlockMappings(const PhysicalMemory* block); | ||
| 554 | |||
| 555 | /// Dumps the address space layout to the log, for debugging | ||
| 556 | void LogLayout() const; | ||
| 557 | |||
| 558 | /// Gets the total memory usage, used by svcGetInfo | ||
| 559 | u64 GetTotalPhysicalMemoryAvailable() const; | ||
| 560 | |||
| 561 | /// Gets the address space base address | ||
| 562 | VAddr GetAddressSpaceBaseAddress() const; | ||
| 563 | |||
| 564 | /// Gets the address space end address | ||
| 565 | VAddr GetAddressSpaceEndAddress() const; | ||
| 566 | |||
| 567 | /// Gets the total address space address size in bytes | ||
| 568 | u64 GetAddressSpaceSize() const; | ||
| 569 | |||
| 570 | /// Gets the address space width in bits. | ||
| 571 | u64 GetAddressSpaceWidth() const; | ||
| 572 | |||
| 573 | /// Determines whether or not the given address range lies within the address space. | ||
| 574 | bool IsWithinAddressSpace(VAddr address, u64 size) const; | ||
| 575 | |||
| 576 | /// Gets the base address of the ASLR region. | ||
| 577 | VAddr GetASLRRegionBaseAddress() const; | ||
| 578 | |||
| 579 | /// Gets the end address of the ASLR region. | ||
| 580 | VAddr GetASLRRegionEndAddress() const; | ||
| 581 | |||
| 582 | /// Gets the size of the ASLR region | ||
| 583 | u64 GetASLRRegionSize() const; | ||
| 584 | |||
| 585 | /// Determines whether or not the specified address range is within the ASLR region. | ||
| 586 | bool IsWithinASLRRegion(VAddr address, u64 size) const; | ||
| 587 | |||
| 588 | /// Gets the base address of the code region. | ||
| 589 | VAddr GetCodeRegionBaseAddress() const; | ||
| 590 | |||
| 591 | /// Gets the end address of the code region. | ||
| 592 | VAddr GetCodeRegionEndAddress() const; | ||
| 593 | |||
| 594 | /// Gets the total size of the code region in bytes. | ||
| 595 | u64 GetCodeRegionSize() const; | ||
| 596 | |||
| 597 | /// Determines whether or not the specified range is within the code region. | ||
| 598 | bool IsWithinCodeRegion(VAddr address, u64 size) const; | ||
| 599 | |||
| 600 | /// Gets the base address of the heap region. | ||
| 601 | VAddr GetHeapRegionBaseAddress() const; | ||
| 602 | |||
| 603 | /// Gets the end address of the heap region. | ||
| 604 | VAddr GetHeapRegionEndAddress() const; | ||
| 605 | |||
| 606 | /// Gets the total size of the heap region in bytes. | ||
| 607 | u64 GetHeapRegionSize() const; | ||
| 608 | |||
| 609 | /// Gets the total size of the current heap in bytes. | ||
| 610 | /// | ||
| 611 | /// @note This is the current allocated heap size, not the size | ||
| 612 | /// of the region it's allowed to exist within. | ||
| 613 | /// | ||
| 614 | u64 GetCurrentHeapSize() const; | ||
| 615 | |||
| 616 | /// Determines whether or not the specified range is within the heap region. | ||
| 617 | bool IsWithinHeapRegion(VAddr address, u64 size) const; | ||
| 618 | |||
| 619 | /// Gets the base address of the map region. | ||
| 620 | VAddr GetMapRegionBaseAddress() const; | ||
| 621 | |||
| 622 | /// Gets the end address of the map region. | ||
| 623 | VAddr GetMapRegionEndAddress() const; | ||
| 624 | |||
| 625 | /// Gets the total size of the map region in bytes. | ||
| 626 | u64 GetMapRegionSize() const; | ||
| 627 | |||
| 628 | /// Determines whether or not the specified range is within the map region. | ||
| 629 | bool IsWithinMapRegion(VAddr address, u64 size) const; | ||
| 630 | |||
| 631 | /// Gets the base address of the stack region. | ||
| 632 | VAddr GetStackRegionBaseAddress() const; | ||
| 633 | |||
| 634 | /// Gets the end address of the stack region. | ||
| 635 | VAddr GetStackRegionEndAddress() const; | ||
| 636 | |||
| 637 | /// Gets the total size of the stack region in bytes. | ||
| 638 | u64 GetStackRegionSize() const; | ||
| 639 | |||
| 640 | /// Determines whether or not the given address range is within the stack region | ||
| 641 | bool IsWithinStackRegion(VAddr address, u64 size) const; | ||
| 642 | |||
| 643 | /// Gets the base address of the TLS IO region. | ||
| 644 | VAddr GetTLSIORegionBaseAddress() const; | ||
| 645 | |||
| 646 | /// Gets the end address of the TLS IO region. | ||
| 647 | VAddr GetTLSIORegionEndAddress() const; | ||
| 648 | |||
| 649 | /// Gets the total size of the TLS IO region in bytes. | ||
| 650 | u64 GetTLSIORegionSize() const; | ||
| 651 | |||
| 652 | /// Determines if the given address range is within the TLS IO region. | ||
| 653 | bool IsWithinTLSIORegion(VAddr address, u64 size) const; | ||
| 654 | |||
| 655 | /// Each VMManager has its own page table, which is set as the main one when the owning process | ||
| 656 | /// is scheduled. | ||
| 657 | Common::PageTable page_table{Memory::PAGE_BITS}; | ||
| 658 | |||
| 659 | using CheckResults = ResultVal<std::tuple<MemoryState, VMAPermission, MemoryAttribute>>; | ||
| 660 | |||
| 661 | /// Checks whether an address range adheres to the given state, permission, and attribute constraints. | ||
| 662 | /// | ||
| 663 | /// @param address The starting address of the address range. | ||
| 664 | /// @param size The size of the address range. | ||
| 665 | /// @param state_mask The memory state mask. | ||
| 666 | /// @param state The state to compare the individual VMA states against, | ||
| 667 | /// which is done in the form of: (vma.state & state_mask) != state. | ||
| 668 | /// @param permission_mask The memory permissions mask. | ||
| 669 | /// @param permissions The permission to compare the individual VMA permissions against, | ||
| 670 | /// which is done in the form of: | ||
| 671 | /// (vma.permission & permission_mask) != permission. | ||
| 672 | /// @param attribute_mask The memory attribute mask. | ||
| 673 | /// @param attribute The memory attributes to compare the individual VMA attributes | ||
| 674 | /// against, which is done in the form of: | ||
| 675 | /// (vma.attributes & attribute_mask) != attribute. | ||
| 676 | /// @param ignore_mask The memory attributes to ignore during the check. | ||
| 677 | /// | ||
| 678 | /// @returns If successful, returns a tuple containing the memory state, | ||
| 679 | /// memory permissions, and memory attributes (with the bits specified | ||
| 680 | /// by ignore_mask cleared) across the memory range. | ||
| 681 | /// @returns If not successful, returns ERR_INVALID_ADDRESS_STATE. | ||
| 682 | /// | ||
| 683 | CheckResults CheckRangeState(VAddr address, u64 size, MemoryState state_mask, MemoryState state, | ||
| 684 | VMAPermission permission_mask, VMAPermission permissions, | ||
| 685 | MemoryAttribute attribute_mask, MemoryAttribute attribute, | ||
| 686 | MemoryAttribute ignore_mask) const; | ||
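Each of the three comparisons documented above has the same shape: mask the field, then require an exact match. A minimal per-VMA sketch of that predicate, assuming the types above (the attribute check follows the same pattern after clearing the ignore_mask bits):

    bool MatchesExpected(const VirtualMemoryArea& vma, MemoryState state_mask, MemoryState state,
                         VMAPermission perm_mask, VMAPermission perm) {
        return (vma.state & state_mask) == state && (vma.permissions & perm_mask) == perm;
    }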
| 687 | |||
| 688 | private: | ||
| 689 | using VMAIter = VMAMap::iterator; | ||
| 690 | |||
| 691 | /// Converts a VMAHandle to a mutable VMAIter. | ||
| 692 | VMAIter StripIterConstness(const VMAHandle& iter); | ||
| 693 | |||
| 694 | /// Unmaps the given VMA. | ||
| 695 | VMAIter Unmap(VMAIter vma); | ||
| 696 | |||
| 697 | /** | ||
| 698 | * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing | ||
| 699 | * the appropriate error checking. | ||
| 700 | */ | ||
| 701 | ResultVal<VMAIter> CarveVMA(VAddr base, u64 size); | ||
| 702 | |||
| 703 | /** | ||
| 704 | * Splits the edges of the given range of non-Free VMAs so that there is a VMA split at each | ||
| 705 | * end of the range. | ||
| 706 | */ | ||
| 707 | ResultVal<VMAIter> CarveVMARange(VAddr base, u64 size); | ||
| 708 | |||
| 709 | /** | ||
| 710 | * Splits a VMA in two, at the specified offset. | ||
| 711 | * @returns the right side of the split, with the original iterator becoming the left side. | ||
| 712 | */ | ||
| 713 | VMAIter SplitVMA(VMAIter vma, u64 offset_in_vma); | ||
| 714 | |||
| 715 | /** | ||
| 716 | * Checks for and merges the specified VMA with adjacent ones if possible. | ||
| 717 | * @returns the merged VMA or the original if no merging was possible. | ||
| 718 | */ | ||
| 719 | VMAIter MergeAdjacent(VMAIter vma); | ||
| 720 | |||
| 721 | /** | ||
| 722 | * Merges two adjacent VMAs. | ||
| 723 | */ | ||
| 724 | void MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryArea& right); | ||
| 725 | |||
| 726 | /// Updates the pages corresponding to this VMA so they match the VMA's attributes. | ||
| 727 | void UpdatePageTableForVMA(const VirtualMemoryArea& vma); | ||
| 728 | |||
| 729 | /// Initializes memory region ranges to adhere to a given address space type. | ||
| 730 | void InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType type); | ||
| 731 | |||
| 732 | /// Clears the underlying map and page table. | ||
| 733 | void Clear(); | ||
| 734 | |||
| 735 | /// Clears out the VMA map, unmapping any previously mapped ranges. | ||
| 736 | void ClearVMAMap(); | ||
| 737 | |||
| 738 | /// Clears out the page table | ||
| 739 | void ClearPageTable(); | ||
| 740 | |||
| 741 | /// Gets the amount of memory currently mapped (state != Unmapped) in a range. | ||
| 742 | ResultVal<std::size_t> SizeOfAllocatedVMAsInRange(VAddr address, std::size_t size) const; | ||
| 743 | |||
| 744 | /// Gets the amount of memory unmappable by UnmapPhysicalMemory in a range. | ||
| 745 | ResultVal<std::size_t> SizeOfUnmappablePhysicalMemoryInRange(VAddr address, | ||
| 746 | std::size_t size) const; | ||
| 747 | |||
| 748 | /** | ||
| 749 | * A map covering the entirety of the managed address space, keyed by the `base` field of each | ||
| 750 | * VMA. It must always be modified by splitting or merging VMAs, so that the invariant | ||
| 751 | * `elem.base + elem.size == next.base` is preserved, and mergeable regions must always be | ||
| 752 | * merged when possible so that no two similar and adjacent regions exist that have not been | ||
| 753 | * merged. | ||
| 754 | */ | ||
| 755 | VMAMap vma_map; | ||
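The documented invariants lend themselves to a debug-only walk of the map. A hypothetical checker, assuming the declarations above, the project's ASSERT macro, and <iterator> for std::next:

    void AssertVMAMapInvariants(const VMAMap& map) {
        for (auto it = map.begin(); it != map.end(); ++it) {
            const auto next = std::next(it);
            if (next == map.end()) {
                break;
            }
            // VMAs must tile the address space with no gaps...
            ASSERT(it->second.base + it->second.size == next->second.base);
            // ...and mergeable neighbours must already have been merged.
            ASSERT(!it->second.CanBeMergedWith(next->second));
        }
    }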
| 756 | |||
| 757 | u32 address_space_width = 0; | ||
| 758 | VAddr address_space_base = 0; | ||
| 759 | VAddr address_space_end = 0; | ||
| 760 | |||
| 761 | VAddr aslr_region_base = 0; | ||
| 762 | VAddr aslr_region_end = 0; | ||
| 763 | |||
| 764 | VAddr code_region_base = 0; | ||
| 765 | VAddr code_region_end = 0; | ||
| 766 | |||
| 767 | VAddr heap_region_base = 0; | ||
| 768 | VAddr heap_region_end = 0; | ||
| 769 | |||
| 770 | VAddr map_region_base = 0; | ||
| 771 | VAddr map_region_end = 0; | ||
| 772 | |||
| 773 | VAddr stack_region_base = 0; | ||
| 774 | VAddr stack_region_end = 0; | ||
| 775 | |||
| 776 | VAddr tls_io_region_base = 0; | ||
| 777 | VAddr tls_io_region_end = 0; | ||
| 778 | |||
| 779 | // Memory used to back the allocations in the regular heap. A single vector is used to cover | ||
| 780 | // the entire virtual address space extents that bound the allocations, including any holes. | ||
| 781 | // This makes deallocation and reallocation of holes fast and keeps process memory contiguous | ||
| 782 | // in the emulator address space, allowing Memory::GetPointer to be reasonably safe. | ||
| 783 | std::shared_ptr<PhysicalMemory> heap_memory; | ||
| 784 | |||
| 785 | // The end of the currently allocated heap. This is not an inclusive | ||
| 786 | // end of the range. This is essentially 'base_address + current_size'. | ||
| 787 | VAddr heap_end = 0; | ||
| 788 | |||
| 789 | // The current amount of memory mapped via MapPhysicalMemory. | ||
| 790 | // This is used here (and in Nintendo's kernel) only for debugging, and does not impact | ||
| 791 | // any behavior. | ||
| 792 | u64 physical_memory_mapped = 0; | ||
| 793 | |||
| 794 | Core::System& system; | ||
| 795 | }; | ||
| 796 | } // namespace Kernel | ||