-rw-r--r--  src/core/hle/kernel/vm_manager.cpp | 72
-rw-r--r--  src/core/hle/kernel/vm_manager.h   |  8
2 files changed, 41 insertions(+), 39 deletions(-)
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 40cea1e7c..c7af87073 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -296,12 +296,6 @@ ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
 }
 
 ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
-    const auto end_addr = target + size;
-    const auto last_addr = end_addr - 1;
-    VAddr cur_addr = target;
-
-    ResultCode result = RESULT_SUCCESS;
-
     // Check how much memory we've already mapped.
     const auto mapped_size_result = SizeOfAllocatedVMAsInRange(target, size);
     if (mapped_size_result.Failed()) {
@@ -324,13 +318,16 @@ ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
 
     // Keep track of the memory regions we unmap.
     std::vector<std::pair<u64, u64>> mapped_regions;
+    ResultCode result = RESULT_SUCCESS;
 
     // Iterate, trying to map memory.
     {
-        cur_addr = target;
+        const auto end_addr = target + size;
+        const auto last_addr = end_addr - 1;
+        VAddr cur_addr = target;
 
         auto iter = FindVMA(target);
-        ASSERT_MSG(iter != vma_map.end(), "MapPhysicalMemory iter != end");
+        ASSERT(iter != vma_map.end());
 
         while (true) {
             const auto& vma = iter->second;
@@ -342,7 +339,7 @@ ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
             const auto map_size = std::min(end_addr - cur_addr, vma_end - cur_addr);
             if (vma.state == MemoryState::Unmapped) {
                 const auto map_res =
-                    MapMemoryBlock(cur_addr, std::make_shared<PhysicalMemory>(map_size, 0), 0,
+                    MapMemoryBlock(cur_addr, std::make_shared<PhysicalMemory>(map_size), 0,
                                    map_size, MemoryState::Heap, VMAPermission::ReadWrite);
                 result = map_res.Code();
                 if (result.IsError()) {
@@ -360,7 +357,7 @@ ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
             // Advance to the next block.
             cur_addr = vma_end;
             iter = FindVMA(cur_addr);
-            ASSERT_MSG(iter != vma_map.end(), "MapPhysicalMemory iter != end");
+            ASSERT(iter != vma_map.end());
         }
     }
 
@@ -368,7 +365,7 @@ ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
     if (result.IsError()) {
         for (const auto [unmap_address, unmap_size] : mapped_regions) {
             ASSERT_MSG(UnmapRange(unmap_address, unmap_size).IsSuccess(),
-                       "MapPhysicalMemory un-map on error");
+                       "Failed to unmap memory range.");
         }
 
         return result;
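The hunks above scope end_addr, last_addr, and cur_addr to the block that actually walks the VMA map, and simplify the iteration asserts. For reference, a minimal sketch of that walk pattern; Vma, FindContaining, and map_block are hypothetical stand-ins for illustration, not the real VMManager interface:

// Illustrative sketch only: Vma, FindContaining, and map_block are hypothetical
// stand-ins, not the real VMManager types.
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <map>

using VAddr = std::uint64_t;

struct Vma {
    VAddr base = 0;
    std::uint64_t size = 0;
    bool mapped = false;
};

// Find the VMA containing addr, assuming the map is keyed by base address and
// the VMAs tile the address space without gaps.
std::map<VAddr, Vma>::const_iterator FindContaining(const std::map<VAddr, Vma>& vmas, VAddr addr) {
    const auto next = vmas.upper_bound(addr);
    return next == vmas.begin() ? vmas.end() : std::prev(next);
}

// Walk every VMA overlapping [target, target + size), clamping each step to the
// requested range, in the same shape as the loop in MapPhysicalMemory.
template <typename MapBlockFn>
bool WalkRange(const std::map<VAddr, Vma>& vmas, VAddr target, std::uint64_t size,
               MapBlockFn&& map_block) {
    const VAddr end_addr = target + size;
    VAddr cur_addr = target;

    auto iter = FindContaining(vmas, cur_addr);
    while (iter != vmas.end()) {
        const Vma& vma = iter->second;
        const VAddr vma_end = vma.base + vma.size;
        const auto map_size = std::min(end_addr - cur_addr, vma_end - cur_addr);

        // Only unmapped regions need new backing memory; mapped ones are skipped.
        if (!vma.mapped && !map_block(cur_addr, map_size)) {
            return false; // caller rolls back whatever was mapped so far
        }
        if (vma_end >= end_addr) {
            return true; // the requested range is fully covered
        }
        cur_addr = vma_end; // advance to the next block
        iter = FindContaining(vmas, cur_addr);
    }
    return false;
}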
@@ -381,12 +378,6 @@ ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) {
 }
 
 ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) {
-    const auto end_addr = target + size;
-    const auto last_addr = end_addr - 1;
-    VAddr cur_addr = target;
-
-    ResultCode result = RESULT_SUCCESS;
-
     // Check how much memory is currently mapped.
     const auto mapped_size_result = SizeOfUnmappablePhysicalMemoryInRange(target, size);
     if (mapped_size_result.Failed()) {
@@ -401,13 +392,16 @@ ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) {
 
     // Keep track of the memory regions we unmap.
     std::vector<std::pair<u64, u64>> unmapped_regions;
+    ResultCode result = RESULT_SUCCESS;
 
     // Try to unmap regions.
     {
-        cur_addr = target;
+        const auto end_addr = target + size;
+        const auto last_addr = end_addr - 1;
+        VAddr cur_addr = target;
 
         auto iter = FindVMA(target);
-        ASSERT_MSG(iter != vma_map.end(), "UnmapPhysicalMemory iter != end");
+        ASSERT(iter != vma_map.end());
 
         while (true) {
             const auto& vma = iter->second;
@@ -434,7 +428,7 @@ ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) {
             // Advance to the next block.
             cur_addr = vma_end;
             iter = FindVMA(cur_addr);
-            ASSERT_MSG(iter != vma_map.end(), "UnmapPhysicalMemory iter != end");
+            ASSERT(iter != vma_map.end());
         }
     }
 
@@ -443,10 +437,12 @@ ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) {
     if (result.IsError()) {
         for (const auto [map_address, map_size] : unmapped_regions) {
             const auto remap_res =
-                MapMemoryBlock(map_address, std::make_shared<PhysicalMemory>(map_size, 0), 0,
-                               map_size, MemoryState::Heap, VMAPermission::None);
-            ASSERT_MSG(remap_res.Succeeded(), "UnmapPhysicalMemory re-map on error");
+                MapMemoryBlock(map_address, std::make_shared<PhysicalMemory>(map_size), 0, map_size,
+                               MemoryState::Heap, VMAPermission::None);
+            ASSERT_MSG(remap_res.Succeeded(), "Failed to remap a memory block.");
         }
+
+        return result;
     }
 
     // Update mapped amount
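With the added return, both error paths now undo their partial work and then propagate the stored error; previously UnmapPhysicalMemory fell through after remapping and continued as if the unmap had succeeded. A generic sketch of that undo-then-return shape, with Result, apply, and undo as hypothetical placeholders rather than kernel types:

// Generic sketch of the undo-then-return pattern used by both functions;
// Result, apply, and undo are hypothetical placeholders, not kernel types.
#include <cstdint>
#include <utility>
#include <vector>

enum class Result { Success, OutOfMemory };

template <typename ApplyFn, typename UndoFn>
Result ApplyAllOrNothing(const std::vector<std::pair<std::uint64_t, std::uint64_t>>& regions,
                         ApplyFn&& apply, UndoFn&& undo) {
    std::vector<std::pair<std::uint64_t, std::uint64_t>> done;
    Result result = Result::Success;

    for (const auto& [addr, size] : regions) {
        result = apply(addr, size);
        if (result != Result::Success) {
            break;
        }
        done.emplace_back(addr, size);
    }

    if (result != Result::Success) {
        // Roll back everything that already succeeded, then propagate the error.
        for (const auto& [addr, size] : done) {
            undo(addr, size);
        }
        return result; // the early return UnmapPhysicalMemory was missing
    }

    return Result::Success;
}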
@@ -757,20 +753,26 @@ void VMManager::MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryAre
     // Always merge allocated memory blocks, even when they don't share the same backing block.
     if (left.type == VMAType::AllocatedMemoryBlock &&
         (left.backing_block != right.backing_block || left.offset + left.size != right.offset)) {
+        const auto right_begin = right.backing_block->begin() + right.offset;
+        const auto right_end = right_begin + right.size;
+
         // Check if we can save work.
         if (left.offset == 0 && left.size == left.backing_block->size()) {
             // Fast case: left is an entire backing block.
-            left.backing_block->insert(left.backing_block->end(),
-                                       right.backing_block->begin() + right.offset,
-                                       right.backing_block->begin() + right.offset + right.size);
+            left.backing_block->insert(left.backing_block->end(), right_begin, right_end);
         } else {
             // Slow case: make a new memory block for left and right.
+            const auto left_begin = left.backing_block->begin() + left.offset;
+            const auto left_end = left_begin + left.size;
+            const auto left_size = static_cast<std::size_t>(std::distance(left_begin, left_end));
+            const auto right_size = static_cast<std::size_t>(std::distance(right_begin, right_end));
+
             auto new_memory = std::make_shared<PhysicalMemory>();
-            new_memory->insert(new_memory->end(), left.backing_block->begin() + left.offset,
-                               left.backing_block->begin() + left.offset + left.size);
-            new_memory->insert(new_memory->end(), right.backing_block->begin() + right.offset,
-                               right.backing_block->begin() + right.offset + right.size);
-            left.backing_block = new_memory;
+            new_memory->reserve(left_size + right_size);
+            new_memory->insert(new_memory->end(), left_begin, left_end);
+            new_memory->insert(new_memory->end(), right_begin, right_end);
+
+            left.backing_block = std::move(new_memory);
             left.offset = 0;
         }
 
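The slow-path merge above now sizes the destination once before copying both source ranges, avoiding a reallocation between the two inserts. A small sketch of the same idea, assuming for illustration that PhysicalMemory behaves like std::vector<u8>; the alias below is an assumption for this example, not taken from the yuzu headers:

// Reserve-before-insert merge of two byte ranges into a fresh block.
#include <cstddef>
#include <cstdint>
#include <vector>

using PhysicalMemory = std::vector<std::uint8_t>; // assumption for this sketch

PhysicalMemory MergeBlocks(const PhysicalMemory& left, std::size_t left_offset,
                           std::size_t left_size, const PhysicalMemory& right,
                           std::size_t right_offset, std::size_t right_size) {
    const auto left_begin = left.begin() + static_cast<std::ptrdiff_t>(left_offset);
    const auto right_begin = right.begin() + static_cast<std::ptrdiff_t>(right_offset);

    PhysicalMemory merged;
    // One allocation up front instead of growing during the two inserts below.
    merged.reserve(left_size + right_size);
    merged.insert(merged.end(), left_begin, left_begin + static_cast<std::ptrdiff_t>(left_size));
    merged.insert(merged.end(), right_begin,
                  right_begin + static_cast<std::ptrdiff_t>(right_size));
    return merged;
}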
@@ -965,7 +967,7 @@ ResultVal<std::size_t> VMManager::SizeOfAllocatedVMAsInRange(VAddr address,
 
     VAddr cur_addr = address;
     auto iter = FindVMA(cur_addr);
-    ASSERT_MSG(iter != vma_map.end(), "SizeOfAllocatedVMAsInRange iter != end");
+    ASSERT(iter != vma_map.end());
 
     while (true) {
         const auto& vma = iter->second;
@@ -986,7 +988,7 @@ ResultVal<std::size_t> VMManager::SizeOfAllocatedVMAsInRange(VAddr address,
         // Advance to the next block.
         cur_addr = vma_end;
         iter = std::next(iter);
-        ASSERT_MSG(iter != vma_map.end(), "SizeOfAllocatedVMAsInRange iter != end");
+        ASSERT(iter != vma_map.end());
     }
 
     return MakeResult(mapped_size);
@@ -1000,7 +1002,7 @@ ResultVal<std::size_t> VMManager::SizeOfUnmappablePhysicalMemoryInRange(VAddr ad
 
     VAddr cur_addr = address;
    auto iter = FindVMA(cur_addr);
-    ASSERT_MSG(iter != vma_map.end(), "SizeOfUnmappablePhysicalMemoryInRange iter != end");
+    ASSERT(iter != vma_map.end());
 
     while (true) {
         const auto& vma = iter->second;
@@ -1029,7 +1031,7 @@ ResultVal<std::size_t> VMManager::SizeOfUnmappablePhysicalMemoryInRange(VAddr ad
         // Advance to the next block.
         cur_addr = vma_end;
         iter = std::next(iter);
-        ASSERT_MSG(iter != vma_map.end(), "SizeOfUnmappablePhysicalMemoryInRange iter != end");
+        ASSERT(iter != vma_map.end());
     }
 
     return MakeResult(mapped_size);
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index b18cde619..850a7ebc3 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -454,8 +454,8 @@ public:
 
     /// Maps memory at a given address.
     ///
-    /// @param addr The virtual address to map memory at.
-    /// @param size The amount of memory to map.
+    /// @param target The virtual address to map memory at.
+    /// @param size   The amount of memory to map.
     ///
     /// @note The destination address must lie within the Map region.
     ///
@@ -468,8 +468,8 @@ public:
 
     /// Unmaps memory at a given address.
    ///
-    /// @param addr The virtual address to unmap memory at.
-    /// @param size The amount of memory to unmap.
+    /// @param target The virtual address to unmap memory at.
+    /// @param size   The amount of memory to unmap.
     ///
     /// @note The destination address must lie within the Map region.
     ///
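The header hunks rename the documented parameter from addr to target so the Doxygen @param tags match the actual parameter names used by the definitions. A hedged, self-contained example of how the corrected block lines up with the declaration it documents; the stub types below are placeholders, and the declaration is inferred from the .cpp definition above rather than copied from the header:

// Placeholder types so the example stands alone; not the real kernel types.
#include <cstdint>

using u64 = std::uint64_t;
using VAddr = u64;
struct ResultCode {
    u64 raw = 0;
};

class VMManager {
public:
    /// Maps memory at a given address.
    ///
    /// @param target The virtual address to map memory at.
    /// @param size   The amount of memory to map.
    ///
    /// @note The destination address must lie within the Map region.
    ResultCode MapPhysicalMemory(VAddr target, u64 size);
};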