Diffstat (limited to 'src')
-rw-r--r--   src/common/page_table.cpp              |  58
-rw-r--r--   src/common/page_table.h                |  24
-rw-r--r--   src/core/hle/kernel/k_page_table.cpp   | 560
3 files changed, 508 insertions(+), 134 deletions(-)
diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp
index 9fffd816f..4817b09f9 100644
--- a/src/common/page_table.cpp
+++ b/src/common/page_table.cpp
@@ -10,11 +10,65 @@ PageTable::PageTable() = default;
 
 PageTable::~PageTable() noexcept = default;
 
-void PageTable::Resize(size_t address_space_width_in_bits, size_t page_size_in_bits) {
-    const size_t num_page_table_entries{1ULL << (address_space_width_in_bits - page_size_in_bits)};
+bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
+                               u64 address) const {
+    // Setup invalid defaults.
+    out_entry.phys_addr = 0;
+    out_entry.block_size = page_size;
+    out_context.next_page = 0;
+
+    // Validate that we can read the actual entry.
+    const auto page = address / page_size;
+    if (page >= backing_addr.size()) {
+        return false;
+    }
+
+    // Validate that the entry is mapped.
+    const auto phys_addr = backing_addr[page];
+    if (phys_addr == 0) {
+        return false;
+    }
+
+    // Populate the results.
+    out_entry.phys_addr = phys_addr + address;
+    out_context.next_page = page + 1;
+    out_context.next_offset = address + page_size;
+
+    return true;
+}
+
+bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const {
+    // Setup invalid defaults.
+    out_entry.phys_addr = 0;
+    out_entry.block_size = page_size;
+
+    // Validate that we can read the actual entry.
+    const auto page = context.next_page;
+    if (page >= backing_addr.size()) {
+        return false;
+    }
+
+    // Validate that the entry is mapped.
+    const auto phys_addr = backing_addr[page];
+    if (phys_addr == 0) {
+        return false;
+    }
+
+    // Populate the results.
+    out_entry.phys_addr = phys_addr + context.next_offset;
+    context.next_page = page + 1;
+    context.next_offset += page_size;
+
+    return true;
+}
+
+void PageTable::Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits) {
+    const std::size_t num_page_table_entries{1ULL
+                                             << (address_space_width_in_bits - page_size_in_bits)};
     pointers.resize(num_page_table_entries);
     backing_addr.resize(num_page_table_entries);
     current_address_space_width_in_bits = address_space_width_in_bits;
+    page_size = 1ULL << page_size_in_bits;
 }
 
 } // namespace Common
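The BeginTraversal()/ContinueTraversal() pair added above is meant to be driven as a begin/continue loop, with the TraversalContext carrying the next page index and offset between calls. A minimal usage sketch (hypothetical helper, not part of this change; assumes the yuzu include paths and the Common::PageTable API exactly as declared here):

#include <cstddef>
#include <cstdint>

#include "common/page_table.h"

// Sum how many bytes of [address, address + size) are currently backed,
// stopping at the first unmapped page (BeginTraversal/ContinueTraversal
// return false for unmapped entries).
std::size_t CountBackedBytes(const Common::PageTable& table, std::uint64_t address,
                             std::size_t size) {
    Common::PageTable::TraversalEntry entry{};
    Common::PageTable::TraversalContext context{};

    std::size_t backed = 0;
    bool valid = table.BeginTraversal(entry, context, address);
    while (valid && backed < size) {
        // With this implementation each entry covers one page (entry.block_size == page_size).
        backed += entry.block_size;
        valid = table.ContinueTraversal(entry, context);
    }
    return backed;
}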
diff --git a/src/common/page_table.h b/src/common/page_table.h
index 8267e8b4d..82d91e9f3 100644
--- a/src/common/page_table.h
+++ b/src/common/page_table.h
@@ -27,6 +27,16 @@ enum class PageType : u8 {
  * mimics the way a real CPU page table works.
  */
 struct PageTable {
+    struct TraversalEntry {
+        u64 phys_addr{};
+        std::size_t block_size{};
+    };
+
+    struct TraversalContext {
+        u64 next_page{};
+        u64 next_offset{};
+    };
+
     /// Number of bits reserved for attribute tagging.
     /// This can be at most the guaranteed alignment of the pointers in the page table.
     static constexpr int ATTRIBUTE_BITS = 2;
@@ -89,6 +99,10 @@ struct PageTable {
     PageTable(PageTable&&) noexcept = default;
     PageTable& operator=(PageTable&&) noexcept = default;
 
+    bool BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
+                        u64 address) const;
+    bool ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const;
+
     /**
      * Resizes the page table to be able to accommodate enough pages within
      * a given address space.
@@ -96,9 +110,9 @@ struct PageTable {
      * @param address_space_width_in_bits The address size width in bits.
      * @param page_size_in_bits The page size in bits.
      */
-    void Resize(size_t address_space_width_in_bits, size_t page_size_in_bits);
+    void Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits);
 
-    size_t GetAddressSpaceBits() const {
+    std::size_t GetAddressSpaceBits() const {
         return current_address_space_width_in_bits;
     }
 
@@ -110,9 +124,11 @@ struct PageTable {
 
     VirtualBuffer<u64> backing_addr;
 
-    size_t current_address_space_width_in_bits;
+    std::size_t current_address_space_width_in_bits{};
+
+    u8* fastmem_arena{};
 
-    u8* fastmem_arena;
+    std::size_t page_size{};
 };
 
 } // namespace Common
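For the page_size member added above, Resize() derives both values from the same two inputs: the table gets 1 << (address_space_width_in_bits - page_size_in_bits) entries and page_size becomes 1 << page_size_in_bits. A small standalone check of that arithmetic (example values only, e.g. a 39-bit address space with 4 KiB pages):

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
    const std::size_t address_space_width_in_bits = 39; // example width
    const std::size_t page_size_in_bits = 12;           // 4 KiB pages

    const std::size_t num_page_table_entries =
        std::size_t{1} << (address_space_width_in_bits - page_size_in_bits);
    const std::uint64_t page_size = std::uint64_t{1} << page_size_in_bits;

    assert(num_page_table_entries == (std::size_t{1} << 27)); // 2^27 entries
    assert(page_size == 4096);
    return 0;
}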
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 912853e5c..88aa2a152 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -41,24 +41,6 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT
     }
 }
 
-constexpr u64 GetAddressInRange(const KMemoryInfo& info, VAddr addr) {
-    if (info.GetAddress() < addr) {
-        return addr;
-    }
-    return info.GetAddress();
-}
-
-constexpr std::size_t GetSizeInRange(const KMemoryInfo& info, VAddr start, VAddr end) {
-    std::size_t size{info.GetSize()};
-    if (info.GetAddress() < start) {
-        size -= start - info.GetAddress();
-    }
-    if (info.GetEndAddress() > end) {
-        size -= info.GetEndAddress() - end;
-    }
-    return size;
-}
-
 } // namespace
 
 KPageTable::KPageTable(Core::System& system_)
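The removed GetAddressInRange()/GetSizeInRange() helpers clamped a memory block to the queried range; the rewritten functions in the next hunk inline the same logic with last_address/GetLastAddress() comparisons and std::min. A standalone sketch of the equivalent clamp, using plain integers rather than KMemoryInfo (hypothetical types, and assuming the block overlaps the range, as IterateForRange guaranteed):

#include <algorithm>
#include <cstdint>

struct Block {
    std::uint64_t address;
    std::uint64_t size;
    std::uint64_t EndAddress() const { return address + size; }
};

// Equivalent of GetAddressInRange(): the block's start, clamped to the range start.
std::uint64_t ClampedAddress(const Block& block, std::uint64_t start) {
    return std::max(block.address, start);
}

// Equivalent of GetSizeInRange(): the block's size with any parts outside
// [start, end) trimmed off.
std::uint64_t ClampedSize(const Block& block, std::uint64_t start, std::uint64_t end) {
    const std::uint64_t lo = std::max(block.address, start);
    const std::uint64_t hi = std::min(block.EndAddress(), end);
    return hi > lo ? hi - lo : 0;
}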
@@ -400,148 +382,471 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
     return ResultSuccess;
 }
 
-ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
+ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
     // Lock the physical memory lock.
     KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
 
-    // Lock the table.
-    KScopedLightLock lk(general_lock);
-
-    std::size_t mapped_size{};
-    const VAddr end_addr{addr + size};
+    // Calculate the last address for convenience.
+    const VAddr last_address = address + size - 1;
 
-    block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
-        if (info.state != KMemoryState::Free) {
-            mapped_size += GetSizeInRange(info, addr, end_addr);
-        }
-    });
+    // Define iteration variables.
+    VAddr cur_address;
+    std::size_t mapped_size;
 
-    if (mapped_size == size) {
-        return ResultSuccess;
-    }
+    // The entire mapping process can be retried.
+    while (true) {
+        // Check if the memory is already mapped.
+        {
+            // Lock the table.
+            KScopedLightLock lk(general_lock);
+
+            // Iterate over the memory.
+            cur_address = address;
+            mapped_size = 0;
+
+            auto it = block_manager->FindIterator(cur_address);
+            while (true) {
+                // Check that the iterator is valid.
+                ASSERT(it != block_manager->end());
+
+                // Get the memory info.
+                const KMemoryInfo info = it->GetMemoryInfo();
+
+                // Check if we're done.
+                if (last_address <= info.GetLastAddress()) {
+                    if (info.GetState() != KMemoryState::Free) {
+                        mapped_size += (last_address + 1 - cur_address);
+                    }
+                    break;
+                }
+
+                // Track the memory if it's mapped.
+                if (info.GetState() != KMemoryState::Free) {
+                    mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+                }
+
+                // Advance.
+                cur_address = info.GetEndAddress();
+                ++it;
+            }
 
-    const std::size_t remaining_size{size - mapped_size};
-    const std::size_t remaining_pages{remaining_size / PageSize};
+            // If the size mapped is the size requested, we've nothing to do.
+            R_SUCCEED_IF(size == mapped_size);
+        }
 
-    // Reserve the memory from the process resource limit.
-    KScopedResourceReservation memory_reservation(
-        system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
-        remaining_size);
-    if (!memory_reservation.Succeeded()) {
-        LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size);
-        return ResultLimitReached;
+        // Allocate and map the memory.
+        {
+            // Reserve the memory from the process resource limit.
+            KScopedResourceReservation memory_reservation(
+                system.Kernel().CurrentProcess()->GetResourceLimit(),
+                LimitableResource::PhysicalMemory, size - mapped_size);
+            R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+            // Allocate pages for the new memory.
+            KPageLinkedList page_linked_list;
+            R_TRY(system.Kernel().MemoryManager().Allocate(
+                page_linked_list, (size - mapped_size) / PageSize, memory_pool, allocation_option));
+
+            // Map the memory.
+            {
+                // Lock the table.
+                KScopedLightLock lk(general_lock);
+
+                size_t num_allocator_blocks = 0;
+
+                // Verify that nobody has mapped memory since we first checked.
+                {
+                    // Iterate over the memory.
+                    size_t checked_mapped_size = 0;
+                    cur_address = address;
+
+                    auto it = block_manager->FindIterator(cur_address);
+                    while (true) {
+                        // Check that the iterator is valid.
+                        ASSERT(it != block_manager->end());
+
+                        // Get the memory info.
+                        const KMemoryInfo info = it->GetMemoryInfo();
+
+                        const bool is_free = info.GetState() == KMemoryState::Free;
+                        if (is_free) {
+                            if (info.GetAddress() < address) {
+                                ++num_allocator_blocks;
+                            }
+                            if (last_address < info.GetLastAddress()) {
+                                ++num_allocator_blocks;
+                            }
+                        }
+
+                        // Check if we're done.
+                        if (last_address <= info.GetLastAddress()) {
+                            if (!is_free) {
+                                checked_mapped_size += (last_address + 1 - cur_address);
+                            }
+                            break;
+                        }
+
+                        // Track the memory if it's mapped.
+                        if (!is_free) {
+                            checked_mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+                        }
+
+                        // Advance.
+                        cur_address = info.GetEndAddress();
+                        ++it;
+                    }
+
+                    // If the size now isn't what it was before, somebody mapped or unmapped
+                    // concurrently. If this happened, retry.
+                    if (mapped_size != checked_mapped_size) {
+                        continue;
+                    }
+                }
+
+                // Reset the current tracking address, and make sure we clean up on failure.
+                cur_address = address;
+                auto unmap_guard = detail::ScopeExit([&] {
+                    if (cur_address > address) {
+                        const VAddr last_unmap_address = cur_address - 1;
+
+                        // Iterate, unmapping the pages.
+                        cur_address = address;
+
+                        auto it = block_manager->FindIterator(cur_address);
+                        while (true) {
+                            // Check that the iterator is valid.
+                            ASSERT(it != block_manager->end());
+
+                            // Get the memory info.
+                            const KMemoryInfo info = it->GetMemoryInfo();
+
+                            // If the memory state is free, we mapped it and need to unmap it.
+                            if (info.GetState() == KMemoryState::Free) {
+                                // Determine the range to unmap.
+                                const size_t cur_pages =
+                                    std::min(VAddr(info.GetEndAddress()) - cur_address,
+                                             last_unmap_address + 1 - cur_address) /
+                                    PageSize;
+
+                                // Unmap.
+                                ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
+                                               OperationType::Unmap)
+                                           .IsSuccess());
+                            }
+
+                            // Check if we're done.
+                            if (last_unmap_address <= info.GetLastAddress()) {
+                                break;
+                            }
+
+                            // Advance.
+                            cur_address = info.GetEndAddress();
+                            ++it;
+                        }
+                    }
+                });
+
+                // Iterate over the memory.
+                auto pg_it = page_linked_list.Nodes().begin();
+                PAddr pg_phys_addr = pg_it->GetAddress();
+                size_t pg_pages = pg_it->GetNumPages();
+
+                auto it = block_manager->FindIterator(cur_address);
+                while (true) {
+                    // Check that the iterator is valid.
+                    ASSERT(it != block_manager->end());
+
+                    // Get the memory info.
+                    const KMemoryInfo info = it->GetMemoryInfo();
+
+                    // If it's unmapped, we need to map it.
+                    if (info.GetState() == KMemoryState::Free) {
+                        // Determine the range to map.
+                        size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+                                                    last_address + 1 - cur_address) /
+                                           PageSize;
+
+                        // While we have pages to map, map them.
+                        while (map_pages > 0) {
+                            // Check if we're at the end of the physical block.
+                            if (pg_pages == 0) {
+                                // Ensure there are more pages to map.
+                                ASSERT(pg_it != page_linked_list.Nodes().end());
+
+                                // Advance our physical block.
+                                ++pg_it;
+                                pg_phys_addr = pg_it->GetAddress();
+                                pg_pages = pg_it->GetNumPages();
+                            }
+
+                            // Map whatever we can.
+                            const size_t cur_pages = std::min(pg_pages, map_pages);
+                            R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
+                                          OperationType::Map, pg_phys_addr));
+
+                            // Advance.
+                            cur_address += cur_pages * PageSize;
+                            map_pages -= cur_pages;
+
+                            pg_phys_addr += cur_pages * PageSize;
+                            pg_pages -= cur_pages;
+                        }
+                    }
+
+                    // Check if we're done.
+                    if (last_address <= info.GetLastAddress()) {
+                        break;
+                    }
+
+                    // Advance.
+                    cur_address = info.GetEndAddress();
+                    ++it;
+                }
+
+                // We succeeded, so commit the memory reservation.
+                memory_reservation.Commit();
+
+                // Increase our tracked mapped size.
+                mapped_physical_memory_size += (size - mapped_size);
+
+                // Update the relevant memory blocks.
+                block_manager->Update(address, size / PageSize, KMemoryState::Free,
+                                      KMemoryPermission::None, KMemoryAttribute::None,
+                                      KMemoryState::Normal, KMemoryPermission::UserReadWrite,
+                                      KMemoryAttribute::None);
+
+                // Cancel our guard.
+                unmap_guard.Cancel();
+
+                return ResultSuccess;
+            }
+        }
     }
+}
 
-    KPageLinkedList page_linked_list;
+ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
+    // Lock the physical memory lock.
+    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
 
-    CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages,
-                                                          memory_pool, allocation_option));
+    // Lock the table.
+    KScopedLightLock lk(general_lock);
 
-    // We succeeded, so commit the memory reservation.
-    memory_reservation.Commit();
+    // Calculate the last address for convenience.
+    const VAddr last_address = address + size - 1;
 
-    // Map the memory.
-    auto node{page_linked_list.Nodes().begin()};
-    PAddr map_addr{node->GetAddress()};
-    std::size_t src_num_pages{node->GetNumPages()};
-    block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
-        if (info.state != KMemoryState::Free) {
-            return;
-        }
+    // Define iteration variables.
+    VAddr cur_address = 0;
+    std::size_t mapped_size = 0;
+    std::size_t num_allocator_blocks = 0;
 
-        std::size_t dst_num_pages{GetSizeInRange(info, addr, end_addr) / PageSize};
-        VAddr dst_addr{GetAddressInRange(info, addr)};
+    // Check if the memory is mapped.
+    {
+        // Iterate over the memory.
+        cur_address = address;
+        mapped_size = 0;
+
+        auto it = block_manager->FindIterator(cur_address);
+        while (true) {
+            // Check that the iterator is valid.
+            ASSERT(it != block_manager->end());
+
+            // Get the memory info.
+            const KMemoryInfo info = it->GetMemoryInfo();
+
+            // Verify the memory's state.
+            const bool is_normal = info.GetState() == KMemoryState::Normal &&
+                                   info.GetAttribute() == KMemoryAttribute::None;
+            const bool is_free = info.GetState() == KMemoryState::Free;
+            R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);
+
+            if (is_normal) {
+                R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
+
+                if (info.GetAddress() < address) {
+                    ++num_allocator_blocks;
+                }
+                if (last_address < info.GetLastAddress()) {
+                    ++num_allocator_blocks;
+                }
+            }
 
-        while (dst_num_pages) {
-            if (!src_num_pages) {
-                node = std::next(node);
-                map_addr = node->GetAddress();
-                src_num_pages = node->GetNumPages();
+            // Check if we're done.
+            if (last_address <= info.GetLastAddress()) {
+                if (is_normal) {
+                    mapped_size += (last_address + 1 - cur_address);
+                }
+                break;
             }
 
-            const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)};
-            Operate(dst_addr, num_pages, KMemoryPermission::UserReadWrite, OperationType::Map,
-                    map_addr);
+            // Track the memory if it's mapped.
+            if (is_normal) {
+                mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+            }
 
-            dst_addr += num_pages * PageSize;
-            map_addr += num_pages * PageSize;
-            src_num_pages -= num_pages;
-            dst_num_pages -= num_pages;
+            // Advance.
+            cur_address = info.GetEndAddress();
+            ++it;
         }
-    });
-
-    mapped_physical_memory_size += remaining_size;
-
-    const std::size_t num_pages{size / PageSize};
-    block_manager->Update(addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
-                          KMemoryAttribute::None, KMemoryState::Normal,
-                          KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
 
-    return ResultSuccess;
-}
+        // If there's nothing mapped, we've nothing to do.
+        R_SUCCEED_IF(mapped_size == 0);
+    }
 
-ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
-    // Lock the physical memory lock.
-    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+    // Make a page group for the unmap region.
+    KPageLinkedList pg;
+    {
+        auto& impl = this->PageTableImpl();
+
+        // Begin traversal.
+        Common::PageTable::TraversalContext context;
+        Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
+        bool cur_valid = false;
+        Common::PageTable::TraversalEntry next_entry;
+        bool next_valid = false;
+        size_t tot_size = 0;
+
+        cur_address = address;
+        next_valid = impl.BeginTraversal(next_entry, context, cur_address);
+        next_entry.block_size =
+            (next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1)));
+
+        // Iterate, building the group.
+        while (true) {
+            if ((!next_valid && !cur_valid) ||
+                (next_valid && cur_valid &&
+                 next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
+                cur_entry.block_size += next_entry.block_size;
+            } else {
+                if (cur_valid) {
+                    // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
+                    R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize));
+                }
+
+                // Update tracking variables.
+                tot_size += cur_entry.block_size;
+                cur_entry = next_entry;
+                cur_valid = next_valid;
+            }
 
-    // Lock the table.
-    KScopedLightLock lk(general_lock);
+            if (cur_entry.block_size + tot_size >= size) {
+                break;
+            }
 
-    const VAddr end_addr{addr + size};
-    ResultCode result{ResultSuccess};
-    std::size_t mapped_size{};
+            next_valid = impl.ContinueTraversal(next_entry, context);
+        }
 
-    // Verify that the region can be unmapped
-    block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
-        if (info.state == KMemoryState::Normal) {
-            if (info.attribute != KMemoryAttribute::None) {
-                result = ResultInvalidCurrentMemory;
-                return;
+        // Add the last block.
+        if (cur_valid) {
+            // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
+            R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize));
+        }
+    }
+    ASSERT(pg.GetNumPages() == mapped_size / PageSize);
+
+    // Reset the current tracking address, and make sure we clean up on failure.
+    cur_address = address;
+    auto remap_guard = detail::ScopeExit([&] {
+        if (cur_address > address) {
+            const VAddr last_map_address = cur_address - 1;
+            cur_address = address;
+
+            // Iterate over the memory we unmapped.
+            auto it = block_manager->FindIterator(cur_address);
+            auto pg_it = pg.Nodes().begin();
+            PAddr pg_phys_addr = pg_it->GetAddress();
+            size_t pg_pages = pg_it->GetNumPages();
+
+            while (true) {
+                // Get the memory info for the pages we unmapped, convert to property.
+                const KMemoryInfo info = it->GetMemoryInfo();
+
+                // If the memory is normal, we unmapped it and need to re-map it.
+                if (info.GetState() == KMemoryState::Normal) {
+                    // Determine the range to map.
+                    size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+                                                last_map_address + 1 - cur_address) /
+                                       PageSize;
+
+                    // While we have pages to map, map them.
+                    while (map_pages > 0) {
+                        // Check if we're at the end of the physical block.
+                        if (pg_pages == 0) {
+                            // Ensure there are more pages to map.
+                            ASSERT(pg_it != pg.Nodes().end());
+
+                            // Advance our physical block.
+                            ++pg_it;
+                            pg_phys_addr = pg_it->GetAddress();
+                            pg_pages = pg_it->GetNumPages();
+                        }
+
+                        // Map whatever we can.
+                        const size_t cur_pages = std::min(pg_pages, map_pages);
+                        ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(),
+                                             OperationType::Map, pg_phys_addr) == ResultSuccess);
+
+                        // Advance.
+                        cur_address += cur_pages * PageSize;
+                        map_pages -= cur_pages;
+
+                        pg_phys_addr += cur_pages * PageSize;
+                        pg_pages -= cur_pages;
+                    }
+                }
+
+                // Check if we're done.
+                if (last_map_address <= info.GetLastAddress()) {
+                    break;
+                }
+
+                // Advance.
+                ++it;
             }
-            mapped_size += GetSizeInRange(info, addr, end_addr);
-        } else if (info.state != KMemoryState::Free) {
-            result = ResultInvalidCurrentMemory;
         }
     });
 
-    if (result.IsError()) {
-        return result;
-    }
+    // Iterate over the memory, unmapping as we go.
+    auto it = block_manager->FindIterator(cur_address);
+    while (true) {
+        // Check that the iterator is valid.
+        ASSERT(it != block_manager->end());
 
-    if (!mapped_size) {
-        return ResultSuccess;
-    }
+        // Get the memory info.
+        const KMemoryInfo info = it->GetMemoryInfo();
 
-    // Unmap each region within the range
-    KPageLinkedList page_linked_list;
-    block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
-        if (info.state == KMemoryState::Normal) {
-            const std::size_t block_size{GetSizeInRange(info, addr, end_addr)};
-            const std::size_t block_num_pages{block_size / PageSize};
-            const VAddr block_addr{GetAddressInRange(info, addr)};
-
-            AddRegionToPages(block_addr, block_size / PageSize, page_linked_list);
-
-            if (result = Operate(block_addr, block_num_pages, KMemoryPermission::None,
-                                 OperationType::Unmap);
-                result.IsError()) {
-                return;
-            }
+        // If the memory state is normal, we need to unmap it.
+        if (info.GetState() == KMemoryState::Normal) {
+            // Determine the range to unmap.
+            const size_t cur_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+                                              last_address + 1 - cur_address) /
+                                     PageSize;
+
+            // Unmap.
+            R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap));
         }
-    });
-    if (result.IsError()) {
-        return result;
-    }
 
-    const std::size_t num_pages{size / PageSize};
-    system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool,
-                                         allocation_option);
+        // Check if we're done.
+        if (last_address <= info.GetLastAddress()) {
+            break;
+        }
 
-    block_manager->Update(addr, num_pages, KMemoryState::Free);
+        // Advance.
+        cur_address = info.GetEndAddress();
+        ++it;
+    }
 
+    // Release the memory resource.
+    mapped_physical_memory_size -= mapped_size;
     auto process{system.Kernel().CurrentProcess()};
     process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
-    mapped_physical_memory_size -= mapped_size;
+
+    // Update memory blocks.
+    system.Kernel().MemoryManager().Free(pg, size / PageSize, memory_pool, allocation_option);
+    block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None,
+                          KMemoryAttribute::None);
+
+    // We succeeded.
+    remap_guard.Cancel();
 
     return ResultSuccess;
 }
@@ -681,9 +986,8 @@ ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked
     VAddr cur_addr{addr};
 
     for (const auto& node : page_linked_list.Nodes()) {
-        const std::size_t num_pages{(addr - cur_addr) / PageSize};
-        if (const auto result{
-                Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)};
+        if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
+                                      OperationType::Unmap)};
             result.IsError()) {
             return result;
         }
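Both MapPhysicalMemory() and UnmapPhysicalMemory() above lean on a cancelable scope-exit guard (detail::ScopeExit) to roll back partially completed work if a later step fails, and call Cancel() once everything has succeeded. A minimal sketch of that pattern, assuming nothing about yuzu's actual helper beyond the callable-plus-Cancel() usage visible in the diff:

#include <utility>

// Runs the stored callable when the scope ends, unless Cancel() was called.
template <typename F>
class ScopeExit {
public:
    explicit ScopeExit(F&& func) : func_(std::forward<F>(func)) {}
    ~ScopeExit() {
        if (armed_) {
            func_();
        }
    }

    void Cancel() {
        armed_ = false;
    }

    ScopeExit(const ScopeExit&) = delete;
    ScopeExit& operator=(const ScopeExit&) = delete;

private:
    F func_;
    bool armed_ = true;
};

// Usage mirroring MapPhysicalMemory(): undo partial mappings unless we commit.
// ScopeExit unmap_guard{[&] { /* unmap whatever was mapped so far */ }};
// ... map pages, returning early on failure (the guard then unmaps) ...
// unmap_guard.Cancel(); // success: keep the mappings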