Diffstat (limited to 'src')
-rw-r--r--  src/common/page_table.cpp             |  58
-rw-r--r--  src/common/page_table.h               |  24
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp  | 539
3 files changed, 508 insertions(+), 113 deletions(-)
diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp
index 9fffd816f..75897eeae 100644
--- a/src/common/page_table.cpp
+++ b/src/common/page_table.cpp
@@ -10,11 +10,65 @@ PageTable::PageTable() = default;
| 10 | 10 | ||
| 11 | PageTable::~PageTable() noexcept = default; | 11 | PageTable::~PageTable() noexcept = default; |
| 12 | 12 | ||
| 13 | void PageTable::Resize(size_t address_space_width_in_bits, size_t page_size_in_bits) { | 13 | bool PageTable::BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context, |
| 14 | const size_t num_page_table_entries{1ULL << (address_space_width_in_bits - page_size_in_bits)}; | 14 | u64 address) const { |
| 15 | // Setup invalid defaults. | ||
| 16 | out_entry->phys_addr = 0; | ||
| 17 | out_entry->block_size = page_size; | ||
| 18 | out_context->next_page = 0; | ||
| 19 | |||
| 20 | // Validate that we can read the actual entry. | ||
| 21 | const auto page = address / page_size; | ||
| 22 | if (page >= backing_addr.size()) { | ||
| 23 | return false; | ||
| 24 | } | ||
| 25 | |||
| 26 | // Validate that the entry is mapped. | ||
| 27 | const auto phys_addr = backing_addr[page]; | ||
| 28 | if (phys_addr == 0) { | ||
| 29 | return false; | ||
| 30 | } | ||
| 31 | |||
| 32 | // Populate the results. | ||
| 33 | out_entry->phys_addr = phys_addr + address; | ||
| 34 | out_context->next_page = page + 1; | ||
| 35 | out_context->next_offset = address + page_size; | ||
| 36 | |||
| 37 | return true; | ||
| 38 | } | ||
| 39 | |||
| 40 | bool PageTable::ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const { | ||
| 41 | // Setup invalid defaults. | ||
| 42 | out_entry->phys_addr = 0; | ||
| 43 | out_entry->block_size = page_size; | ||
| 44 | |||
| 45 | // Validate that we can read the actual entry. | ||
| 46 | const auto page = context->next_page; | ||
| 47 | if (page >= backing_addr.size()) { | ||
| 48 | return false; | ||
| 49 | } | ||
| 50 | |||
| 51 | // Validate that the entry is mapped. | ||
| 52 | const auto phys_addr = backing_addr[page]; | ||
| 53 | if (phys_addr == 0) { | ||
| 54 | return false; | ||
| 55 | } | ||
| 56 | |||
| 57 | // Populate the results. | ||
| 58 | out_entry->phys_addr = phys_addr + context->next_offset; | ||
| 59 | context->next_page = page + 1; | ||
| 60 | context->next_offset += page_size; | ||
| 61 | |||
| 62 | return true; | ||
| 63 | } | ||
| 64 | |||
| 65 | void PageTable::Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits) { | ||
| 66 | const std::size_t num_page_table_entries{1ULL | ||
| 67 | << (address_space_width_in_bits - page_size_in_bits)}; | ||
| 15 | pointers.resize(num_page_table_entries); | 68 | pointers.resize(num_page_table_entries); |
| 16 | backing_addr.resize(num_page_table_entries); | 69 | backing_addr.resize(num_page_table_entries); |
| 17 | current_address_space_width_in_bits = address_space_width_in_bits; | 70 | current_address_space_width_in_bits = address_space_width_in_bits; |
| 71 | page_size = 1ULL << page_size_in_bits; | ||
| 18 | } | 72 | } |
| 19 | 73 | ||
| 20 | } // namespace Common | 74 | } // namespace Common |
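For reference, a minimal sketch of how a caller might drive the traversal API added above. The helper name WalkRange and its visitor parameter are illustrative only and not part of this change; the in-kernel caller (see k_page_table.cpp below) additionally trims the first entry's block_size for alignment before accumulating sizes.

    // Illustrative sketch; assumes "common/common_types.h" and "common/page_table.h" are included.
    // Visits every mapped entry covering [address, address + size); returns false if an
    // unmapped page is reached first.
    template <typename Visitor>
    bool WalkRange(const Common::PageTable& table, u64 address, u64 size, Visitor&& visit) {
        Common::PageTable::TraversalEntry entry{};
        Common::PageTable::TraversalContext context{};

        // BeginTraversal fails when the first page is out of range or unmapped.
        if (!table.BeginTraversal(&entry, &context, address)) {
            return false;
        }

        u64 traversed = 0;
        while (true) {
            visit(entry.phys_addr, entry.block_size);
            traversed += entry.block_size;
            if (traversed >= size) {
                return true;
            }
            // ContinueTraversal fails once it walks onto an unmapped page.
            if (!table.ContinueTraversal(&entry, &context)) {
                return false;
            }
        }
    }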
diff --git a/src/common/page_table.h b/src/common/page_table.h
index 8267e8b4d..fe254d7ae 100644
--- a/src/common/page_table.h
+++ b/src/common/page_table.h
@@ -27,6 +27,16 @@ enum class PageType : u8 {
| 27 | * mimics the way a real CPU page table works. | 27 | * mimics the way a real CPU page table works. |
| 28 | */ | 28 | */ |
| 29 | struct PageTable { | 29 | struct PageTable { |
| 30 | struct TraversalEntry { | ||
| 31 | u64 phys_addr{}; | ||
| 32 | std::size_t block_size{}; | ||
| 33 | }; | ||
| 34 | |||
| 35 | struct TraversalContext { | ||
| 36 | u64 next_page{}; | ||
| 37 | u64 next_offset{}; | ||
| 38 | }; | ||
| 39 | |||
| 30 | /// Number of bits reserved for attribute tagging. | 40 | /// Number of bits reserved for attribute tagging. |
| 31 | /// This can be at most the guaranteed alignment of the pointers in the page table. | 41 | /// This can be at most the guaranteed alignment of the pointers in the page table. |
| 32 | static constexpr int ATTRIBUTE_BITS = 2; | 42 | static constexpr int ATTRIBUTE_BITS = 2; |
@@ -89,6 +99,10 @@ struct PageTable {
| 89 | PageTable(PageTable&&) noexcept = default; | 99 | PageTable(PageTable&&) noexcept = default; |
| 90 | PageTable& operator=(PageTable&&) noexcept = default; | 100 | PageTable& operator=(PageTable&&) noexcept = default; |
| 91 | 101 | ||
| 102 | bool BeginTraversal(TraversalEntry* out_entry, TraversalContext* out_context, | ||
| 103 | u64 address) const; | ||
| 104 | bool ContinueTraversal(TraversalEntry* out_entry, TraversalContext* context) const; | ||
| 105 | |||
| 92 | /** | 106 | /** |
| 93 | * Resizes the page table to be able to accommodate enough pages within | 107 | * Resizes the page table to be able to accommodate enough pages within |
| 94 | * a given address space. | 108 | * a given address space. |
@@ -96,9 +110,9 @@ struct PageTable {
| 96 | * @param address_space_width_in_bits The address size width in bits. | 110 | * @param address_space_width_in_bits The address size width in bits. |
| 97 | * @param page_size_in_bits The page size in bits. | 111 | * @param page_size_in_bits The page size in bits. |
| 98 | */ | 112 | */ |
| 99 | void Resize(size_t address_space_width_in_bits, size_t page_size_in_bits); | 113 | void Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits); |
| 100 | 114 | ||
| 101 | size_t GetAddressSpaceBits() const { | 115 | std::size_t GetAddressSpaceBits() const { |
| 102 | return current_address_space_width_in_bits; | 116 | return current_address_space_width_in_bits; |
| 103 | } | 117 | } |
| 104 | 118 | ||
@@ -110,9 +124,11 @@ struct PageTable {
| 110 | 124 | ||
| 111 | VirtualBuffer<u64> backing_addr; | 125 | VirtualBuffer<u64> backing_addr; |
| 112 | 126 | ||
| 113 | size_t current_address_space_width_in_bits; | 127 | std::size_t current_address_space_width_in_bits{}; |
| 128 | |||
| 129 | u8* fastmem_arena{}; | ||
| 114 | 130 | ||
| 115 | u8* fastmem_arena; | 131 | std::size_t page_size{}; |
| 116 | }; | 132 | }; |
| 117 | 133 | ||
| 118 | } // namespace Common | 134 | } // namespace Common |
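As a quick sanity check of the Resize() arithmetic above (the 39-bit address space and 12-bit page size are illustrative values, not taken from this change):

    Common::PageTable table;
    table.Resize(/*address_space_width_in_bits=*/39, /*page_size_in_bits=*/12);
    // pointers.size() == backing_addr.size() == 1ULL << (39 - 12) == 134217728 entries
    // page_size == 1ULL << 12 == 0x1000 bytes (4 KiB)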
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 912853e5c..28cea9ab3 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -400,148 +400,473 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
| 400 | return ResultSuccess; | 400 | return ResultSuccess; |
| 401 | } | 401 | } |
| 402 | 402 | ||
| 403 | ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) { | 403 | ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { |
| 404 | // Lock the physical memory lock. | 404 | // Lock the physical memory lock. |
| 405 | KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); | 405 | KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); |
| 406 | 406 | ||
| 407 | // Lock the table. | 407 | // Calculate the last address for convenience. |
| 408 | KScopedLightLock lk(general_lock); | 408 | const VAddr last_address = address + size - 1; |
| 409 | 409 | ||
| 410 | std::size_t mapped_size{}; | 410 | // Define iteration variables. |
| 411 | const VAddr end_addr{addr + size}; | 411 | VAddr cur_address; |
| 412 | 412 | std::size_t mapped_size; | |
| 413 | block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) { | ||
| 414 | if (info.state != KMemoryState::Free) { | ||
| 415 | mapped_size += GetSizeInRange(info, addr, end_addr); | ||
| 416 | } | ||
| 417 | }); | ||
| 418 | 413 | ||
| 419 | if (mapped_size == size) { | 414 | // The entire mapping process can be retried. |
| 420 | return ResultSuccess; | 415 | while (true) { |
| 421 | } | 416 | // Check if the memory is already mapped. |
| 417 | { | ||
| 418 | // Lock the table. | ||
| 419 | KScopedLightLock lk(general_lock); | ||
| 420 | |||
| 421 | // Iterate over the memory. | ||
| 422 | cur_address = address; | ||
| 423 | mapped_size = 0; | ||
| 424 | |||
| 425 | auto it = block_manager->FindIterator(cur_address); | ||
| 426 | while (true) { | ||
| 427 | // Check that the iterator is valid. | ||
| 428 | ASSERT(it != block_manager->end()); | ||
| 429 | |||
| 430 | // Get the memory info. | ||
| 431 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 432 | |||
| 433 | // Check if we're done. | ||
| 434 | if (last_address <= info.GetLastAddress()) { | ||
| 435 | if (info.GetState() != KMemoryState::Free) { | ||
| 436 | mapped_size += (last_address + 1 - cur_address); | ||
| 437 | } | ||
| 438 | break; | ||
| 439 | } | ||
| 440 | |||
| 441 | // Track the memory if it's mapped. | ||
| 442 | if (info.GetState() != KMemoryState::Free) { | ||
| 443 | mapped_size += VAddr(info.GetEndAddress()) - cur_address; | ||
| 444 | } | ||
| 445 | |||
| 446 | // Advance. | ||
| 447 | cur_address = info.GetEndAddress(); | ||
| 448 | ++it; | ||
| 449 | } | ||
| 422 | 450 | ||
| 423 | const std::size_t remaining_size{size - mapped_size}; | 451 | // If the size mapped is the size requested, we've nothing to do. |
| 424 | const std::size_t remaining_pages{remaining_size / PageSize}; | 452 | R_SUCCEED_IF(size == mapped_size); |
| 453 | } | ||
| 425 | 454 | ||
| 426 | // Reserve the memory from the process resource limit. | 455 | // Allocate and map the memory. |
| 427 | KScopedResourceReservation memory_reservation( | 456 | { |
| 428 | system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, | 457 | // Reserve the memory from the process resource limit. |
| 429 | remaining_size); | 458 | KScopedResourceReservation memory_reservation( |
| 430 | if (!memory_reservation.Succeeded()) { | 459 | system.Kernel().CurrentProcess()->GetResourceLimit(), |
| 431 | LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size); | 460 | LimitableResource::PhysicalMemory, size - mapped_size); |
| 432 | return ResultLimitReached; | 461 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 462 | |||
| 463 | // Allocate pages for the new memory. | ||
| 464 | KPageLinkedList page_linked_list; | ||
| 465 | R_TRY(system.Kernel().MemoryManager().Allocate( | ||
| 466 | page_linked_list, (size - mapped_size) / PageSize, memory_pool, allocation_option)); | ||
| 467 | |||
| 468 | // Map the memory. | ||
| 469 | { | ||
| 470 | // Lock the table. | ||
| 471 | KScopedLightLock lk(general_lock); | ||
| 472 | |||
| 473 | size_t num_allocator_blocks = 0; | ||
| 474 | |||
| 475 | // Verify that nobody has mapped memory since we first checked. | ||
| 476 | { | ||
| 477 | // Iterate over the memory. | ||
| 478 | size_t checked_mapped_size = 0; | ||
| 479 | cur_address = address; | ||
| 480 | |||
| 481 | auto it = block_manager->FindIterator(cur_address); | ||
| 482 | while (true) { | ||
| 483 | // Check that the iterator is valid. | ||
| 484 | ASSERT(it != block_manager->end()); | ||
| 485 | |||
| 486 | // Get the memory info. | ||
| 487 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 488 | |||
| 489 | const bool is_free = info.GetState() == KMemoryState::Free; | ||
| 490 | if (is_free) { | ||
| 491 | if (info.GetAddress() < address) { | ||
| 492 | ++num_allocator_blocks; | ||
| 493 | } | ||
| 494 | if (last_address < info.GetLastAddress()) { | ||
| 495 | ++num_allocator_blocks; | ||
| 496 | } | ||
| 497 | } | ||
| 498 | |||
| 499 | // Check if we're done. | ||
| 500 | if (last_address <= info.GetLastAddress()) { | ||
| 501 | if (!is_free) { | ||
| 502 | checked_mapped_size += (last_address + 1 - cur_address); | ||
| 503 | } | ||
| 504 | break; | ||
| 505 | } | ||
| 506 | |||
| 507 | // Track the memory if it's mapped. | ||
| 508 | if (!is_free) { | ||
| 509 | checked_mapped_size += VAddr(info.GetEndAddress()) - cur_address; | ||
| 510 | } | ||
| 511 | |||
| 512 | // Advance. | ||
| 513 | cur_address = info.GetEndAddress(); | ||
| 514 | ++it; | ||
| 515 | } | ||
| 516 | |||
| 517 | // If the size now isn't what it was before, somebody mapped or unmapped | ||
| 518 | // concurrently. If this happened, retry. | ||
| 519 | if (mapped_size != checked_mapped_size) { | ||
| 520 | continue; | ||
| 521 | } | ||
| 522 | } | ||
| 523 | |||
| 524 | // Reset the current tracking address, and make sure we clean up on failure. | ||
| 525 | cur_address = address; | ||
| 526 | auto unmap_guard = detail::ScopeExit([&] { | ||
| 527 | if (cur_address > address) { | ||
| 528 | const VAddr last_unmap_address = cur_address - 1; | ||
| 529 | |||
| 530 | // Iterate, unmapping the pages. | ||
| 531 | cur_address = address; | ||
| 532 | |||
| 533 | auto it = block_manager->FindIterator(cur_address); | ||
| 534 | while (true) { | ||
| 535 | // Check that the iterator is valid. | ||
| 536 | ASSERT(it != block_manager->end()); | ||
| 537 | |||
| 538 | // Get the memory info. | ||
| 539 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 540 | |||
| 541 | // If the memory state is free, we mapped it and need to unmap it. | ||
| 542 | if (info.GetState() == KMemoryState::Free) { | ||
| 543 | // Determine the range to unmap. | ||
| 544 | const size_t cur_pages = | ||
| 545 | std::min(VAddr(info.GetEndAddress()) - cur_address, | ||
| 546 | last_unmap_address + 1 - cur_address) / | ||
| 547 | PageSize; | ||
| 548 | |||
| 549 | // Unmap. | ||
| 550 | ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, | ||
| 551 | OperationType::Unmap) | ||
| 552 | .IsSuccess()); | ||
| 553 | } | ||
| 554 | |||
| 555 | // Check if we're done. | ||
| 556 | if (last_unmap_address <= info.GetLastAddress()) { | ||
| 557 | break; | ||
| 558 | } | ||
| 559 | |||
| 560 | // Advance. | ||
| 561 | cur_address = info.GetEndAddress(); | ||
| 562 | ++it; | ||
| 563 | } | ||
| 564 | } | ||
| 565 | }); | ||
| 566 | |||
| 567 | // Iterate over the memory. | ||
| 568 | auto pg_it = page_linked_list.Nodes().begin(); | ||
| 569 | PAddr pg_phys_addr = pg_it->GetAddress(); | ||
| 570 | size_t pg_pages = pg_it->GetNumPages(); | ||
| 571 | |||
| 572 | auto it = block_manager->FindIterator(cur_address); | ||
| 573 | while (true) { | ||
| 574 | // Check that the iterator is valid. | ||
| 575 | ASSERT(it != block_manager->end()); | ||
| 576 | |||
| 577 | // Get the memory info. | ||
| 578 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 579 | |||
| 580 | // If it's unmapped, we need to map it. | ||
| 581 | if (info.GetState() == KMemoryState::Free) { | ||
| 582 | // Determine the range to map. | ||
| 583 | size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address, | ||
| 584 | last_address + 1 - cur_address) / | ||
| 585 | PageSize; | ||
| 586 | |||
| 587 | // While we have pages to map, map them. | ||
| 588 | while (map_pages > 0) { | ||
| 589 | // Check if we're at the end of the physical block. | ||
| 590 | if (pg_pages == 0) { | ||
| 591 | // Ensure there are more pages to map. | ||
| 592 | ASSERT(pg_it != page_linked_list.Nodes().end()); | ||
| 593 | |||
| 594 | // Advance our physical block. | ||
| 595 | ++pg_it; | ||
| 596 | pg_phys_addr = pg_it->GetAddress(); | ||
| 597 | pg_pages = pg_it->GetNumPages(); | ||
| 598 | } | ||
| 599 | |||
| 600 | // Map whatever we can. | ||
| 601 | const size_t cur_pages = std::min(pg_pages, map_pages); | ||
| 602 | R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite, | ||
| 603 | OperationType::Map, pg_phys_addr)); | ||
| 604 | |||
| 605 | // Advance. | ||
| 606 | cur_address += cur_pages * PageSize; | ||
| 607 | map_pages -= cur_pages; | ||
| 608 | |||
| 609 | pg_phys_addr += cur_pages * PageSize; | ||
| 610 | pg_pages -= cur_pages; | ||
| 611 | } | ||
| 612 | } | ||
| 613 | |||
| 614 | // Check if we're done. | ||
| 615 | if (last_address <= info.GetLastAddress()) { | ||
| 616 | break; | ||
| 617 | } | ||
| 618 | |||
| 619 | // Advance. | ||
| 620 | cur_address = info.GetEndAddress(); | ||
| 621 | ++it; | ||
| 622 | } | ||
| 623 | |||
| 624 | // We succeeded, so commit the memory reservation. | ||
| 625 | memory_reservation.Commit(); | ||
| 626 | |||
| 627 | // Increase our tracked mapped size. | ||
| 628 | mapped_physical_memory_size += (size - mapped_size); | ||
| 629 | |||
| 630 | // Update the relevant memory blocks. | ||
| 631 | block_manager->Update(address, size / PageSize, KMemoryState::Free, | ||
| 632 | KMemoryPermission::None, KMemoryAttribute::None, | ||
| 633 | KMemoryState::Normal, KMemoryPermission::UserReadWrite, | ||
| 634 | KMemoryAttribute::None); | ||
| 635 | |||
| 636 | // Cancel our guard. | ||
| 637 | unmap_guard.Cancel(); | ||
| 638 | |||
| 639 | return ResultSuccess; | ||
| 640 | } | ||
| 641 | } | ||
| 433 | } | 642 | } |
| 643 | } | ||
| 434 | 644 | ||
| 435 | KPageLinkedList page_linked_list; | 645 | ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { |
| 646 | // Lock the physical memory lock. | ||
| 647 | KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); | ||
| 436 | 648 | ||
| 437 | CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages, | 649 | // Lock the table. |
| 438 | memory_pool, allocation_option)); | 650 | KScopedLightLock lk(general_lock); |
| 439 | 651 | ||
| 440 | // We succeeded, so commit the memory reservation. | 652 | // Calculate the last address for convenience. |
| 441 | memory_reservation.Commit(); | 653 | const VAddr last_address = address + size - 1; |
| 442 | 654 | ||
| 443 | // Map the memory. | 655 | // Define iteration variables. |
| 444 | auto node{page_linked_list.Nodes().begin()}; | 656 | VAddr cur_address = 0; |
| 445 | PAddr map_addr{node->GetAddress()}; | 657 | std::size_t mapped_size = 0; |
| 446 | std::size_t src_num_pages{node->GetNumPages()}; | 658 | std::size_t num_allocator_blocks = 0; |
| 447 | block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) { | ||
| 448 | if (info.state != KMemoryState::Free) { | ||
| 449 | return; | ||
| 450 | } | ||
| 451 | 659 | ||
| 452 | std::size_t dst_num_pages{GetSizeInRange(info, addr, end_addr) / PageSize}; | 660 | // Check if the memory is mapped. |
| 453 | VAddr dst_addr{GetAddressInRange(info, addr)}; | 661 | { |
| 662 | // Iterate over the memory. | ||
| 663 | cur_address = address; | ||
| 664 | mapped_size = 0; | ||
| 665 | |||
| 666 | auto it = block_manager->FindIterator(cur_address); | ||
| 667 | while (true) { | ||
| 668 | // Check that the iterator is valid. | ||
| 669 | ASSERT(it != block_manager->end()); | ||
| 670 | |||
| 671 | // Get the memory info. | ||
| 672 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 673 | |||
| 674 | // Verify the memory's state. | ||
| 675 | const bool is_normal = info.GetState() == KMemoryState::Normal && | ||
| 676 | info.GetAttribute() == KMemoryAttribute::None; | ||
| 677 | const bool is_free = info.GetState() == KMemoryState::Free; | ||
| 678 | R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory); | ||
| 679 | |||
| 680 | if (is_normal) { | ||
| 681 | R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory); | ||
| 682 | |||
| 683 | if (info.GetAddress() < address) { | ||
| 684 | ++num_allocator_blocks; | ||
| 685 | } | ||
| 686 | if (last_address < info.GetLastAddress()) { | ||
| 687 | ++num_allocator_blocks; | ||
| 688 | } | ||
| 689 | } | ||
| 454 | 690 | ||
| 455 | while (dst_num_pages) { | 691 | // Check if we're done. |
| 456 | if (!src_num_pages) { | 692 | if (last_address <= info.GetLastAddress()) { |
| 457 | node = std::next(node); | 693 | if (is_normal) { |
| 458 | map_addr = node->GetAddress(); | 694 | mapped_size += (last_address + 1 - cur_address); |
| 459 | src_num_pages = node->GetNumPages(); | 695 | } |
| 696 | break; | ||
| 460 | } | 697 | } |
| 461 | 698 | ||
| 462 | const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)}; | 699 | // Track the memory if it's mapped. |
| 463 | Operate(dst_addr, num_pages, KMemoryPermission::UserReadWrite, OperationType::Map, | 700 | if (is_normal) { |
| 464 | map_addr); | 701 | mapped_size += VAddr(info.GetEndAddress()) - cur_address; |
| 702 | } | ||
| 465 | 703 | ||
| 466 | dst_addr += num_pages * PageSize; | 704 | // Advance. |
| 467 | map_addr += num_pages * PageSize; | 705 | cur_address = info.GetEndAddress(); |
| 468 | src_num_pages -= num_pages; | 706 | ++it; |
| 469 | dst_num_pages -= num_pages; | ||
| 470 | } | 707 | } |
| 471 | }); | ||
| 472 | |||
| 473 | mapped_physical_memory_size += remaining_size; | ||
| 474 | |||
| 475 | const std::size_t num_pages{size / PageSize}; | ||
| 476 | block_manager->Update(addr, num_pages, KMemoryState::Free, KMemoryPermission::None, | ||
| 477 | KMemoryAttribute::None, KMemoryState::Normal, | ||
| 478 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None); | ||
| 479 | 708 | ||
| 480 | return ResultSuccess; | 709 | // If there's nothing mapped, we've nothing to do. |
| 481 | } | 710 | R_SUCCEED_IF(mapped_size == 0); |
| 711 | } | ||
| 482 | 712 | ||
| 483 | ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) { | 713 | // Make a page group for the unmap region. |
| 484 | // Lock the physical memory lock. | 714 | KPageLinkedList pg; |
| 485 | KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); | 715 | { |
| 716 | auto& impl = this->PageTableImpl(); | ||
| 717 | |||
| 718 | // Begin traversal. | ||
| 719 | Common::PageTable::TraversalContext context; | ||
| 720 | Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0}; | ||
| 721 | bool cur_valid = false; | ||
| 722 | Common::PageTable::TraversalEntry next_entry; | ||
| 723 | bool next_valid = false; | ||
| 724 | size_t tot_size = 0; | ||
| 725 | |||
| 726 | cur_address = address; | ||
| 727 | next_valid = | ||
| 728 | impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), cur_address); | ||
| 729 | next_entry.block_size = | ||
| 730 | (next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1))); | ||
| 731 | |||
| 732 | // Iterate, building the group. | ||
| 733 | while (true) { | ||
| 734 | if ((!next_valid && !cur_valid) || | ||
| 735 | (next_valid && cur_valid && | ||
| 736 | next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) { | ||
| 737 | cur_entry.block_size += next_entry.block_size; | ||
| 738 | } else { | ||
| 739 | if (cur_valid) { | ||
| 740 | // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr)); | ||
| 741 | R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize)); | ||
| 742 | } | ||
| 743 | |||
| 744 | // Update tracking variables. | ||
| 745 | tot_size += cur_entry.block_size; | ||
| 746 | cur_entry = next_entry; | ||
| 747 | cur_valid = next_valid; | ||
| 748 | } | ||
| 486 | 749 | ||
| 487 | // Lock the table. | 750 | if (cur_entry.block_size + tot_size >= size) { |
| 488 | KScopedLightLock lk(general_lock); | 751 | break; |
| 752 | } | ||
| 489 | 753 | ||
| 490 | const VAddr end_addr{addr + size}; | 754 | next_valid = |
| 491 | ResultCode result{ResultSuccess}; | 755 | impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); |
| 492 | std::size_t mapped_size{}; | 756 | } |
| 493 | 757 | ||
| 494 | // Verify that the region can be unmapped | 758 | // Add the last block. |
| 495 | block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) { | 759 | if (cur_valid) { |
| 496 | if (info.state == KMemoryState::Normal) { | 760 | // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr)); |
| 497 | if (info.attribute != KMemoryAttribute::None) { | 761 | R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize)); |
| 498 | result = ResultInvalidCurrentMemory; | 762 | } |
| 499 | return; | 763 | } |
| 764 | ASSERT(pg.GetNumPages() == mapped_size / PageSize); | ||
| 765 | |||
| 766 | // Reset the current tracking address, and make sure we clean up on failure. | ||
| 767 | cur_address = address; | ||
| 768 | auto remap_guard = detail::ScopeExit([&] { | ||
| 769 | if (cur_address > address) { | ||
| 770 | const VAddr last_map_address = cur_address - 1; | ||
| 771 | cur_address = address; | ||
| 772 | |||
| 773 | // Iterate over the memory we unmapped. | ||
| 774 | auto it = block_manager->FindIterator(cur_address); | ||
| 775 | auto pg_it = pg.Nodes().begin(); | ||
| 776 | PAddr pg_phys_addr = pg_it->GetAddress(); | ||
| 777 | size_t pg_pages = pg_it->GetNumPages(); | ||
| 778 | |||
| 779 | while (true) { | ||
| 780 | // Get the memory info for the pages we unmapped, convert to property. | ||
| 781 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 782 | |||
| 783 | // If the memory is normal, we unmapped it and need to re-map it. | ||
| 784 | if (info.GetState() == KMemoryState::Normal) { | ||
| 785 | // Determine the range to map. | ||
| 786 | size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address, | ||
| 787 | last_map_address + 1 - cur_address) / | ||
| 788 | PageSize; | ||
| 789 | |||
| 790 | // While we have pages to map, map them. | ||
| 791 | while (map_pages > 0) { | ||
| 792 | // Check if we're at the end of the physical block. | ||
| 793 | if (pg_pages == 0) { | ||
| 794 | // Ensure there are more pages to map. | ||
| 795 | ASSERT(pg_it != pg.Nodes().end()); | ||
| 796 | |||
| 797 | // Advance our physical block. | ||
| 798 | ++pg_it; | ||
| 799 | pg_phys_addr = pg_it->GetAddress(); | ||
| 800 | pg_pages = pg_it->GetNumPages(); | ||
| 801 | } | ||
| 802 | |||
| 803 | // Map whatever we can. | ||
| 804 | const size_t cur_pages = std::min(pg_pages, map_pages); | ||
| 805 | ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(), | ||
| 806 | OperationType::Map, pg_phys_addr) == ResultSuccess); | ||
| 807 | |||
| 808 | // Advance. | ||
| 809 | cur_address += cur_pages * PageSize; | ||
| 810 | map_pages -= cur_pages; | ||
| 811 | |||
| 812 | pg_phys_addr += cur_pages * PageSize; | ||
| 813 | pg_pages -= cur_pages; | ||
| 814 | } | ||
| 815 | } | ||
| 816 | |||
| 817 | // Check if we're done. | ||
| 818 | if (last_map_address <= info.GetLastAddress()) { | ||
| 819 | break; | ||
| 820 | } | ||
| 821 | |||
| 822 | // Advance. | ||
| 823 | ++it; | ||
| 500 | } | 824 | } |
| 501 | mapped_size += GetSizeInRange(info, addr, end_addr); | ||
| 502 | } else if (info.state != KMemoryState::Free) { | ||
| 503 | result = ResultInvalidCurrentMemory; | ||
| 504 | } | 825 | } |
| 505 | }); | 826 | }); |
| 506 | 827 | ||
| 507 | if (result.IsError()) { | 828 | // Iterate over the memory, unmapping as we go. |
| 508 | return result; | 829 | auto it = block_manager->FindIterator(cur_address); |
| 509 | } | 830 | while (true) { |
| 831 | // Check that the iterator is valid. | ||
| 832 | ASSERT(it != block_manager->end()); | ||
| 510 | 833 | ||
| 511 | if (!mapped_size) { | 834 | // Get the memory info. |
| 512 | return ResultSuccess; | 835 | const KMemoryInfo info = it->GetMemoryInfo(); |
| 513 | } | ||
| 514 | 836 | ||
| 515 | // Unmap each region within the range | 837 | // If the memory state is normal, we need to unmap it. |
| 516 | KPageLinkedList page_linked_list; | 838 | if (info.GetState() == KMemoryState::Normal) { |
| 517 | block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) { | 839 | // Determine the range to unmap. |
| 518 | if (info.state == KMemoryState::Normal) { | 840 | const size_t cur_pages = std::min(VAddr(info.GetEndAddress()) - cur_address, |
| 519 | const std::size_t block_size{GetSizeInRange(info, addr, end_addr)}; | 841 | last_address + 1 - cur_address) / |
| 520 | const std::size_t block_num_pages{block_size / PageSize}; | 842 | PageSize; |
| 521 | const VAddr block_addr{GetAddressInRange(info, addr)}; | 843 | |
| 522 | 844 | // Unmap. | |
| 523 | AddRegionToPages(block_addr, block_size / PageSize, page_linked_list); | 845 | R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)); |
| 524 | |||
| 525 | if (result = Operate(block_addr, block_num_pages, KMemoryPermission::None, | ||
| 526 | OperationType::Unmap); | ||
| 527 | result.IsError()) { | ||
| 528 | return; | ||
| 529 | } | ||
| 530 | } | 846 | } |
| 531 | }); | ||
| 532 | if (result.IsError()) { | ||
| 533 | return result; | ||
| 534 | } | ||
| 535 | 847 | ||
| 536 | const std::size_t num_pages{size / PageSize}; | 848 | // Check if we're done. |
| 537 | system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool, | 849 | if (last_address <= info.GetLastAddress()) { |
| 538 | allocation_option); | 850 | break; |
| 851 | } | ||
| 539 | 852 | ||
| 540 | block_manager->Update(addr, num_pages, KMemoryState::Free); | 853 | // Advance. |
| 854 | cur_address = info.GetEndAddress(); | ||
| 855 | ++it; | ||
| 856 | } | ||
| 541 | 857 | ||
| 858 | // Release the memory resource. | ||
| 859 | mapped_physical_memory_size -= mapped_size; | ||
| 542 | auto process{system.Kernel().CurrentProcess()}; | 860 | auto process{system.Kernel().CurrentProcess()}; |
| 543 | process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); | 861 | process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); |
| 544 | mapped_physical_memory_size -= mapped_size; | 862 | |
| 863 | // Update memory blocks. | ||
| 864 | system.Kernel().MemoryManager().Free(pg, size / PageSize, memory_pool, allocation_option); | ||
| 865 | block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None, | ||
| 866 | KMemoryAttribute::None); | ||
| 867 | |||
| 868 | // We succeeded. | ||
| 869 | remap_guard.Cancel(); | ||
| 545 | 870 | ||
| 546 | return ResultSuccess; | 871 | return ResultSuccess; |
| 547 | } | 872 | } |
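Both rewritten routines above rely on the same rollback idiom: register an undo action (unmap_guard / remap_guard) before starting the fallible work, then cancel it only after every step has succeeded, so any early error return leaves the address space as it was found. A self-contained sketch of that idiom (a generic scope guard, not yuzu's detail::ScopeExit) might look like:

    #include <utility>

    // Minimal scope guard: runs `undo` on destruction unless Cancel() was called.
    template <typename F>
    class ScopeGuard {
    public:
        explicit ScopeGuard(F&& undo) : undo_{std::forward<F>(undo)} {}
        ~ScopeGuard() {
            if (active_) {
                undo_();
            }
        }
        void Cancel() {
            active_ = false;
        }
        ScopeGuard(const ScopeGuard&) = delete;
        ScopeGuard& operator=(const ScopeGuard&) = delete;

    private:
        F undo_;
        bool active_{true};
    };

    // Usage mirroring MapPhysicalMemory above:
    //   ScopeGuard unmap_guard{[&] { /* unmap whatever was mapped so far */ }};
    //   ... fallible mapping steps, each of which may return early ...
    //   unmap_guard.Cancel();  // reached only once everything succeeded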