Diffstat (limited to 'src')
-rw-r--r--  src/core/hle/kernel/k_code_memory.cpp     |   3
-rw-r--r--  src/core/hle/kernel/k_page_linked_list.h  |   4
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp      | 242
-rw-r--r--  src/core/hle/kernel/k_page_table.h        |  33
-rw-r--r--  src/core/hle/kernel/svc.cpp               |  13
5 files changed, 241 insertions, 54 deletions
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index b365ce7b7..63bbe02e9 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -28,7 +28,8 @@ ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr
     auto& page_table = m_owner->PageTable();
 
     // Construct the page group.
-    m_page_group = KPageLinkedList(addr, Common::DivideUp(size, PageSize));
+    m_page_group =
+        KPageLinkedList(page_table.GetPhysicalAddr(addr), Common::DivideUp(size, PageSize));
 
     // Lock the memory.
     R_TRY(page_table.LockForCodeMemory(addr, size))
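The page group constructed here now describes physical pages, so the owner's virtual address is
translated through its page table before the group is built. A minimal sketch of that pattern,
assuming a page_table reference like the one obtained above and made-up address/size values
(only identifiers that appear in this diff are used):

    const VAddr va = 0x20000000;                      // hypothetical owner-space address
    const size_t size = 4 * PageSize;                 // hypothetical region size
    const PAddr pa = page_table.GetPhysicalAddr(va);  // translate before grouping
    KPageLinkedList group(pa, Common::DivideUp(size, PageSize));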
diff --git a/src/core/hle/kernel/k_page_linked_list.h b/src/core/hle/kernel/k_page_linked_list.h
index 0e2ae582a..869228322 100644
--- a/src/core/hle/kernel/k_page_linked_list.h
+++ b/src/core/hle/kernel/k_page_linked_list.h
@@ -89,6 +89,10 @@ public:
         return ResultSuccess;
     }
 
+    bool Empty() const {
+        return nodes.empty();
+    }
+
 private:
     std::list<Node> nodes;
 };
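The new Empty() accessor is what the page-table code below relies on to guarantee a group is
freshly constructed before it is filled in. A minimal usage sketch, assuming only the types
shown in this diff:

    KPageLinkedList pg;      // default-constructed: no nodes yet
    ASSERT(pg.Empty());      // the guard MakePageGroup performs via R_UNLESS
    // pg.AddBlock(...) appends a node, after which Empty() returns false.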
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 02d93b12e..599013cf6 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -486,6 +486,58 @@ VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
     return address;
 }
 
+ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages) {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    const size_t size = num_pages * PageSize;
+
+    // We're making a new group, not adding to an existing one.
+    R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);
+
+    // Begin traversal.
+    Common::PageTable::TraversalContext context;
+    Common::PageTable::TraversalEntry next_entry;
+    R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory);
+
+    // Prepare tracking variables.
+    PAddr cur_addr = next_entry.phys_addr;
+    size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
+    size_t tot_size = cur_size;
+
+    // Iterate, adding to group as we go.
+    const auto& memory_layout = system.Kernel().MemoryLayout();
+    while (tot_size < size) {
+        R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context),
+                 ResultInvalidCurrentMemory);
+
+        if (next_entry.phys_addr != (cur_addr + cur_size)) {
+            const size_t cur_pages = cur_size / PageSize;
+
+            R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
+            R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+            cur_addr = next_entry.phys_addr;
+            cur_size = next_entry.block_size;
+        } else {
+            cur_size += next_entry.block_size;
+        }
+
+        tot_size += next_entry.block_size;
+    }
+
+    // Ensure we add the right amount for the last block.
+    if (tot_size > size) {
+        cur_size -= (tot_size - size);
+    }
+
+    // Add the last block.
+    const size_t cur_pages = cur_size / PageSize;
+    R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
+    R_TRY(pg.AddBlock(cur_addr, cur_pages));
+
+    return ResultSuccess;
+}
+
 ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
                                           KPageTable& src_page_table, VAddr src_addr) {
     KScopedLightLock lk(general_lock);
@@ -1223,6 +1275,31 @@ ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryS
     return ResultSuccess;
 }
 
+ResultCode KPageTable::MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
+                                            KMemoryState state_mask, KMemoryState state,
+                                            KMemoryPermission perm_mask, KMemoryPermission perm,
+                                            KMemoryAttribute attr_mask, KMemoryAttribute attr) {
+    // Ensure that the page group isn't null.
+    ASSERT(out != nullptr);
+
+    // Make sure that the region we're mapping is valid for the table.
+    const size_t size = num_pages * PageSize;
+    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(general_lock);
+
+    // Check if state allows us to create the group.
+    R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
+                                 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+                                 attr_mask, attr));
+
+    // Create a new page group for the region.
+    R_TRY(this->MakePageGroup(*out, address, num_pages));
+
+    return ResultSuccess;
+}
+
 ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
                                                   Svc::MemoryPermission svc_perm) {
     const size_t num_pages = size / PageSize;
@@ -1605,57 +1682,21 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
 }
 
 ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
-    KScopedLightLock lk(general_lock);
-
-    KMemoryPermission new_perm = KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite;
-
-    KMemoryPermission old_perm{};
-
-    if (const ResultCode result{CheckMemoryState(
-            nullptr, &old_perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
-            KMemoryState::FlagCanCodeMemory, KMemoryPermission::All,
-            KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)};
-        result.IsError()) {
-        return result;
-    }
-
-    new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
-
-    block_manager->UpdateLock(
-        addr, size / PageSize,
-        [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
-            block->ShareToDevice(permission);
-        },
-        new_perm);
-
-    return ResultSuccess;
+    return this->LockMemoryAndOpen(
+        nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
+        KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite,
+        KMemoryAttribute::All, KMemoryAttribute::None,
+        static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
+                                       KMemoryPermission::KernelReadWrite),
+        KMemoryAttribute::Locked);
 }
 
 ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) {
-    KScopedLightLock lk(general_lock);
-
-    KMemoryPermission new_perm = KMemoryPermission::UserReadWrite;
-
-    KMemoryPermission old_perm{};
-
-    if (const ResultCode result{CheckMemoryState(
-            nullptr, &old_perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
-            KMemoryState::FlagCanCodeMemory, KMemoryPermission::None, KMemoryPermission::None,
-            KMemoryAttribute::All, KMemoryAttribute::Locked)};
-        result.IsError()) {
-        return result;
-    }
-
-    new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
-
-    block_manager->UpdateLock(
-        addr, size / PageSize,
-        [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
-            block->UnshareToDevice(permission);
-        },
-        new_perm);
-
-    return ResultSuccess;
+    return this->UnlockMemory(addr, size, KMemoryState::FlagCanCodeMemory,
+                              KMemoryState::FlagCanCodeMemory, KMemoryPermission::None,
+                              KMemoryPermission::None, KMemoryAttribute::All,
+                              KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
+                              KMemoryAttribute::Locked, nullptr);
 }
 
 ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
@@ -1991,4 +2032,109 @@ ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermissi
     return ResultSuccess;
 }
 
+ResultCode KPageTable::LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr,
+                                         size_t size, KMemoryState state_mask, KMemoryState state,
+                                         KMemoryPermission perm_mask, KMemoryPermission perm,
+                                         KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                                         KMemoryPermission new_perm, KMemoryAttribute lock_attr) {
+    // Validate basic preconditions.
+    ASSERT((lock_attr & attr) == KMemoryAttribute::None);
+    ASSERT((lock_attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
+           KMemoryAttribute::None);
+
+    // Validate the lock request.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(general_lock);
+
+    // Check that the output page group is empty, if it exists.
+    if (out_pg) {
+        ASSERT(out_pg->GetNumPages() == 0);
+    }
+
+    // Check the state.
+    KMemoryState old_state{};
+    KMemoryPermission old_perm{};
+    KMemoryAttribute old_attr{};
+    size_t num_allocator_blocks{};
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
+                                 addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+                                 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+                                 attr_mask, attr));
+
+    // Get the physical address, if we're supposed to.
+    if (out_paddr != nullptr) {
+        ASSERT(this->GetPhysicalAddressLocked(out_paddr, addr));
+    }
+
+    // Make the page group, if we're supposed to.
+    if (out_pg != nullptr) {
+        R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
+    }
+
+    // Decide on new perm and attr.
+    new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+    KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);
+
+    // Update permission, if we need to.
+    if (new_perm != old_perm) {
+        R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
+    }
+
+    // Apply the memory block updates.
+    block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
+
+    return ResultSuccess;
+}
+
+ResultCode KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
+                                    KMemoryState state, KMemoryPermission perm_mask,
+                                    KMemoryPermission perm, KMemoryAttribute attr_mask,
+                                    KMemoryAttribute attr, KMemoryPermission new_perm,
+                                    KMemoryAttribute lock_attr, const KPageLinkedList* pg) {
+    // Validate basic preconditions.
+    ASSERT((attr_mask & lock_attr) == lock_attr);
+    ASSERT((attr & lock_attr) == lock_attr);
+
+    // Validate the unlock request.
+    const size_t num_pages = size / PageSize;
+    R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
+
+    // Lock the table.
+    KScopedLightLock lk(general_lock);
+
+    // Check the state.
+    KMemoryState old_state{};
+    KMemoryPermission old_perm{};
+    KMemoryAttribute old_attr{};
+    size_t num_allocator_blocks{};
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
+                                 addr, size, state_mask | KMemoryState::FlagReferenceCounted,
+                                 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
+                                 attr_mask, attr));
+
+    // Check the page group.
+    if (pg != nullptr) {
+        UNIMPLEMENTED_MSG("PageGroup support is unimplemented!");
+    }
+
+    // Decide on new perm and attr.
+    new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
+    KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);
+
+    // Update permission, if we need to.
+    if (new_perm != old_perm) {
+        R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions));
+    }
+
+    // Apply the memory block updates.
+    block_manager->Update(addr, num_pages, old_state, new_perm, new_attr);
+
+    return ResultSuccess;
+}
+
 } // namespace Kernel
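With the lock plumbing factored into LockMemoryAndOpen/UnlockMemory, callers pair the
code-memory helpers symmetrically. A minimal caller sketch, assuming a valid user-read-write
region and using only identifiers from this diff:

    // Lock: the state must allow code memory; the region gains the Locked attribute
    // and its permission becomes NotMapped|KernelReadWrite for the duration.
    R_TRY(page_table.LockForCodeMemory(addr, size));
    // ... the region is now usable as kernel-owned code memory ...
    // Unlock: requires the Locked attribute, clears it, and restores UserReadWrite.
    R_TRY(page_table.UnlockForCodeMemory(addr, size));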
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 54c6adf8d..bfabdf38c 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -12,6 +12,7 @@
 #include "core/file_sys/program_metadata.h"
 #include "core/hle/kernel/k_light_lock.h"
 #include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_memory_layout.h"
 #include "core/hle/kernel/k_memory_manager.h"
 #include "core/hle/result.h"
 
@@ -71,6 +72,10 @@ public:
     ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
     ResultCode LockForCodeMemory(VAddr addr, std::size_t size);
     ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size);
+    ResultCode MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
+                                    KMemoryState state_mask, KMemoryState state,
+                                    KMemoryPermission perm_mask, KMemoryPermission perm,
+                                    KMemoryAttribute attr_mask, KMemoryAttribute attr);
 
     Common::PageTable& PageTableImpl() {
         return page_table_impl;
@@ -159,10 +164,37 @@ private:
                                attr_mask, attr, ignore_attr);
     }
 
+    ResultCode LockMemoryAndOpen(KPageLinkedList* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
+                                 KMemoryState state_mask, KMemoryState state,
+                                 KMemoryPermission perm_mask, KMemoryPermission perm,
+                                 KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                                 KMemoryPermission new_perm, KMemoryAttribute lock_attr);
+    ResultCode UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
+                            KMemoryPermission perm_mask, KMemoryPermission perm,
+                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                            KMemoryPermission new_perm, KMemoryAttribute lock_attr,
+                            const KPageLinkedList* pg);
+
+    ResultCode MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages);
+
     bool IsLockedByCurrentThread() const {
         return general_lock.IsLockedByCurrentThread();
     }
 
+    bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
+        ASSERT(this->IsLockedByCurrentThread());
+
+        return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr);
+    }
+
+    bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
+        ASSERT(this->IsLockedByCurrentThread());
+
+        *out = GetPhysicalAddr(virt_addr);
+
+        return *out != 0;
+    }
+
     mutable KLightLock general_lock;
     mutable KLightLock map_physical_memory_lock;
 
@@ -322,6 +354,7 @@ private:
     bool is_aslr_enabled{};
 
     u32 heap_fill_value{};
+    const KMemoryRegion* cached_physical_heap_region{};
 
     KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
     KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront};
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 839171e85..976d63234 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1362,8 +1362,11 @@ static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Hand
             ResultInvalidMemoryRegion);
 
     // Create a new page group.
-    KMemoryInfo kBlockInfo = dst_pt.QueryInfo(dst_address);
-    KPageLinkedList pg(kBlockInfo.GetAddress(), kBlockInfo.GetNumPages());
+    KPageLinkedList pg;
+    R_TRY(src_pt.MakeAndOpenPageGroup(
+        std::addressof(pg), src_address, size / PageSize, KMemoryState::FlagCanMapProcess,
+        KMemoryState::FlagCanMapProcess, KMemoryPermission::None, KMemoryPermission::None,
+        KMemoryAttribute::All, KMemoryAttribute::None));
 
     // Map the group.
     R_TRY(dst_pt.MapPages(dst_address, pg, KMemoryState::SharedCode,
@@ -1408,8 +1411,8 @@ static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Ha
 }
 
 static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
-    LOG_TRACE(Kernel_SVC, "called, handle_out={}, address=0x{:X}, size=0x{:X}",
-              static_cast<void*>(out), address, size);
+    LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, size=0x{:X}", address, size);
+
     // Get kernel instance.
     auto& kernel = system.Kernel();
 
@@ -1664,7 +1667,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha
         return ResultInvalidAddress;
     }
 
-    if (size == 0 || Common::Is4KBAligned(size)) {
+    if (size == 0 || !Common::Is4KBAligned(size)) {
         LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
         return ResultInvalidSize;
     }
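The last hunk inverts a validity check: sizes that are not 4 KiB aligned are now rejected, where
the old condition rejected aligned ones. A standalone sketch of the intended predicate, using a
hypothetical local Is4KBAligned rather than the Common:: helper itself, and assuming 4 KiB pages:

    constexpr bool Is4KBAligned(size_t value) {
        return (value & 0xFFF) == 0;  // low 12 bits clear => multiple of 4096
    }
    static_assert(Is4KBAligned(0x2000));   // page-aligned size: passes the fixed check
    static_assert(!Is4KBAligned(0x1234));  // unaligned size: now correctly rejected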