summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/core/hle/kernel/k_page_table.cpp77
-rw-r--r--src/core/hle/kernel/k_page_table.h6
2 files changed, 83 insertions, 0 deletions
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 02d93b12e..0a932334f 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -486,6 +486,58 @@ VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
486 return address; 486 return address;
487} 487}
488 488
// NOTE(review): this file is a cgit diff capture; the leading digits (489, 490, ...) on each
// line are the diff view's line numbers fused onto the code by text extraction — they are not
// part of the actual C++ source.
//
// MakePageGroup: populates the (empty) page group `pg` with the physical memory blocks that
// back the virtual range [addr, addr + num_pages * PageSize). It walks the page table from
// `addr`, coalesces physically contiguous runs into single blocks, and emits a block whenever
// the next traversal entry is not physically contiguous with the current run.
//
// Preconditions (enforced below): the table's general lock is held by the caller
// (ASSERT(IsLockedByCurrentThread())), and `pg` is empty.
// Returns: ResultInvalidCurrentMemory if `pg` is non-empty, if traversal of the range fails,
// or if any backing block is not heap physical memory; otherwise ResultSuccess.
489ResultCode KPageTable::MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages) {
490    ASSERT(this->IsLockedByCurrentThread());
491
492    const size_t size = num_pages * PageSize;
493
494    // We're making a new group, not adding to an existing one.
495    R_UNLESS(pg.Empty(), ResultInvalidCurrentMemory);
496
497    // Begin traversal.
498    Common::PageTable::TraversalContext context;
499    Common::PageTable::TraversalEntry next_entry;
500    R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory);
501
502    // Prepare tracking variables.
// cur_size starts as the portion of the first block from cur_addr to the block's end
// (block_size minus cur_addr's offset within the block) — addr may not be block-aligned.
503    PAddr cur_addr = next_entry.phys_addr;
504    size_t cur_size = next_entry.block_size - (cur_addr & (next_entry.block_size - 1));
505    size_t tot_size = cur_size;
506
507    // Iterate, adding to group as we go.
508    const auto& memory_layout = system.Kernel().MemoryLayout();
509    while (tot_size < size) {
510        R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context),
511                 ResultInvalidCurrentMemory);
512
// Physical discontinuity: flush the accumulated run as one block, then start a new run.
513        if (next_entry.phys_addr != (cur_addr + cur_size)) {
514            const size_t cur_pages = cur_size / PageSize;
515
// Only heap physical memory may be placed in a page group.
516            R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
517            R_TRY(pg.AddBlock(cur_addr, cur_pages));
518
519            cur_addr = next_entry.phys_addr;
520            cur_size = next_entry.block_size;
521        } else {
// Contiguous with the current run: extend it instead of emitting a block.
522            cur_size += next_entry.block_size;
523        }
524
525        tot_size += next_entry.block_size;
526    }
527
528    // Ensure we add the right amount for the last block.
// The final traversal entry may extend past the requested range; trim the overshoot so the
// group covers exactly num_pages pages.
529    if (tot_size > size) {
530        cur_size -= (tot_size - size);
531    }
532
533    // Add the last block.
534    const size_t cur_pages = cur_size / PageSize;
535    R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
536    R_TRY(pg.AddBlock(cur_addr, cur_pages));
537
538    return ResultSuccess;
539}
540
489ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, 541ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
490 KPageTable& src_page_table, VAddr src_addr) { 542 KPageTable& src_page_table, VAddr src_addr) {
491 KScopedLightLock lk(general_lock); 543 KScopedLightLock lk(general_lock);
@@ -1223,6 +1275,31 @@ ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryS
1223 return ResultSuccess; 1275 return ResultSuccess;
1224} 1276}
1225 1277
// NOTE(review): cgit diff residue — the leading digits (1278, 1279, ...) are the diff view's
// line numbers fused onto the code by extraction, not part of the source.
//
// MakeAndOpenPageGroup: public entry point that builds a page group for [address,
// address + num_pages * PageSize) after validating the region. It requires the range to lie
// within this table, takes the general lock, and checks that the region's memory state,
// permissions, and attributes match the given masks — with KMemoryState::FlagReferenceCounted
// forced into both the state mask and the required state — before delegating to MakePageGroup.
//
// NOTE(review): despite the "Open" in the name, no explicit reference-open call on the
// resulting group is visible in this chunk — presumably handled by AddBlock or by the caller;
// confirm against the rest of the file.
//
// Returns: ResultInvalidCurrentMemory if the range is not contained in the table; otherwise
// any failure from CheckMemoryState or MakePageGroup; ResultSuccess on success.
1278ResultCode KPageTable::MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
1279                                            KMemoryState state_mask, KMemoryState state,
1280                                            KMemoryPermission perm_mask, KMemoryPermission perm,
1281                                            KMemoryAttribute attr_mask, KMemoryAttribute attr) {
1282    // Ensure that the page group isn't null.
1283    ASSERT(out != nullptr);
1284
1285    // Make sure that the region we're mapping is valid for the table.
1286    const size_t size = num_pages * PageSize;
1287    R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
1288
1289    // Lock the table.
1290    KScopedLightLock lk(general_lock);
1291
1292    // Check if state allows us to create the group.
1293    R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted,
1294                                 state | KMemoryState::FlagReferenceCounted, perm_mask, perm,
1295                                 attr_mask, attr));
1296
1297    // Create a new page group for the region.
1298    R_TRY(this->MakePageGroup(*out, address, num_pages));
1299
1300    return ResultSuccess;
1301}
1302
1226ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, 1303ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
1227 Svc::MemoryPermission svc_perm) { 1304 Svc::MemoryPermission svc_perm) {
1228 const size_t num_pages = size / PageSize; 1305 const size_t num_pages = size / PageSize;
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 2ae7affa0..b61a39145 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -72,6 +72,10 @@ public:
72 ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size); 72 ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
73 ResultCode LockForCodeMemory(VAddr addr, std::size_t size); 73 ResultCode LockForCodeMemory(VAddr addr, std::size_t size);
74 ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size); 74 ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size);
75 ResultCode MakeAndOpenPageGroup(KPageLinkedList* out, VAddr address, size_t num_pages,
76 KMemoryState state_mask, KMemoryState state,
77 KMemoryPermission perm_mask, KMemoryPermission perm,
78 KMemoryAttribute attr_mask, KMemoryAttribute attr);
75 79
76 Common::PageTable& PageTableImpl() { 80 Common::PageTable& PageTableImpl() {
77 return page_table_impl; 81 return page_table_impl;
@@ -160,6 +164,8 @@ private:
160 attr_mask, attr, ignore_attr); 164 attr_mask, attr, ignore_attr);
161 } 165 }
162 166
167 ResultCode MakePageGroup(KPageLinkedList& pg, VAddr addr, size_t num_pages);
168
163 bool IsLockedByCurrentThread() const { 169 bool IsLockedByCurrentThread() const {
164 return general_lock.IsLockedByCurrentThread(); 170 return general_lock.IsLockedByCurrentThread();
165 } 171 }