summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp | 126 ++++++++++++++++++++++
-rw-r--r--  src/core/hle/kernel/k_page_table.h   |  17 ++-
2 files changed, 141 insertions(+), 2 deletions(-)
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 0602de1f7..02d93b12e 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -424,6 +424,68 @@ ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std
424 return ResultSuccess; 424 return ResultSuccess;
425} 425}
426 426
427VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
428 std::size_t num_pages, std::size_t alignment, std::size_t offset,
429 std::size_t guard_pages) {
430 VAddr address = 0;
431
432 if (num_pages <= region_num_pages) {
433 if (this->IsAslrEnabled()) {
434 // Try to directly find a free area up to 8 times.
435 for (std::size_t i = 0; i < 8; i++) {
436 const std::size_t random_offset =
437 KSystemControl::GenerateRandomRange(
438 0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) *
439 alignment;
440 const VAddr candidate =
441 Common::AlignDown((region_start + random_offset), alignment) + offset;
442
443 KMemoryInfo info = this->QueryInfoImpl(candidate);
444
445 if (info.state != KMemoryState::Free) {
446 continue;
447 }
448 if (region_start > candidate) {
449 continue;
450 }
451 if (info.GetAddress() + guard_pages * PageSize > candidate) {
452 continue;
453 }
454
455 const VAddr candidate_end = candidate + (num_pages + guard_pages) * PageSize - 1;
456 if (candidate_end > info.GetLastAddress()) {
457 continue;
458 }
459 if (candidate_end > region_start + region_num_pages * PageSize - 1) {
460 continue;
461 }
462
463 address = candidate;
464 break;
465 }
466 // Fall back to finding the first free area with a random offset.
467 if (address == 0) {
468 // NOTE: Nintendo does not account for guard pages here.
469 // This may theoretically cause an offset to be chosen that cannot be mapped. We
470 // will account for guard pages.
471 const std::size_t offset_pages = KSystemControl::GenerateRandomRange(
472 0, region_num_pages - num_pages - guard_pages);
473 address = block_manager->FindFreeArea(region_start + offset_pages * PageSize,
474 region_num_pages - offset_pages, num_pages,
475 alignment, offset, guard_pages);
476 }
477 }
478
479 // Find the first free area.
480 if (address == 0) {
481 address = block_manager->FindFreeArea(region_start, region_num_pages, num_pages,
482 alignment, offset, guard_pages);
483 }
484 }
485
486 return address;
487}
488
427ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, 489ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
428 KPageTable& src_page_table, VAddr src_addr) { 490 KPageTable& src_page_table, VAddr src_addr) {
429 KScopedLightLock lk(general_lock); 491 KScopedLightLock lk(general_lock);
@@ -1055,6 +1117,46 @@ ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list
1055 return ResultSuccess; 1117 return ResultSuccess;
1056} 1118}
1057 1119
1120ResultCode KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
1121 PAddr phys_addr, bool is_pa_valid, VAddr region_start,
1122 std::size_t region_num_pages, KMemoryState state,
1123 KMemoryPermission perm) {
1124 ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize);
1125
1126 // Ensure this is a valid map request.
1127 R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state),
1128 ResultInvalidCurrentMemory);
1129 R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory);
1130
1131 // Lock the table.
1132 KScopedLightLock lk(general_lock);
1133
1134 // Find a random address to map at.
1135 VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0,
1136 this->GetNumGuardPages());
1137 R_UNLESS(addr != 0, ResultOutOfMemory);
1138 ASSERT(Common::IsAligned(addr, alignment));
1139 ASSERT(this->CanContain(addr, num_pages * PageSize, state));
1140 ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free,
1141 KMemoryPermission::None, KMemoryPermission::None,
1142 KMemoryAttribute::None, KMemoryAttribute::None)
1143 .IsSuccess());
1144
1145 // Perform mapping operation.
1146 if (is_pa_valid) {
1147 R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr));
1148 } else {
1149 UNIMPLEMENTED();
1150 }
1151
1152 // Update the blocks.
1153 block_manager->Update(addr, num_pages, state, perm);
1154
1155 // We successfully mapped the pages.
1156 *out_addr = addr;
1157 return ResultSuccess;
1158}
1159
1058ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) { 1160ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) {
1059 ASSERT(this->IsLockedByCurrentThread()); 1161 ASSERT(this->IsLockedByCurrentThread());
1060 1162
@@ -1097,6 +1199,30 @@ ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
1097 return ResultSuccess; 1199 return ResultSuccess;
1098} 1200}
1099 1201
1202ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) {
1203 // Check that the unmap is in range.
1204 const std::size_t size = num_pages * PageSize;
1205 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
1206
1207 // Lock the table.
1208 KScopedLightLock lk(general_lock);
1209
1210 // Check the memory state.
1211 std::size_t num_allocator_blocks{};
1212 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
1213 KMemoryState::All, state, KMemoryPermission::None,
1214 KMemoryPermission::None, KMemoryAttribute::All,
1215 KMemoryAttribute::None));
1216
1217 // Perform the unmap.
1218 R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap));
1219
1220 // Update the blocks.
1221 block_manager->Update(address, num_pages, KMemoryState::Free, KMemoryPermission::None);
1222
1223 return ResultSuccess;
1224}
1225
1100ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, 1226ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
1101 Svc::MemoryPermission svc_perm) { 1227 Svc::MemoryPermission svc_perm) {
1102 const size_t num_pages = size / PageSize; 1228 const size_t num_pages = size / PageSize;
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index e99abe36a..54c6adf8d 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -46,7 +46,14 @@ public:
46 ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); 46 ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
47 ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state, 47 ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
48 KMemoryPermission perm); 48 KMemoryPermission perm);
49 ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
50 PAddr phys_addr, KMemoryState state, KMemoryPermission perm) {
51 return this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
52 this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize,
53 state, perm);
54 }
49 ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state); 55 ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state);
56 ResultCode UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
50 ResultCode SetProcessMemoryPermission(VAddr addr, std::size_t size, 57 ResultCode SetProcessMemoryPermission(VAddr addr, std::size_t size,
51 Svc::MemoryPermission svc_perm); 58 Svc::MemoryPermission svc_perm);
52 KMemoryInfo QueryInfo(VAddr addr); 59 KMemoryInfo QueryInfo(VAddr addr);
@@ -91,6 +98,9 @@ private:
91 ResultCode InitializeMemoryLayout(VAddr start, VAddr end); 98 ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
92 ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list, 99 ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
93 KMemoryPermission perm); 100 KMemoryPermission perm);
101 ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment,
102 PAddr phys_addr, bool is_pa_valid, VAddr region_start,
103 std::size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
94 ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list); 104 ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list);
95 bool IsRegionMapped(VAddr address, u64 size); 105 bool IsRegionMapped(VAddr address, u64 size);
96 bool IsRegionContiguous(VAddr addr, u64 size) const; 106 bool IsRegionContiguous(VAddr addr, u64 size) const;
@@ -105,6 +115,9 @@ private:
105 VAddr GetRegionAddress(KMemoryState state) const; 115 VAddr GetRegionAddress(KMemoryState state) const;
106 std::size_t GetRegionSize(KMemoryState state) const; 116 std::size_t GetRegionSize(KMemoryState state) const;
107 117
118 VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
119 std::size_t alignment, std::size_t offset, std::size_t guard_pages);
120
108 ResultCode CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, 121 ResultCode CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr,
109 std::size_t size, KMemoryState state_mask, 122 std::size_t size, KMemoryState state_mask,
110 KMemoryState state, KMemoryPermission perm_mask, 123 KMemoryState state, KMemoryPermission perm_mask,
@@ -137,7 +150,7 @@ private:
137 return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, 150 return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
138 state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr); 151 state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
139 } 152 }
140 ResultCode CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, 153 ResultCode CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
141 KMemoryState state, KMemoryPermission perm_mask, 154 KMemoryState state, KMemoryPermission perm_mask,
142 KMemoryPermission perm, KMemoryAttribute attr_mask, 155 KMemoryPermission perm, KMemoryAttribute attr_mask,
143 KMemoryAttribute attr, 156 KMemoryAttribute attr,
@@ -210,7 +223,7 @@ public:
210 constexpr VAddr GetAliasCodeRegionSize() const { 223 constexpr VAddr GetAliasCodeRegionSize() const {
211 return alias_code_region_end - alias_code_region_start; 224 return alias_code_region_end - alias_code_region_start;
212 } 225 }
213 size_t GetNormalMemorySize() { 226 std::size_t GetNormalMemorySize() {
214 KScopedLightLock lk(general_lock); 227 KScopedLightLock lk(general_lock);
215 return GetHeapSize() + mapped_physical_memory_size; 228 return GetHeapSize() + mapped_physical_memory_size;
216 } 229 }