summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/core/CMakeLists.txt2
-rw-r--r--src/core/hle/kernel/memory/page_table.cpp1234
-rw-r--r--src/core/hle/kernel/memory/page_table.h274
3 files changed, 1510 insertions, 0 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index b0a010846..4ca68a309 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -166,6 +166,8 @@ add_library(core STATIC
166 hle/kernel/memory/page_linked_list.h 166 hle/kernel/memory/page_linked_list.h
167 hle/kernel/memory/page_heap.cpp 167 hle/kernel/memory/page_heap.cpp
168 hle/kernel/memory/page_heap.h 168 hle/kernel/memory/page_heap.h
169 hle/kernel/memory/page_table.cpp
170 hle/kernel/memory/page_table.h
169 hle/kernel/memory/slab_heap.h 171 hle/kernel/memory/slab_heap.h
170 hle/kernel/memory/system_control.cpp 172 hle/kernel/memory/system_control.cpp
171 hle/kernel/memory/system_control.h 173 hle/kernel/memory/system_control.h
diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
new file mode 100644
index 000000000..01f9e99eb
--- /dev/null
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -0,0 +1,1234 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/alignment.h"
6#include "common/assert.h"
7#include "common/scope_exit.h"
8#include "core/core.h"
9#include "core/device_memory.h"
10#include "core/hle/kernel/errors.h"
11#include "core/hle/kernel/kernel.h"
12#include "core/hle/kernel/memory/address_space_info.h"
13#include "core/hle/kernel/memory/memory_block.h"
14#include "core/hle/kernel/memory/memory_block_manager.h"
15#include "core/hle/kernel/memory/page_linked_list.h"
16#include "core/hle/kernel/memory/page_table.h"
17#include "core/hle/kernel/memory/system_control.h"
18#include "core/hle/kernel/process.h"
19#include "core/hle/kernel/resource_limit.h"
20#include "core/memory.h"
21
22namespace Kernel::Memory {
23
24namespace {
25
26constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
27 switch (as_type) {
28 case FileSys::ProgramAddressSpaceType::Is32Bit:
29 case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
30 return 32;
31 case FileSys::ProgramAddressSpaceType::Is36Bit:
32 return 36;
33 case FileSys::ProgramAddressSpaceType::Is39Bit:
34 return 39;
35 default:
36 UNREACHABLE();
37 return {};
38 }
39}
40
41constexpr u64 GetAddressInRange(const MemoryInfo& info, VAddr addr) {
42 if (info.GetAddress() < addr) {
43 return addr;
44 }
45 return info.GetAddress();
46}
47
48constexpr std::size_t GetSizeInRange(const MemoryInfo& info, VAddr start, VAddr end) {
49 std::size_t size{info.GetSize()};
50 if (info.GetAddress() < start) {
51 size -= start - info.GetAddress();
52 }
53 if (info.GetEndAddress() > end) {
54 size -= info.GetEndAddress() - end;
55 }
56 return size;
57}
58
59} // namespace
60
61PageTable::PageTable(Core::System& system) : system{system} {}
62
// Configures this page table for a userland process: derives the address-space
// width from as_type, lays out the code/alias/heap/stack/kernel-map regions
// (randomizing their placement when ASLR is enabled), verifies the chosen
// regions are disjoint, and initializes the backing block manager over the
// whole address space.
ResultCode PageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type,
                                           bool enable_aslr, VAddr code_addr, std::size_t code_size,
                                           Memory::MemoryManager::Pool pool) {

    // Helpers querying the layout tables for the current address-space width.
    // They capture address_space_width by reference, so they are only valid
    // once it has been assigned below.
    const auto GetSpaceStart = [&](AddressSpaceInfo::Type type) {
        return AddressSpaceInfo::GetAddressSpaceStart(address_space_width, type);
    };
    const auto GetSpaceSize = [&](AddressSpaceInfo::Type type) {
        return AddressSpaceInfo::GetAddressSpaceSize(address_space_width, type);
    };

    // Set our width and heap/alias sizes
    address_space_width = GetAddressSpaceWidthFromType(as_type);
    const VAddr start = 0;
    const VAddr end{1ULL << address_space_width};
    std::size_t alias_region_size{GetSpaceSize(AddressSpaceInfo::Type::Alias)};
    std::size_t heap_region_size{GetSpaceSize(AddressSpaceInfo::Type::Heap)};

    // The process code must lie entirely within the address space.
    ASSERT(start <= code_addr);
    ASSERT(code_addr < code_addr + code_size);
    ASSERT(code_addr + code_size - 1 <= end - 1);

    // Adjust heap/alias size if we don't have an alias region
    if (as_type == FileSys::ProgramAddressSpaceType::Is32BitNoMap) {
        heap_region_size += alias_region_size;
        alias_region_size = 0;
    }

    // Set code regions and determine remaining
    constexpr std::size_t RegionAlignment{2 * 1024 * 1024}; // 2 MiB region granularity
    VAddr process_code_start{};
    VAddr process_code_end{};
    std::size_t stack_region_size{};
    std::size_t kernel_map_region_size{};

    if (address_space_width == 39) {
        // 39-bit layout: dedicated stack and kernel-map regions; the code
        // region spans the Large64Bit area, and the process code footprint is
        // aligned to the 2 MiB region granularity within it.
        alias_region_size = GetSpaceSize(AddressSpaceInfo::Type::Alias);
        heap_region_size = GetSpaceSize(AddressSpaceInfo::Type::Heap);
        stack_region_size = GetSpaceSize(AddressSpaceInfo::Type::Stack);
        kernel_map_region_size = GetSpaceSize(AddressSpaceInfo::Type::Is32Bit);
        code_region_start = GetSpaceStart(AddressSpaceInfo::Type::Large64Bit);
        code_region_end = code_region_start + GetSpaceSize(AddressSpaceInfo::Type::Large64Bit);
        alias_code_region_start = code_region_start;
        alias_code_region_end = code_region_end;
        process_code_start = Common::AlignDown(code_addr, RegionAlignment);
        process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment);
    } else {
        // 32/36-bit layout: stack and kernel-map share the code region, and
        // the process code occupies the whole code region.
        stack_region_size = 0;
        kernel_map_region_size = 0;
        code_region_start = GetSpaceStart(AddressSpaceInfo::Type::Is32Bit);
        code_region_end = code_region_start + GetSpaceSize(AddressSpaceInfo::Type::Is32Bit);
        stack_region_start = code_region_start;
        alias_code_region_start = code_region_start;
        alias_code_region_end = GetSpaceStart(AddressSpaceInfo::Type::Small64Bit) +
                                GetSpaceSize(AddressSpaceInfo::Type::Small64Bit);
        stack_region_end = code_region_end;
        kernel_map_region_start = code_region_start;
        kernel_map_region_end = code_region_end;
        process_code_start = code_region_start;
        process_code_end = code_region_end;
    }

    // Set other basic fields
    is_aslr_enabled = enable_aslr;
    address_space_start = start;
    address_space_end = end;
    is_kernel = false;

    // Determine the region we can place our undetermineds in: whichever side
    // of the process code (below or above it) leaves more room.
    VAddr alloc_start{};
    std::size_t alloc_size{};
    if ((process_code_start - code_region_start) >= (end - process_code_end)) {
        alloc_start = code_region_start;
        alloc_size = process_code_start - code_region_start;
    } else {
        alloc_start = process_code_end;
        alloc_size = end - process_code_end;
    }
    const std::size_t needed_size{
        (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
    if (alloc_size < needed_size) {
        UNREACHABLE();
        return ERR_OUT_OF_MEMORY;
    }

    const std::size_t remaining_size{alloc_size - needed_size};

    // Determine random placements for each region (multiples of the 2 MiB
    // region alignment within the leftover space).
    std::size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
    if (enable_aslr) {
        alias_rnd = SystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
                    RegionAlignment;
        heap_rnd = SystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
                   RegionAlignment;
        stack_rnd = SystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
                    RegionAlignment;
        kmap_rnd = SystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
                   RegionAlignment;
    }

    // Setup heap and alias regions. Each region below is shifted past every
    // region whose random offset placed it earlier, so the final regions are
    // disjoint (verified by the asserts further down).
    alias_region_start = alloc_start + alias_rnd;
    alias_region_end = alias_region_start + alias_region_size;
    heap_region_start = alloc_start + heap_rnd;
    heap_region_end = heap_region_start + heap_region_size;

    if (alias_rnd <= heap_rnd) {
        heap_region_start += alias_region_size;
        heap_region_end += alias_region_size;
    } else {
        alias_region_start += heap_region_size;
        alias_region_end += heap_region_size;
    }

    // Setup stack region
    if (stack_region_size) {
        stack_region_start = alloc_start + stack_rnd;
        stack_region_end = stack_region_start + stack_region_size;

        if (alias_rnd < stack_rnd) {
            stack_region_start += alias_region_size;
            stack_region_end += alias_region_size;
        } else {
            alias_region_start += stack_region_size;
            alias_region_end += stack_region_size;
        }

        if (heap_rnd < stack_rnd) {
            stack_region_start += heap_region_size;
            stack_region_end += heap_region_size;
        } else {
            heap_region_start += stack_region_size;
            heap_region_end += stack_region_size;
        }
    }

    // Setup kernel map region
    if (kernel_map_region_size) {
        kernel_map_region_start = alloc_start + kmap_rnd;
        kernel_map_region_end = kernel_map_region_start + kernel_map_region_size;

        if (alias_rnd < kmap_rnd) {
            kernel_map_region_start += alias_region_size;
            kernel_map_region_end += alias_region_size;
        } else {
            alias_region_start += kernel_map_region_size;
            alias_region_end += kernel_map_region_size;
        }

        if (heap_rnd < kmap_rnd) {
            kernel_map_region_start += heap_region_size;
            kernel_map_region_end += heap_region_size;
        } else {
            heap_region_start += kernel_map_region_size;
            heap_region_end += kernel_map_region_size;
        }

        if (stack_region_size) {
            if (stack_rnd < kmap_rnd) {
                kernel_map_region_start += stack_region_size;
                kernel_map_region_end += stack_region_size;
            } else {
                stack_region_start += kernel_map_region_size;
                stack_region_end += kernel_map_region_size;
            }
        }
    }

    // Set heap members
    current_heap_end = heap_region_start;
    max_heap_size = 0;
    max_physical_memory_size = 0;

    // Ensure that the regions fall inside our address space
    auto IsInAddressSpace = [&](VAddr addr) {
        return address_space_start <= addr && addr <= address_space_end;
    };
    ASSERT(IsInAddressSpace(alias_region_start));
    ASSERT(IsInAddressSpace(alias_region_end));
    ASSERT(IsInAddressSpace(heap_region_start));
    ASSERT(IsInAddressSpace(heap_region_end));
    ASSERT(IsInAddressSpace(stack_region_start));
    ASSERT(IsInAddressSpace(stack_region_end));
    ASSERT(IsInAddressSpace(kernel_map_region_start));
    ASSERT(IsInAddressSpace(kernel_map_region_end));

    // Ensure that we selected regions that don't overlap
    const VAddr alias_start{alias_region_start};
    const VAddr alias_last{alias_region_end - 1};
    const VAddr heap_start{heap_region_start};
    const VAddr heap_last{heap_region_end - 1};
    const VAddr stack_start{stack_region_start};
    const VAddr stack_last{stack_region_end - 1};
    const VAddr kmap_start{kernel_map_region_start};
    const VAddr kmap_last{kernel_map_region_end - 1};
    ASSERT(alias_last < heap_start || heap_last < alias_start);
    ASSERT(alias_last < stack_start || stack_last < alias_start);
    ASSERT(alias_last < kmap_start || kmap_last < alias_start);
    ASSERT(heap_last < stack_start || stack_last < heap_start);
    ASSERT(heap_last < kmap_start || kmap_last < heap_start);

    current_heap_addr = heap_region_start;
    heap_capacity = 0;
    physical_memory_usage = 0;
    memory_pool = pool;

    page_table_impl.Resize(address_space_width, PageBits, true);

    return InitializeMemoryLayout(start, end);
}
273
// Allocates num_pages of physical memory from the configured pool and maps
// them at addr with the given state/permission (used for process code).
ResultCode PageTable::MapProcessCode(VAddr addr, std::size_t num_pages, MemoryState state,
                                     MemoryPermission perm) {
    std::lock_guard lock{page_table_lock};

    const u64 size{num_pages * PageSize};

    // The destination must fall inside the region associated with `state`...
    if (!CanContain(addr, size, state)) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    // ...and must currently be free.
    if (IsRegionMapped(addr, size)) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    // Allocate backing pages for the code.
    PageLinkedList page_linked_list;
    if (const ResultCode result{
            system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool)};
        result.IsError()) {
        return result;
    }

    // Map the allocated group at the destination.
    // NOTE(review): if Operate fails here the freshly allocated pages are not
    // returned to the MemoryManager — confirm whether the manager reclaims
    // them elsewhere.
    if (const ResultCode result{
            Operate(addr, num_pages, page_linked_list, OperationType::MapGroup)};
        result.IsError()) {
        return result;
    }

    block_manager->Update(addr, num_pages, state, perm);

    return RESULT_SUCCESS;
}
305
// Maps the pages backing [src_addr, src_addr + size) a second time at
// dst_addr as AliasCode. The source must be Normal/ReadAndWrite; afterwards it
// is locked and stripped of permissions, and the destination starts with no
// permissions (callers grant them later via SetCodeMemoryPermission).
ResultCode PageTable::MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
    std::lock_guard lock{page_table_lock};

    const std::size_t num_pages{size / PageSize};

    // Validate the source range and capture its current state/permissions.
    MemoryState state{};
    MemoryPermission perm{};
    if (const ResultCode result{CheckMemoryState(
            &state, &perm, nullptr, src_addr, size, MemoryState::All, MemoryState::Normal,
            MemoryPermission::Mask, MemoryPermission::ReadAndWrite, MemoryAttribute::Mask,
            MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped)};
        result.IsError()) {
        return result;
    }

    // The destination must be unmapped.
    if (IsRegionMapped(dst_addr, size)) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    // Gather the physical pages currently backing the source.
    PageLinkedList page_linked_list;
    AddRegionToPages(src_addr, num_pages, page_linked_list);

    {
        // On failure, restore the source pages to their original permissions.
        auto block_guard = detail::ScopeExit(
            [&] { Operate(src_addr, num_pages, perm, OperationType::ChangePermissions); });

        // Revoke access to the source while it is aliased.
        if (const ResultCode result{Operate(src_addr, num_pages, MemoryPermission::None,
                                            OperationType::ChangePermissions)};
            result.IsError()) {
            return result;
        }

        // Map the same physical pages at the destination.
        if (const ResultCode result{MapPages(dst_addr, page_linked_list, MemoryPermission::None)};
            result.IsError()) {
            return result;
        }

        block_guard.Cancel();
    }

    // Record the new block states: source locked, destination aliased code.
    block_manager->Update(src_addr, num_pages, state, MemoryPermission::None,
                          MemoryAttribute::Locked);
    block_manager->Update(dst_addr, num_pages, MemoryState::AliasCode);

    return RESULT_SUCCESS;
}
352
// Reverses MapProcessCodeMemory: unmaps the alias at dst_addr and restores the
// locked source range to Normal/ReadAndWrite. A zero size is a no-op.
ResultCode PageTable::UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
    std::lock_guard lock{page_table_lock};

    if (!size) {
        return RESULT_SUCCESS;
    }

    const std::size_t num_pages{size / PageSize};

    // The source must still be the locked Normal range set up during mapping.
    if (const ResultCode result{CheckMemoryState(
            nullptr, nullptr, nullptr, src_addr, size, MemoryState::All, MemoryState::Normal,
            MemoryPermission::None, MemoryPermission::None, MemoryAttribute::Mask,
            MemoryAttribute::Locked, MemoryAttribute::IpcAndDeviceMapped)};
        result.IsError()) {
        return result;
    }

    // Probe the first destination page for a code-alias state...
    MemoryState state{};
    if (const ResultCode result{CheckMemoryState(
            &state, nullptr, nullptr, dst_addr, PageSize, MemoryState::FlagCanCodeAlias,
            MemoryState::FlagCanCodeAlias, MemoryPermission::None, MemoryPermission::None,
            MemoryAttribute::Mask, MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped)};
        result.IsError()) {
        return result;
    }

    // ...then require the whole destination range to share that exact state.
    if (const ResultCode result{CheckMemoryState(dst_addr, size, MemoryState::All, state,
                                                 MemoryPermission::None, MemoryPermission::None,
                                                 MemoryAttribute::Mask, MemoryAttribute::None)};
        result.IsError()) {
        return result;
    }

    // Remove the alias mapping.
    if (const ResultCode result{
            Operate(dst_addr, num_pages, MemoryPermission::None, OperationType::Unmap)};
        result.IsError()) {
        return result;
    }

    // Destination becomes free; source regains read/write access.
    block_manager->Update(dst_addr, num_pages, MemoryState::Free);
    block_manager->Update(src_addr, num_pages, MemoryState::Normal, MemoryPermission::ReadAndWrite);

    return RESULT_SUCCESS;
}
397
// Maps the pages of page_linked_list into every currently-free block within
// [start, end), consuming the list's nodes in order. The caller must ensure
// the list holds at least as many pages as the free space in the range.
void PageTable::MapPhysicalMemory(PageLinkedList& page_linked_list, VAddr start, VAddr end) {
    auto node{page_linked_list.Nodes().begin()};
    PAddr map_addr{node->GetAddress()};
    std::size_t src_num_pages{node->GetNumPages()};

    block_manager->IterateForRange(start, end, [&](const MemoryInfo& info) {
        // Only fill gaps; blocks that are already mapped are left untouched.
        if (info.state != MemoryState::Free) {
            return;
        }

        std::size_t dst_num_pages{GetSizeInRange(info, start, end) / PageSize};
        VAddr dst_addr{GetAddressInRange(info, start)};

        while (dst_num_pages) {
            // Advance to the next source node once the current one is drained.
            if (!src_num_pages) {
                node = std::next(node);
                map_addr = node->GetAddress();
                src_num_pages = node->GetNumPages();
            }

            // Map as many pages as both the source node and the gap allow.
            const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)};
            Operate(dst_addr, num_pages, MemoryPermission::ReadAndWrite, OperationType::Map,
                    map_addr);

            dst_addr += num_pages * PageSize;
            map_addr += num_pages * PageSize;
            src_num_pages -= num_pages;
            dst_num_pages -= num_pages;
        }
    });
}
429
// svcMapPhysicalMemory-style backing: ensures every page in [addr, addr+size)
// is backed by physical memory, allocating and mapping only the portions that
// are currently free and charging them against the process resource limit.
ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
    std::lock_guard lock{page_table_lock};

    // Measure how much of the range is already mapped.
    std::size_t mapped_size{};
    const VAddr end_addr{addr + size};

    block_manager->IterateForRange(addr, end_addr, [&](const MemoryInfo& info) {
        if (info.state != MemoryState::Free) {
            mapped_size += GetSizeInRange(info, addr, end_addr);
        }
    });

    // Fully mapped already — nothing to do.
    if (mapped_size == size) {
        return RESULT_SUCCESS;
    }

    auto process{system.Kernel().CurrentProcess()};
    const std::size_t remaining_size{size - mapped_size};
    const std::size_t remaining_pages{remaining_size / PageSize};

    // Charge the new backing memory against the process resource limit.
    if (process->GetResourceLimit() &&
        !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, remaining_size)) {
        return ERR_RESOURCE_LIMIT_EXCEEDED;
    }

    PageLinkedList page_linked_list;
    {
        // On failure, release both the pages and the resource-limit charge.
        auto block_guard = detail::ScopeExit([&] {
            system.Kernel().MemoryManager().Free(page_linked_list, remaining_pages, memory_pool);
            process->GetResourceLimit()->Release(ResourceType::PhysicalMemory, remaining_size);
        });

        if (const ResultCode result{system.Kernel().MemoryManager().Allocate(
                page_linked_list, remaining_pages, memory_pool)};
            result.IsError()) {
            return result;
        }

        block_guard.Cancel();
    }

    // Fill the free gaps in the range with the allocated pages.
    MapPhysicalMemory(page_linked_list, addr, end_addr);

    physical_memory_usage += remaining_size;

    // Transition only Free blocks in the range to Normal/ReadAndWrite
    // (the Update overload taking a "before" state filters on it).
    const std::size_t num_pages{size / PageSize};
    block_manager->Update(addr, num_pages, MemoryState::Free, MemoryPermission::None,
                          MemoryAttribute::None, MemoryState::Normal,
                          MemoryPermission::ReadAndWrite, MemoryAttribute::None);

    return RESULT_SUCCESS;
}
482
// Releases the physical backing of [addr, addr+size). Only ranges consisting
// solely of attribute-free Normal blocks and Free blocks may be unmapped; the
// freed amount is returned to the process resource limit.
ResultCode PageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
    std::lock_guard lock{page_table_lock};

    const VAddr end_addr{addr + size};
    ResultCode result{RESULT_SUCCESS};
    std::size_t mapped_size{};

    // Verify that the region can be unmapped
    block_manager->IterateForRange(addr, end_addr, [&](const MemoryInfo& info) {
        if (info.state == MemoryState::Normal) {
            // Normal blocks must carry no attributes (e.g. not Locked).
            if (info.attribute != MemoryAttribute::None) {
                result = ERR_INVALID_ADDRESS_STATE;
                return;
            }
            mapped_size += GetSizeInRange(info, addr, end_addr);
        } else if (info.state != MemoryState::Free) {
            result = ERR_INVALID_ADDRESS_STATE;
        }
    });

    if (result.IsError()) {
        return result;
    }

    // Nothing is mapped, so there is nothing to release.
    if (!mapped_size) {
        return RESULT_SUCCESS;
    }

    // Note: this inner `result` intentionally shadows the outer accumulator.
    if (const ResultCode result{UnmapMemory(addr, size)}; result.IsError()) {
        return result;
    }

    // Return the released memory to the resource limit and usage counter.
    auto process{system.Kernel().CurrentProcess()};
    process->GetResourceLimit()->Release(ResourceType::PhysicalMemory, mapped_size);
    physical_memory_usage -= mapped_size;

    return RESULT_SUCCESS;
}
521
// Unmaps every Normal block within [addr, addr+size), collects the backing
// pages, frees them back to the MemoryManager, and marks the range Free.
ResultCode PageTable::UnmapMemory(VAddr addr, std::size_t size) {
    std::lock_guard lock{page_table_lock};

    const VAddr end_addr{addr + size};
    ResultCode result{RESULT_SUCCESS};
    PageLinkedList page_linked_list;

    // Unmap each region within the range
    block_manager->IterateForRange(addr, end_addr, [&](const MemoryInfo& info) {
        if (info.state == MemoryState::Normal) {
            const std::size_t block_size{GetSizeInRange(info, addr, end_addr)};
            const std::size_t block_num_pages{block_size / PageSize};
            const VAddr block_addr{GetAddressInRange(info, addr)};

            AddRegionToPages(block_addr, block_size / PageSize, page_linked_list);

            // NOTE(review): on failure this only returns from the current
            // callback; IterateForRange keeps visiting later blocks — confirm
            // this is intended.
            if (result = Operate(block_addr, block_num_pages, MemoryPermission::None,
                                 OperationType::Unmap);
                result.IsError()) {
                return;
            }
        }
    });

    if (result.IsError()) {
        return result;
    }

    // NOTE(review): the free/update below uses the full range's page count
    // even though only the Normal blocks were unmapped — verify against the
    // callers (UnmapPhysicalMemory pre-validates the range as Normal/Free).
    const std::size_t num_pages{size / PageSize};
    system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool);

    block_manager->Update(addr, num_pages, MemoryState::Free);

    return RESULT_SUCCESS;
}
557
// Mirrors [src_addr, src_addr + size) at dst_addr (svcMapMemory): the source
// must be an aliasable read/write range; it is locked with no permissions and
// the destination is mapped over the same physical pages as Stack memory.
ResultCode PageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) {
    std::lock_guard lock{page_table_lock};

    // Validate the source range and capture its state for later restore.
    MemoryState src_state{};
    if (const ResultCode result{CheckMemoryState(
            &src_state, nullptr, nullptr, src_addr, size, MemoryState::FlagCanAlias,
            MemoryState::FlagCanAlias, MemoryPermission::Mask, MemoryPermission::ReadAndWrite,
            MemoryAttribute::Mask, MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped)};
        result.IsError()) {
        return result;
    }

    // The destination must be unmapped.
    if (IsRegionMapped(dst_addr, size)) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    PageLinkedList page_linked_list;
    const std::size_t num_pages{size / PageSize};

    // Gather the physical pages backing the source.
    AddRegionToPages(src_addr, num_pages, page_linked_list);

    {
        // On failure, restore the source to read/write.
        auto block_guard = detail::ScopeExit([&] {
            Operate(src_addr, num_pages, MemoryPermission::ReadAndWrite,
                    OperationType::ChangePermissions);
        });

        // Revoke access to the source while it is mirrored.
        if (const ResultCode result{Operate(src_addr, num_pages, MemoryPermission::None,
                                            OperationType::ChangePermissions)};
            result.IsError()) {
            return result;
        }

        // Map the same pages read/write at the destination.
        if (const ResultCode result{
                MapPages(dst_addr, page_linked_list, MemoryPermission::ReadAndWrite)};
            result.IsError()) {
            return result;
        }

        block_guard.Cancel();
    }

    // Source becomes locked; destination is tracked as Stack memory.
    block_manager->Update(src_addr, num_pages, src_state, MemoryPermission::None,
                          MemoryAttribute::Locked);
    block_manager->Update(dst_addr, num_pages, MemoryState::Stack, MemoryPermission::ReadAndWrite);

    return RESULT_SUCCESS;
}
606
// Reverses Map (svcUnmapMemory): verifies dst still mirrors src (identical
// backing pages), unmaps the Stack mapping at dst_addr, and restores the
// locked source range to read/write.
ResultCode PageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) {
    std::lock_guard lock{page_table_lock};

    // The source must be the locked aliasable range created by Map().
    MemoryState src_state{};
    if (const ResultCode result{CheckMemoryState(
            &src_state, nullptr, nullptr, src_addr, size, MemoryState::FlagCanAlias,
            MemoryState::FlagCanAlias, MemoryPermission::Mask, MemoryPermission::None,
            MemoryAttribute::Mask, MemoryAttribute::Locked, MemoryAttribute::IpcAndDeviceMapped)};
        result.IsError()) {
        return result;
    }

    // The destination must be Stack memory; remember its permissions so the
    // rollback guard can re-map it if the unmap sequence fails midway.
    MemoryPermission dst_perm{};
    if (const ResultCode result{CheckMemoryState(
            nullptr, &dst_perm, nullptr, dst_addr, size, MemoryState::All, MemoryState::Stack,
            MemoryPermission::None, MemoryPermission::None, MemoryAttribute::Mask,
            MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped)};
        result.IsError()) {
        return result;
    }

    PageLinkedList src_pages;
    PageLinkedList dst_pages;
    const std::size_t num_pages{size / PageSize};

    // Both ranges must still be backed by the same physical pages.
    AddRegionToPages(src_addr, num_pages, src_pages);
    AddRegionToPages(dst_addr, num_pages, dst_pages);

    if (!dst_pages.IsEqual(src_pages)) {
        return ERR_INVALID_MEMORY_RANGE;
    }

    {
        // On failure, re-establish the destination mapping.
        auto block_guard = detail::ScopeExit([&] { MapPages(dst_addr, dst_pages, dst_perm); });

        if (const ResultCode result{
                Operate(dst_addr, num_pages, MemoryPermission::None, OperationType::Unmap)};
            result.IsError()) {
            return result;
        }

        // Restore access to the source.
        if (const ResultCode result{Operate(src_addr, num_pages, MemoryPermission::ReadAndWrite,
                                            OperationType::ChangePermissions)};
            result.IsError()) {
            return result;
        }

        block_guard.Cancel();
    }

    // Source regains read/write; destination becomes free.
    block_manager->Update(src_addr, num_pages, src_state, MemoryPermission::ReadAndWrite);
    block_manager->Update(dst_addr, num_pages, MemoryState::Free);

    return RESULT_SUCCESS;
}
662
663ResultCode PageTable::MapPages(VAddr addr, const PageLinkedList& page_linked_list,
664 MemoryPermission perm) {
665 VAddr cur_addr{addr};
666
667 for (const auto& node : page_linked_list.Nodes()) {
668 if (const ResultCode result{
669 Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
670 result.IsError()) {
671 const MemoryInfo info{block_manager->FindBlock(cur_addr).GetMemoryInfo()};
672 const std::size_t num_pages{(addr - cur_addr) / PageSize};
673
674 ASSERT(
675 Operate(addr, num_pages, MemoryPermission::None, OperationType::Unmap).IsSuccess());
676
677 return result;
678 }
679
680 cur_addr += node.GetNumPages() * PageSize;
681 }
682
683 return RESULT_SUCCESS;
684}
685
686ResultCode PageTable::MapPages(VAddr addr, PageLinkedList& page_linked_list, MemoryState state,
687 MemoryPermission perm) {
688 std::lock_guard lock{page_table_lock};
689
690 const std::size_t num_pages{page_linked_list.GetNumPages()};
691 const std::size_t size{num_pages * PageSize};
692
693 if (!CanContain(addr, size, state)) {
694 return ERR_INVALID_ADDRESS_STATE;
695 }
696
697 if (IsRegionMapped(addr, num_pages * PageSize)) {
698 return ERR_INVALID_ADDRESS_STATE;
699 }
700
701 if (const ResultCode result{MapPages(addr, page_linked_list, perm)}; result.IsError()) {
702 return result;
703 }
704
705 block_manager->Update(addr, num_pages, state, perm);
706
707 return RESULT_SUCCESS;
708}
709
// Changes the permissions on a code range, promoting its tracked state to the
// matching writable variant (Code -> CodeData, AliasCode -> AliasCodeData)
// when write access is requested.
ResultCode PageTable::SetCodeMemoryPermission(VAddr addr, std::size_t size, MemoryPermission perm) {

    std::lock_guard lock{page_table_lock};

    MemoryState prev_state{};
    MemoryPermission prev_perm{};

    // The range must be code memory with no attributes set.
    if (const ResultCode result{CheckMemoryState(
            &prev_state, &prev_perm, nullptr, addr, size, MemoryState::FlagCode,
            MemoryState::FlagCode, MemoryPermission::None, MemoryPermission::None,
            MemoryAttribute::Mask, MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped)};
        result.IsError()) {
        return result;
    }

    MemoryState state{prev_state};

    // Ensure state is mutable if permission allows write
    if ((perm & MemoryPermission::Write) != MemoryPermission::None) {
        if (prev_state == MemoryState::Code) {
            state = MemoryState::CodeData;
        } else if (prev_state == MemoryState::AliasCode) {
            state = MemoryState::AliasCodeData;
        } else {
            UNREACHABLE();
        }
    }

    // Return early if there is nothing to change
    if (state == prev_state && perm == prev_perm) {
        return RESULT_SUCCESS;
    }

    // Executable mappings additionally need an instruction-cache refresh.
    const std::size_t num_pages{size / PageSize};
    const OperationType operation{(perm & MemoryPermission::Execute) != MemoryPermission::None
                                      ? OperationType::ChangePermissionsAndRefresh
                                      : OperationType::ChangePermissions};

    if (const ResultCode result{Operate(addr, num_pages, perm, operation)}; result.IsError()) {
        return result;
    }

    block_manager->Update(addr, num_pages, state, perm);

    return RESULT_SUCCESS;
}
756
// Looks up the memory block containing addr under the page-table lock.
// Assumes the address has already been validated (see QueryInfo).
MemoryInfo PageTable::QueryInfoImpl(VAddr addr) {
    std::lock_guard lock{page_table_lock};

    return block_manager->FindBlock(addr).GetMemoryInfo();
}
762
// Queries the block containing addr. Addresses outside the address space
// yield a synthetic Inaccessible block that covers everything above it
// (`0 - address_space_end` relies on unsigned wraparound to express that
// remaining size).
MemoryInfo PageTable::QueryInfo(VAddr addr) {
    if (!Contains(addr, 1)) {
        return {address_space_end, 0 - address_space_end, MemoryState::Inaccessible,
                MemoryPermission::None, MemoryAttribute::None, MemoryPermission::None};
    }

    return QueryInfoImpl(addr);
}
771
// Marks a transferable read/write range as Locked (with the given permission)
// so it can back a transfer-memory object. The mapping itself is unchanged;
// only the block manager's bookkeeping is updated.
ResultCode PageTable::ReserveTransferMemory(VAddr addr, std::size_t size, MemoryPermission perm) {
    std::lock_guard lock{page_table_lock};

    MemoryState state{};
    MemoryAttribute attribute{};

    if (const ResultCode result{CheckMemoryState(
            &state, nullptr, &attribute, addr, size,
            MemoryState::FlagCanTransfer | MemoryState::FlagReferenceCounted,
            MemoryState::FlagCanTransfer | MemoryState::FlagReferenceCounted,
            MemoryPermission::Mask, MemoryPermission::ReadAndWrite, MemoryAttribute::Mask,
            MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped)};
        result.IsError()) {
        return result;
    }

    // Keep the existing attributes and add the lock.
    block_manager->Update(addr, size / PageSize, state, perm, attribute | MemoryAttribute::Locked);

    return RESULT_SUCCESS;
}
792
// Reverses ReserveTransferMemory: the range must currently be a Locked
// transferable range; it is restored to ReadAndWrite with the lock cleared.
ResultCode PageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
    std::lock_guard lock{page_table_lock};

    MemoryState state{};

    if (const ResultCode result{
            CheckMemoryState(&state, nullptr, nullptr, addr, size,
                             MemoryState::FlagCanTransfer | MemoryState::FlagReferenceCounted,
                             MemoryState::FlagCanTransfer | MemoryState::FlagReferenceCounted,
                             MemoryPermission::None, MemoryPermission::None, MemoryAttribute::Mask,
                             MemoryAttribute::Locked, MemoryAttribute::IpcAndDeviceMapped)};
        result.IsError()) {
        return result;
    }

    block_manager->Update(addr, size / PageSize, state, MemoryPermission::ReadAndWrite);

    return RESULT_SUCCESS;
}
812
813ResultCode PageTable::SetMemoryAttribute(VAddr addr, std::size_t size, MemoryAttribute mask,
814 MemoryAttribute value) {
815 std::lock_guard lock{page_table_lock};
816
817 MemoryState state{};
818 MemoryPermission perm{};
819 MemoryAttribute attribute{};
820
821 if (const ResultCode result{CheckMemoryState(
822 &state, &perm, &attribute, addr, size, MemoryState::FlagCanChangeAttribute,
823 MemoryState::FlagCanChangeAttribute, MemoryPermission::None, MemoryPermission::None,
824 MemoryAttribute::LockedAndIpcLocked, MemoryAttribute::None,
825 MemoryAttribute::DeviceSharedAndUncached)};
826 result.IsError()) {
827 return result;
828 }
829
830 attribute = attribute & ~mask;
831 attribute = attribute | (mask & value);
832
833 block_manager->Update(addr, size / PageSize, state, perm, attribute);
834
835 return RESULT_SUCCESS;
836}
837
// Records the heap capacity limit under the page-table lock. The value is
// only stored here; enforcement (if any) happens elsewhere.
ResultCode PageTable::SetHeapCapacity(std::size_t new_heap_capacity) {
    std::lock_guard lock{page_table_lock};
    heap_capacity = new_heap_capacity;
    return RESULT_SUCCESS;
}
843
// Grows the heap to `size` bytes (svcSetHeapSize) and returns the heap base
// address. Shrinking is not implemented and trips UNIMPLEMENTED_IF_MSG.
ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) {

    // The requested size must fit in the heap region.
    if (size > heap_region_end - heap_region_start) {
        return ERR_OUT_OF_MEMORY;
    }

    const u64 previous_heap_size{GetHeapSize()};

    UNIMPLEMENTED_IF_MSG(previous_heap_size > size, "Heap shrink is unimplemented");

    // Increase the heap size
    {
        std::lock_guard lock{page_table_lock};

        const u64 delta{size - previous_heap_size};

        // Charge the growth against the process resource limit.
        auto process{system.Kernel().CurrentProcess()};
        if (process->GetResourceLimit() && delta != 0 &&
            !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, delta)) {
            return ERR_RESOURCE_LIMIT_EXCEEDED;
        }

        PageLinkedList page_linked_list;
        const std::size_t num_pages{delta / PageSize};

        if (const ResultCode result{
                system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool)};
            result.IsError()) {
            return result;
        }

        // NOTE(review): the two error returns below leave the allocated pages
        // and the resource-limit reservation unreleased — confirm whether a
        // rollback (as in MapPhysicalMemory) is needed here.
        if (IsRegionMapped(current_heap_addr, delta)) {
            return ERR_INVALID_ADDRESS_STATE;
        }

        if (const ResultCode result{
                Operate(current_heap_addr, num_pages, page_linked_list, OperationType::MapGroup)};
            result.IsError()) {
            return result;
        }

        block_manager->Update(current_heap_addr, num_pages, MemoryState::Normal,
                              MemoryPermission::ReadAndWrite);

        current_heap_addr = heap_region_start + size;
    }

    return MakeResult<VAddr>(heap_region_start);
}
893
// Finds a free, aligned spot for needed_num_pages within the region starting
// at region_start, then either maps the supplied physical address (is_map_only)
// or allocates fresh pages from the pool and maps those. Returns the chosen
// virtual address.
ResultVal<VAddr> PageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
                                                 bool is_map_only, VAddr region_start,
                                                 std::size_t region_num_pages, MemoryState state,
                                                 MemoryPermission perm, PAddr map_addr) {
    std::lock_guard lock{page_table_lock};

    // The target region must belong to the requested state.
    if (!CanContain(region_start, region_num_pages * PageSize, state)) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    // NOTE(review): `<=` also rejects an exact fit (region == needed) —
    // confirm whether that is intentional.
    if (region_num_pages <= needed_num_pages) {
        return ERR_OUT_OF_MEMORY;
    }

    // Find a free, suitably aligned virtual range.
    const VAddr addr{
        AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)};
    if (!addr) {
        return ERR_OUT_OF_MEMORY;
    }

    if (is_map_only) {
        // Map the caller-provided physical range directly.
        if (const ResultCode result{
                Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)};
            result.IsError()) {
            return result;
        }
    } else {
        // Allocate backing pages from the pool and map them as a group.
        PageLinkedList page_group;
        if (const ResultCode result{system.Kernel().MemoryManager().Allocate(
                page_group, needed_num_pages, memory_pool)};
            result.IsError()) {
            return result;
        }
        if (const ResultCode result{
                Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)};
            result.IsError()) {
            return result;
        }
    }

    block_manager->Update(addr, needed_num_pages, state, perm);

    return MakeResult<VAddr>(addr);
}
938
939PAddr PageTable::GetPhysicalAddr(VAddr addr) {
940 return system.GetDeviceMemory().GetPhysicalAddr(addr);
941}
942
// Creates the memory-block manager that tracks per-range state/permission/
// attribute metadata for the address range [start, end). Always succeeds.
ResultCode PageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
    block_manager = std::make_unique<MemoryBlockManager>(start, end);

    return RESULT_SUCCESS;
}
948
949bool PageTable::IsRegionMapped(VAddr address, u64 size) {
950 return CheckMemoryState(address, size, MemoryState::All, MemoryState::Free,
951 MemoryPermission::Mask, MemoryPermission::None, MemoryAttribute::Mask,
952 MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped)
953 .IsError();
954}
955
956bool PageTable::IsRegionContiguous(VAddr addr, u64 size) const {
957 auto start_ptr = system.Memory().GetPointer(addr);
958 for (u64 offset{}; offset < size; offset += PageSize) {
959 if (start_ptr != system.Memory().GetPointer(addr + offset)) {
960 return false;
961 }
962 start_ptr += PageSize;
963 }
964 return true;
965}
966
967void PageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
968 PageLinkedList& page_linked_list) {
969 VAddr addr{start};
970 while (addr < start + (num_pages * PageSize)) {
971 const PAddr paddr{GetPhysicalAddr(addr)};
972 if (!paddr) {
973 UNREACHABLE();
974 }
975 page_linked_list.AddBlock(paddr, 1);
976 addr += PageSize;
977 }
978}
979
980VAddr PageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages,
981 u64 needed_num_pages, std::size_t align) {
982 if (is_aslr_enabled) {
983 UNIMPLEMENTED();
984 }
985 return block_manager->FindFreeArea(start, region_num_pages, needed_num_pages, align, 0,
986 IsKernel() ? 1 : 4);
987}
988
989ResultCode PageTable::Operate(VAddr addr, std::size_t num_pages, const PageLinkedList& page_group,
990 OperationType operation) {
991 std::lock_guard lock{page_table_lock};
992
993 ASSERT(Common::IsAligned(addr, PageSize));
994 ASSERT(num_pages > 0);
995 ASSERT(num_pages == page_group.GetNumPages());
996
997 for (const auto& node : page_group.Nodes()) {
998 const std::size_t size{node.GetNumPages() * PageSize};
999
1000 switch (operation) {
1001 case OperationType::MapGroup:
1002 system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress());
1003 break;
1004 default:
1005 UNREACHABLE();
1006 }
1007
1008 addr += size;
1009 }
1010
1011 return RESULT_SUCCESS;
1012}
1013
// Applies a single page-table operation to [addr, addr + num_pages * PageSize).
// Map requires a page-aligned, nonzero map_addr; permission changes perform no
// host-side work here (the host mapping is not modified for those operations).
ResultCode PageTable::Operate(VAddr addr, std::size_t num_pages, MemoryPermission perm,
                              OperationType operation, PAddr map_addr) {
    std::lock_guard lock{page_table_lock};

    ASSERT(num_pages > 0);
    ASSERT(Common::IsAligned(addr, PageSize));
    ASSERT(ContainsPages(addr, num_pages));

    switch (operation) {
    case OperationType::Unmap:
        system.Memory().UnmapRegion(page_table_impl, addr, num_pages * PageSize);
        break;
    case OperationType::Map: {
        ASSERT(map_addr);
        ASSERT(Common::IsAligned(map_addr, PageSize));
        system.Memory().MapMemoryRegion(page_table_impl, addr, num_pages * PageSize, map_addr);
        break;
    }
    case OperationType::ChangePermissions:
    case OperationType::ChangePermissionsAndRefresh:
        // No-op: nothing to update in the host mapping for permission changes.
        break;
    default:
        UNREACHABLE();
    }
    return RESULT_SUCCESS;
}
1040
// Returns the base virtual address of the region that memory of the given
// state must live in (e.g. Normal -> heap region, Stack -> stack region).
// Unknown states are a programming error.
constexpr VAddr PageTable::GetRegionAddress(MemoryState state) const {
    switch (state) {
    case MemoryState::Free:
    case MemoryState::Kernel:
        return address_space_start;
    case MemoryState::Normal:
        return heap_region_start;
    case MemoryState::Ipc:
    case MemoryState::NonSecureIpc:
    case MemoryState::NonDeviceIpc:
        return alias_region_start;
    case MemoryState::Stack:
        return stack_region_start;
    case MemoryState::Io:
    case MemoryState::Static:
    case MemoryState::ThreadLocal:
        return kernel_map_region_start;
    case MemoryState::Shared:
    case MemoryState::AliasCode:
    case MemoryState::AliasCodeData:
    case MemoryState::Transfered:
    case MemoryState::SharedTransfered:
    case MemoryState::SharedCode:
    case MemoryState::GeneratedCode:
    case MemoryState::CodeOut:
        return alias_code_region_start;
    case MemoryState::Code:
    case MemoryState::CodeData:
        return code_region_start;
    default:
        UNREACHABLE();
        return {};
    }
}
1075
// Returns the size in bytes of the region associated with the given state.
// Must stay in sync with GetRegionAddress (same state -> region mapping).
constexpr std::size_t PageTable::GetRegionSize(MemoryState state) const {
    switch (state) {
    case MemoryState::Free:
    case MemoryState::Kernel:
        return address_space_end - address_space_start;
    case MemoryState::Normal:
        return heap_region_end - heap_region_start;
    case MemoryState::Ipc:
    case MemoryState::NonSecureIpc:
    case MemoryState::NonDeviceIpc:
        return alias_region_end - alias_region_start;
    case MemoryState::Stack:
        return stack_region_end - stack_region_start;
    case MemoryState::Io:
    case MemoryState::Static:
    case MemoryState::ThreadLocal:
        return kernel_map_region_end - kernel_map_region_start;
    case MemoryState::Shared:
    case MemoryState::AliasCode:
    case MemoryState::AliasCodeData:
    case MemoryState::Transfered:
    case MemoryState::SharedTransfered:
    case MemoryState::SharedCode:
    case MemoryState::GeneratedCode:
    case MemoryState::CodeOut:
        return alias_code_region_end - alias_code_region_start;
    case MemoryState::Code:
    case MemoryState::CodeData:
        return code_region_end - code_region_start;
    default:
        UNREACHABLE();
        return {};
    }
}
1110
// Determines whether [addr, addr + size) is a legal placement for memory of
// the given state: the range must lie inside the state's designated region,
// and most states must additionally avoid overlapping the heap and alias
// regions (which Normal and Ipc memory respectively are *expected* to occupy).
constexpr bool PageTable::CanContain(VAddr addr, std::size_t size, MemoryState state) const {
    const VAddr end{addr + size};
    const VAddr last{end - 1};
    const VAddr region_start{GetRegionAddress(state)};
    const std::size_t region_size{GetRegionSize(state)};
    // 'addr < end' also rejects a zero-size or address-wrapping range.
    const bool is_in_region{region_start <= addr && addr < end &&
                            last <= region_start + region_size - 1};
    // Half-open overlap tests against the heap and alias regions.
    const bool is_in_heap{!(end <= heap_region_start || heap_region_end <= addr)};
    const bool is_in_alias{!(end <= alias_region_start || alias_region_end <= addr)};

    switch (state) {
    case MemoryState::Free:
    case MemoryState::Kernel:
        return is_in_region;
    case MemoryState::Io:
    case MemoryState::Static:
    case MemoryState::Code:
    case MemoryState::CodeData:
    case MemoryState::Shared:
    case MemoryState::AliasCode:
    case MemoryState::AliasCodeData:
    case MemoryState::Stack:
    case MemoryState::ThreadLocal:
    case MemoryState::Transfered:
    case MemoryState::SharedTransfered:
    case MemoryState::SharedCode:
    case MemoryState::GeneratedCode:
    case MemoryState::CodeOut:
        // These states must not intersect heap or alias memory.
        return is_in_region && !is_in_heap && !is_in_alias;
    case MemoryState::Normal:
        // Normal (heap) memory lives in the heap region by definition.
        ASSERT(is_in_heap);
        return is_in_region && !is_in_alias;
    case MemoryState::Ipc:
    case MemoryState::NonSecureIpc:
    case MemoryState::NonDeviceIpc:
        // Ipc memory lives in the alias region by definition.
        ASSERT(is_in_alias);
        return is_in_region && !is_in_heap;
    default:
        return false;
    }
}
1152
1153constexpr ResultCode PageTable::CheckMemoryState(const MemoryInfo& info, MemoryState state_mask,
1154 MemoryState state, MemoryPermission perm_mask,
1155 MemoryPermission perm, MemoryAttribute attr_mask,
1156 MemoryAttribute attr) const {
1157 // Validate the states match expectation
1158 if ((info.state & state_mask) != state) {
1159 return ERR_INVALID_ADDRESS_STATE;
1160 }
1161 if ((info.perm & perm_mask) != perm) {
1162 return ERR_INVALID_ADDRESS_STATE;
1163 }
1164 if ((info.attribute & attr_mask) != attr) {
1165 return ERR_INVALID_ADDRESS_STATE;
1166 }
1167
1168 return RESULT_SUCCESS;
1169}
1170
// Walks every block overlapping [addr, addr + size) and verifies that the
// whole range is homogeneous (same state/perm, same attributes modulo
// ignore_attr) and that it matches the caller's masked expectations. On
// success, optionally reports the range's state/perm/attributes (with the
// ignored attribute bits cleared) through the out pointers.
ResultCode PageTable::CheckMemoryState(MemoryState* out_state, MemoryPermission* out_perm,
                                       MemoryAttribute* out_attr, VAddr addr, std::size_t size,
                                       MemoryState state_mask, MemoryState state,
                                       MemoryPermission perm_mask, MemoryPermission perm,
                                       MemoryAttribute attr_mask, MemoryAttribute attr,
                                       MemoryAttribute ignore_attr) {
    std::lock_guard lock{page_table_lock};

    // Get information about the first block
    const VAddr last_addr{addr + size - 1};
    MemoryBlockManager::const_iterator it{block_manager->FindIterator(addr)};
    MemoryInfo info{it->GetMemoryInfo()};

    // Validate all blocks in the range have correct state
    const MemoryState first_state{info.state};
    const MemoryPermission first_perm{info.perm};
    const MemoryAttribute first_attr{info.attribute};

    while (true) {
        // Validate the current block matches the first block exactly
        if (!(info.state == first_state)) {
            return ERR_INVALID_ADDRESS_STATE;
        }
        if (!(info.perm == first_perm)) {
            return ERR_INVALID_ADDRESS_STATE;
        }
        // Attributes only need to match outside the ignored bits: OR-ing the
        // ignored bits into both sides makes any difference there invisible.
        if (!((info.attribute | static_cast<MemoryAttribute>(ignore_attr)) ==
              (first_attr | static_cast<MemoryAttribute>(ignore_attr)))) {
            return ERR_INVALID_ADDRESS_STATE;
        }

        // Validate against the provided masks
        if (const ResultCode result{
                CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)};
            result.IsError()) {
            return result;
        }

        // Break once the current block covers the end of the queried range
        if (last_addr <= info.GetLastAddress()) {
            break;
        }

        // Advance our iterator
        it++;
        ASSERT(it != block_manager->cend());
        info = it->GetMemoryInfo();
    }

    // Write output state (ignored attribute bits are masked out of the result)
    if (out_state) {
        *out_state = first_state;
    }
    if (out_perm) {
        *out_perm = first_perm;
    }
    if (out_attr) {
        *out_attr = first_attr & static_cast<MemoryAttribute>(~ignore_attr);
    }

    return RESULT_SUCCESS;
}
1233
1234} // namespace Kernel::Memory
diff --git a/src/core/hle/kernel/memory/page_table.h b/src/core/hle/kernel/memory/page_table.h
new file mode 100644
index 000000000..6c3a3c275
--- /dev/null
+++ b/src/core/hle/kernel/memory/page_table.h
@@ -0,0 +1,274 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <list>
8#include <memory>
9#include <mutex>
10
11#include "common/common_funcs.h"
12#include "common/common_types.h"
13#include "common/page_table.h"
14#include "core/file_sys/program_metadata.h"
15#include "core/hle/kernel/memory/memory_block.h"
16#include "core/hle/kernel/memory/memory_manager.h"
17
18namespace Core {
19class System;
20}
21
22namespace Kernel::Memory {
23
24class MemoryBlockManager;
25
26class PageTable final : NonCopyable {
27public:
28 explicit PageTable(Core::System& system);
29
30 ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
31 VAddr code_addr, std::size_t code_size,
32 Memory::MemoryManager::Pool pool);
33 ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, MemoryState state,
34 MemoryPermission perm);
35 ResultCode MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
36 ResultCode UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
37 ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
38 ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size);
39 ResultCode UnmapMemory(VAddr addr, std::size_t size);
40 ResultCode Map(VAddr dst_addr, VAddr src_addr, std::size_t size);
41 ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size);
42 ResultCode MapPages(VAddr addr, PageLinkedList& page_linked_list, MemoryState state,
43 MemoryPermission perm);
44 ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, MemoryPermission perm);
45 MemoryInfo QueryInfo(VAddr addr);
46 ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, MemoryPermission perm);
47 ResultCode ResetTransferMemory(VAddr addr, std::size_t size);
48 ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, MemoryAttribute mask,
49 MemoryAttribute value);
50 ResultCode SetHeapCapacity(std::size_t new_heap_capacity);
51 ResultVal<VAddr> SetHeapSize(std::size_t size);
52 ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
53 bool is_map_only, VAddr region_start,
54 std::size_t region_num_pages, MemoryState state,
55 MemoryPermission perm, PAddr map_addr = 0);
56 PAddr GetPhysicalAddr(VAddr addr);
57
58 Common::PageTable& PageTableImpl() {
59 return page_table_impl;
60 }
61
62 const Common::PageTable& PageTableImpl() const {
63 return page_table_impl;
64 }
65
66private:
67 enum class OperationType : u32 {
68 Map,
69 MapGroup,
70 Unmap,
71 ChangePermissions,
72 ChangePermissionsAndRefresh,
73 };
74
75 static constexpr MemoryAttribute DefaultMemoryIgnoreAttr =
76 MemoryAttribute::DontCareMask | MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared;
77
78 ResultCode InitializeMemoryLayout(VAddr start, VAddr end);
79 ResultCode MapPages(VAddr addr, const PageLinkedList& page_linked_list, MemoryPermission perm);
80 void MapPhysicalMemory(PageLinkedList& page_linked_list, VAddr start, VAddr end);
81 bool IsRegionMapped(VAddr address, u64 size);
82 bool IsRegionContiguous(VAddr addr, u64 size) const;
83 void AddRegionToPages(VAddr start, std::size_t num_pages, PageLinkedList& page_linked_list);
84 MemoryInfo QueryInfoImpl(VAddr addr);
85 VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
86 std::size_t align);
87 ResultCode Operate(VAddr addr, std::size_t num_pages, const PageLinkedList& page_group,
88 OperationType operation);
89 ResultCode Operate(VAddr addr, std::size_t num_pages, MemoryPermission perm,
90 OperationType operation, PAddr map_addr = 0);
91 constexpr VAddr GetRegionAddress(MemoryState state) const;
92 constexpr std::size_t GetRegionSize(MemoryState state) const;
93 constexpr bool CanContain(VAddr addr, std::size_t size, MemoryState state) const;
94
95 constexpr ResultCode CheckMemoryState(const MemoryInfo& info, MemoryState state_mask,
96 MemoryState state, MemoryPermission perm_mask,
97 MemoryPermission perm, MemoryAttribute attr_mask,
98 MemoryAttribute attr) const;
99 ResultCode CheckMemoryState(MemoryState* out_state, MemoryPermission* out_perm,
100 MemoryAttribute* out_attr, VAddr addr, std::size_t size,
101 MemoryState state_mask, MemoryState state,
102 MemoryPermission perm_mask, MemoryPermission perm,
103 MemoryAttribute attr_mask, MemoryAttribute attr,
104 MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr);
105 ResultCode CheckMemoryState(VAddr addr, std::size_t size, MemoryState state_mask,
106 MemoryState state, MemoryPermission perm_mask,
107 MemoryPermission perm, MemoryAttribute attr_mask,
108 MemoryAttribute attr,
109 MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) {
110 return CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask,
111 perm, attr_mask, attr, ignore_attr);
112 }
113
114 std::recursive_mutex page_table_lock;
115 std::unique_ptr<MemoryBlockManager> block_manager;
116
117public:
118 constexpr VAddr GetAddressSpaceStart() const {
119 return address_space_start;
120 }
121 constexpr VAddr GetAddressSpaceEnd() const {
122 return address_space_end;
123 }
124 constexpr std::size_t GetAddressSpaceSize() const {
125 return address_space_end - address_space_start;
126 }
127 constexpr VAddr GetHeapRegionStart() const {
128 return heap_region_start;
129 }
130 constexpr VAddr GetHeapRegionEnd() const {
131 return heap_region_end;
132 }
133 constexpr std::size_t GetHeapRegionSize() const {
134 return heap_region_end - heap_region_start;
135 }
136 constexpr VAddr GetAliasRegionStart() const {
137 return alias_region_start;
138 }
139 constexpr VAddr GetAliasRegionEnd() const {
140 return alias_region_end;
141 }
142 constexpr std::size_t GetAliasRegionSize() const {
143 return alias_region_end - alias_region_start;
144 }
145 constexpr VAddr GetStackRegionStart() const {
146 return stack_region_start;
147 }
148 constexpr VAddr GetStackRegionEnd() const {
149 return stack_region_end;
150 }
151 constexpr std::size_t GetStackRegionSize() const {
152 return stack_region_end - stack_region_start;
153 }
154 constexpr VAddr GetKernelMapRegionStart() const {
155 return kernel_map_region_start;
156 }
157 constexpr VAddr GetKernelMapRegionEnd() const {
158 return kernel_map_region_end;
159 }
160 constexpr VAddr GetCodeRegionStart() const {
161 return code_region_start;
162 }
163 constexpr VAddr GetCodeRegionEnd() const {
164 return code_region_end;
165 }
166 constexpr VAddr GetAliasCodeRegionStart() const {
167 return alias_code_region_start;
168 }
169 constexpr VAddr GetAliasCodeRegionSize() const {
170 return alias_code_region_end - alias_code_region_start;
171 }
172 constexpr std::size_t GetAddressSpaceWidth() const {
173 return address_space_width;
174 }
175 constexpr std::size_t GetHeapSize() {
176 return current_heap_addr - heap_region_start;
177 }
178 constexpr std::size_t GetTotalHeapSize() {
179 return GetHeapSize() + physical_memory_usage;
180 }
181 constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const {
182 return address_space_start <= address && address + size - 1 <= address_space_end - 1;
183 }
184 constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const {
185 return alias_region_start > address || address + size - 1 > alias_region_end - 1;
186 }
187 constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const {
188 return stack_region_start > address || address + size - 1 > stack_region_end - 1;
189 }
190 constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const {
191 return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
192 }
193 constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const {
194 return address + size > heap_region_start && heap_region_end > address;
195 }
196 constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const {
197 return address + size > alias_region_start && alias_region_end > address;
198 }
199 constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const {
200 if (IsInvalidRegion(address, size)) {
201 return true;
202 }
203 if (IsInsideHeapRegion(address, size)) {
204 return true;
205 }
206 if (IsInsideAliasRegion(address, size)) {
207 return true;
208 }
209 return {};
210 }
211 constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
212 return !IsOutsideASLRRegion(address, size);
213 }
214
215private:
216 constexpr bool Contains(VAddr addr) const {
217 return address_space_start <= addr && addr <= address_space_end - 1;
218 }
219 constexpr bool Contains(VAddr addr, std::size_t size) const {
220 return address_space_start <= addr && addr < addr + size &&
221 addr + size - 1 <= address_space_end - 1;
222 }
223 constexpr bool IsKernel() const {
224 return is_kernel;
225 }
226 constexpr bool IsAslrEnabled() const {
227 return is_aslr_enabled;
228 }
229
230 constexpr std::size_t GetNumGuardPages() const {
231 return IsKernel() ? 1 : 4;
232 }
233
234 constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
235 return (address_space_start <= addr) &&
236 (num_pages <= (address_space_end - address_space_start) / PageSize) &&
237 (addr + num_pages * PageSize - 1 <= address_space_end - 1);
238 }
239
240private:
241 VAddr address_space_start{};
242 VAddr address_space_end{};
243 VAddr heap_region_start{};
244 VAddr heap_region_end{};
245 VAddr current_heap_end{};
246 VAddr alias_region_start{};
247 VAddr alias_region_end{};
248 VAddr stack_region_start{};
249 VAddr stack_region_end{};
250 VAddr kernel_map_region_start{};
251 VAddr kernel_map_region_end{};
252 VAddr code_region_start{};
253 VAddr code_region_end{};
254 VAddr alias_code_region_start{};
255 VAddr alias_code_region_end{};
256 VAddr current_heap_addr{};
257
258 std::size_t heap_capacity{};
259 std::size_t physical_memory_usage{};
260 std::size_t max_heap_size{};
261 std::size_t max_physical_memory_size{};
262 std::size_t address_space_width{};
263
264 bool is_kernel{};
265 bool is_aslr_enabled{};
266
267 MemoryManager::Pool memory_pool{MemoryManager::Pool::Application};
268
269 Common::PageTable page_table_impl;
270
271 Core::System& system;
272};
273
274} // namespace Kernel::Memory