author     Subv  2017-07-21 21:17:57 -0500
committer  Subv  2017-09-10 15:13:41 -0500
commit     6d2734a074f44a24129db850339677d8d7b436aa (patch)
tree       418be08a059813466e7ed4495fd6198b16aa4ddc  /src/core/memory.cpp
parent     Added missing parts in libnetwork (#2838) (diff)
Kernel/Memory: Give each Process its own page table.
The loader is responsible for setting the newly created process's page table as the active one during loading.
Diffstat (limited to 'src/core/memory.cpp')
-rw-r--r--   src/core/memory.cpp | 87
1 file changed, 12 insertions(+), 75 deletions(-)
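
In short: the static main_page_table singleton is deleted, every mapping helper now takes its target PageTable explicitly, and current_page_table starts out null until a loader installs a process's table. Below is a minimal, self-contained sketch of that switching scheme; it is not code from this commit, and the Process layout, table size, and ActivateProcess helper are illustrative assumptions only.

#include <array>
#include <cstdint>
#include <memory>

// Sketch of the per-process page table scheme (simplified; the real
// PageTable also tracks page attributes, MMIO regions, and cache counts).
constexpr std::size_t PAGE_TABLE_NUM_ENTRIES = std::size_t(1) << 20; // 4 GiB / 4 KiB pages

struct PageTable {
    // One host pointer per guest page; null means the page is not directly mapped.
    std::array<std::uint8_t*, PAGE_TABLE_NUM_ENTRIES> pointers{};
};

struct Process {
    // Assumption: each process owns its table (heap-allocated, since it is large).
    std::unique_ptr<PageTable> page_table = std::make_unique<PageTable>();
};

// Mirrors the new global in the diff: null until a process is loaded.
PageTable* current_page_table = nullptr;

// What the commit message describes: after building the process's mappings,
// the loader installs that process's table as the active one.
void ActivateProcess(Process& process) {
    current_page_table = process.page_table.get();
}

int main() {
    Process process;
    ActivateProcess(process);
    return current_page_table == process.page_table.get() ? 0 : 1;
}

Because every access goes through current_page_table, switching address spaces later (e.g. on a context switch) becomes a single pointer assignment rather than a remap.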
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 65649d9d7..ea46b6ead 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -11,75 +11,18 @@
 #include "core/hle/kernel/process.h"
 #include "core/memory.h"
 #include "core/memory_setup.h"
-#include "core/mmio.h"
 #include "video_core/renderer_base.h"
 #include "video_core/video_core.h"
 
 namespace Memory {
 
-enum class PageType {
-    /// Page is unmapped and should cause an access error.
-    Unmapped,
-    /// Page is mapped to regular memory. This is the only type you can get pointers to.
-    Memory,
-    /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
-    /// invalidation
-    RasterizerCachedMemory,
-    /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions.
-    Special,
-    /// Page is mapped to a I/O region, but also needs to check for rasterizer cache flushing and
-    /// invalidation
-    RasterizerCachedSpecial,
-};
-
-struct SpecialRegion {
-    VAddr base;
-    u32 size;
-    MMIORegionPointer handler;
-};
-
-/**
- * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely
- * mimics the way a real CPU page table works, but instead is optimized for minimal decoding and
- * fetching requirements when accessing. In the usual case of an access to regular memory, it only
- * requires an indexed fetch and a check for NULL.
- */
-struct PageTable {
-    /**
-     * Array of memory pointers backing each page. An entry can only be non-null if the
-     * corresponding entry in the `attributes` array is of type `Memory`.
-     */
-    std::array<u8*, PAGE_TABLE_NUM_ENTRIES> pointers;
-
-    /**
-     * Contains MMIO handlers that back memory regions whose entries in the `attribute` array is of
-     * type `Special`.
-     */
-    std::vector<SpecialRegion> special_regions;
-
-    /**
-     * Array of fine grained page attributes. If it is set to any value other than `Memory`, then
-     * the corresponding entry in `pointers` MUST be set to null.
-     */
-    std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes;
-
-    /**
-     * Indicates the number of externally cached resources touching a page that should be
-     * flushed before the memory is accessed
-     */
-    std::array<u8, PAGE_TABLE_NUM_ENTRIES> cached_res_count;
-};
-
-/// Singular page table used for the singleton process
-static PageTable main_page_table;
-/// Currently active page table
-static PageTable* current_page_table = &main_page_table;
+PageTable* current_page_table = nullptr;
 
 std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetCurrentPageTablePointers() {
     return &current_page_table->pointers;
 }
 
-static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
+static void MapPages(PageTable& page_table, u32 base, u32 size, u8* memory, PageType type) {
     LOG_DEBUG(HW_Memory, "Mapping %p onto %08X-%08X", memory, base * PAGE_SIZE,
               (base + size) * PAGE_SIZE);
 
@@ -90,9 +33,9 @@ static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
     while (base != end) {
         ASSERT_MSG(base < PAGE_TABLE_NUM_ENTRIES, "out of range mapping at %08X", base);
 
-        current_page_table->attributes[base] = type;
-        current_page_table->pointers[base] = memory;
-        current_page_table->cached_res_count[base] = 0;
+        page_table.attributes[base] = type;
+        page_table.pointers[base] = memory;
+        page_table.cached_res_count[base] = 0;
 
         base += 1;
         if (memory != nullptr)
@@ -100,30 +43,24 @@ static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
     }
 }
 
-void InitMemoryMap() {
-    main_page_table.pointers.fill(nullptr);
-    main_page_table.attributes.fill(PageType::Unmapped);
-    main_page_table.cached_res_count.fill(0);
-}
-
-void MapMemoryRegion(VAddr base, u32 size, u8* target) {
+void MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, u8* target) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
-    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
 }
 
-void MapIoRegion(VAddr base, u32 size, MMIORegionPointer mmio_handler) {
+void MapIoRegion(PageTable& page_table, VAddr base, u32 size, MMIORegionPointer mmio_handler) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
-    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
 
-    current_page_table->special_regions.emplace_back(SpecialRegion{base, size, mmio_handler});
+    page_table.special_regions.emplace_back(SpecialRegion{base, size, mmio_handler});
 }
 
-void UnmapRegion(VAddr base, u32 size) {
+void UnmapRegion(PageTable& page_table, VAddr base, u32 size) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
-    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
 }
 
 /**
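
With the refactor, every call site names the table it mutates, and the per-table initialization that the deleted InitMemoryMap() used to perform must now happen wherever a table is created. A hedged usage sketch follows; it is not code from the commit, it assumes PageTable, PageType, and current_page_table are now exposed through core/memory.h (their definitions left this file), and the guest address, size, and backing buffer are made up.

#include <vector>
#include "core/memory.h"

void MapExampleRegion(Memory::PageTable& table, std::vector<u8>& backing) {
    // Per-table setup formerly done once by InitMemoryMap():
    table.pointers.fill(nullptr);
    table.attributes.fill(Memory::PageType::Unmapped);
    table.cached_res_count.fill(0);

    // Map a (page-aligned) backing buffer at a made-up guest address.
    Memory::MapMemoryRegion(table, 0x20000000, static_cast<u32>(backing.size()),
                            backing.data());

    // Make this table the active one, as the loader now does after loading.
    Memory::current_page_table = &table;
}

Passing the table explicitly keeps MapPages and its callers free of hidden global state, which is what allows several live page tables, one per process, to coexist.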