author     B3n30    2017-09-15 22:41:45 +0200
committer  GitHub   2017-09-15 22:41:45 +0200
commit     813837c5cf3e63a4ac08f4ec463bd2b2b87ab1c6 (patch)
tree       df43bf978de3b699a22650d3ff2a3ebb5d86b2de /src/core/memory.cpp
parent     Merge pull request #2915 from wwylele/font-archive-2 (diff)
parent     CPU/Dynarmic: Disable the fast page-table access in dynarmic until it support... (diff)
Merge pull request #2842 from Subv/switchable_page_table
Kernel/Memory: Give each process its own page table and allow switching the current page table upon reschedule
Diffstat (limited to 'src/core/memory.cpp')
-rw-r--r--  src/core/memory.cpp  153
1 file changed, 74 insertions(+), 79 deletions(-)
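The merge replaces the single global page table in memory.cpp with a PageTable that can be owned per process and swapped in on a context switch, as the commit message above describes. Below is a minimal standalone sketch of the switching side, under the assumption that the process object embeds a Memory::PageTable and that the scheduler installs it through a helper; names such as Process::page_table, SetCurrentPageTable, and OnReschedule are illustrative, not taken from this diff.

// Standalone sketch (not yuzu code): per-process page tables installed on reschedule.
// The table size is shrunk so the example stays cheap to instantiate.
#include <array>
#include <cstdint>

constexpr std::size_t PAGE_TABLE_NUM_ENTRIES = 16; // real table: one entry per page of the 32-bit space

struct PageTable {
    std::array<std::uint8_t*, PAGE_TABLE_NUM_ENTRIES> pointers{}; // host memory backing each page
};

// Mirrors Memory::current_page_table in the diff below: a non-owning pointer to the
// table of whichever process is currently scheduled.
PageTable* current_page_table = nullptr;

struct Process {
    PageTable page_table; // assumption: each process now owns its own table
};

void SetCurrentPageTable(PageTable* table) {
    current_page_table = table;
}

// Assumption: called by the scheduler when it switches to `next`.
void OnReschedule(Process& next) {
    SetCurrentPageTable(&next.page_table);
}

int main() {
    Process p1, p2;
    OnReschedule(p1); // memory accesses now resolve through p1's mappings
    OnReschedule(p2); // after a context switch, through p2's
}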
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 097bc5b47..68a6b1ac2 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -4,83 +4,31 @@
 
 #include <array>
 #include <cstring>
+#include "audio_core/audio_core.h"
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "common/swap.h"
+#include "core/hle/kernel/memory.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/lock.h"
 #include "core/memory.h"
 #include "core/memory_setup.h"
-#include "core/mmio.h"
 #include "video_core/renderer_base.h"
 #include "video_core/video_core.h"
 
 namespace Memory {
 
-enum class PageType {
-    /// Page is unmapped and should cause an access error.
-    Unmapped,
-    /// Page is mapped to regular memory. This is the only type you can get pointers to.
-    Memory,
-    /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
-    /// invalidation
-    RasterizerCachedMemory,
-    /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions.
-    Special,
-    /// Page is mapped to a I/O region, but also needs to check for rasterizer cache flushing and
-    /// invalidation
-    RasterizerCachedSpecial,
-};
-
-struct SpecialRegion {
-    VAddr base;
-    u32 size;
-    MMIORegionPointer handler;
-};
+static std::array<u8, Memory::VRAM_SIZE> vram;
+static std::array<u8, Memory::N3DS_EXTRA_RAM_SIZE> n3ds_extra_ram;
 
-/**
- * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely
- * mimics the way a real CPU page table works, but instead is optimized for minimal decoding and
- * fetching requirements when accessing. In the usual case of an access to regular memory, it only
- * requires an indexed fetch and a check for NULL.
- */
-struct PageTable {
-    /**
-     * Array of memory pointers backing each page. An entry can only be non-null if the
-     * corresponding entry in the `attributes` array is of type `Memory`.
-     */
-    std::array<u8*, PAGE_TABLE_NUM_ENTRIES> pointers;
-
-    /**
-     * Contains MMIO handlers that back memory regions whose entries in the `attribute` array is of
-     * type `Special`.
-     */
-    std::vector<SpecialRegion> special_regions;
-
-    /**
-     * Array of fine grained page attributes. If it is set to any value other than `Memory`, then
-     * the corresponding entry in `pointers` MUST be set to null.
-     */
-    std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes;
-
-    /**
-     * Indicates the number of externally cached resources touching a page that should be
-     * flushed before the memory is accessed
-     */
-    std::array<u8, PAGE_TABLE_NUM_ENTRIES> cached_res_count;
-};
-
-/// Singular page table used for the singleton process
-static PageTable main_page_table;
-/// Currently active page table
-static PageTable* current_page_table = &main_page_table;
+PageTable* current_page_table = nullptr;
 
 std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetCurrentPageTablePointers() {
     return &current_page_table->pointers;
 }
 
-static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
+static void MapPages(PageTable& page_table, u32 base, u32 size, u8* memory, PageType type) {
     LOG_DEBUG(HW_Memory, "Mapping %p onto %08X-%08X", memory, base * PAGE_SIZE,
               (base + size) * PAGE_SIZE);
 
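The PageType, SpecialRegion, and PageTable definitions removed above do not go away; for a process to own a page table they have to become visible outside memory.cpp, presumably by moving into core/memory.h (the core/mmio.h include dropped from the .cpp reappearing in the header would be part of the same move). A condensed sketch of the relocated declarations, taken from the removed lines, with the header location as an assumption:

// core/memory.h (assumed location): declarations moved out of memory.cpp so the
// kernel can embed a Memory::PageTable in each process. Types such as VAddr, u8,
// u32, MMIORegionPointer and PAGE_TABLE_NUM_ENTRIES come from the existing headers.
#include <array>
#include <vector>
#include "common/common_types.h"
#include "core/mmio.h"

namespace Memory {

enum class PageType {
    Unmapped,                ///< Unmapped; accesses should fault.
    Memory,                  ///< Backed by host memory; the only type pointers are handed out for.
    RasterizerCachedMemory,  ///< Backed by host memory, plus rasterizer flush/invalidate checks.
    Special,                 ///< Backed by MMIO handlers.
    RasterizerCachedSpecial, ///< MMIO-backed, plus rasterizer flush/invalidate checks.
};

struct SpecialRegion {
    VAddr base;
    u32 size;
    MMIORegionPointer handler;
};

/// Switchable, remappable description of one process' address space.
struct PageTable {
    /// Host pointer backing each page; non-null only when the matching attribute is Memory.
    std::array<u8*, PAGE_TABLE_NUM_ENTRIES> pointers;
    /// MMIO handlers backing pages whose attribute is Special.
    std::vector<SpecialRegion> special_regions;
    /// Per-page type; anything other than Memory forces the matching pointer to null.
    std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes;
    /// Count of externally cached resources touching each page (flush before access).
    std::array<u8, PAGE_TABLE_NUM_ENTRIES> cached_res_count;
};

} // namespace Memory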
@@ -91,9 +39,9 @@ static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
     while (base != end) {
         ASSERT_MSG(base < PAGE_TABLE_NUM_ENTRIES, "out of range mapping at %08X", base);
 
-        current_page_table->attributes[base] = type;
-        current_page_table->pointers[base] = memory;
-        current_page_table->cached_res_count[base] = 0;
+        page_table.attributes[base] = type;
+        page_table.pointers[base] = memory;
+        page_table.cached_res_count[base] = 0;
 
         base += 1;
         if (memory != nullptr)
@@ -101,30 +49,24 @@ static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
     }
 }
 
-void InitMemoryMap() {
-    main_page_table.pointers.fill(nullptr);
-    main_page_table.attributes.fill(PageType::Unmapped);
-    main_page_table.cached_res_count.fill(0);
-}
-
-void MapMemoryRegion(VAddr base, u32 size, u8* target) {
+void MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, u8* target) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
-    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
 }
 
-void MapIoRegion(VAddr base, u32 size, MMIORegionPointer mmio_handler) {
+void MapIoRegion(PageTable& page_table, VAddr base, u32 size, MMIORegionPointer mmio_handler) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
-    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
 
-    current_page_table->special_regions.emplace_back(SpecialRegion{base, size, mmio_handler});
+    page_table.special_regions.emplace_back(SpecialRegion{base, size, mmio_handler});
 }
 
-void UnmapRegion(VAddr base, u32 size) {
+void UnmapRegion(PageTable& page_table, VAddr base, u32 size) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
-    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
 }
 
 /**
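With the table passed in explicitly, the fixed InitMemoryMap() above disappears and each page table has to be cleared before regions are mapped into it. A hedged sketch of how a caller might drive the reworked API for one process; the InitAddressSpace helper, its parameters, and the chosen virtual address are illustrative, and LINEAR_HEAP_VADDR is assumed to be a constant from core/memory.h:

// Sketch: preparing one process' address space with the page-table-parameterized API.
#include "core/memory.h"
#include "core/memory_setup.h"

void InitAddressSpace(Memory::PageTable& page_table, u8* heap_backing, u32 heap_size) {
    // What InitMemoryMap() used to do once for the global table now happens per table.
    page_table.pointers.fill(nullptr);
    page_table.attributes.fill(Memory::PageType::Unmapped);
    page_table.cached_res_count.fill(0);

    // Map a page-aligned block of host memory; only processes using this table see it.
    Memory::MapMemoryRegion(page_table, Memory::LINEAR_HEAP_VADDR, heap_size, heap_backing);
}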
@@ -273,8 +215,7 @@ bool IsValidVirtualAddress(const VAddr vaddr) {
 }
 
 bool IsValidPhysicalAddress(const PAddr paddr) {
-    boost::optional<VAddr> vaddr = PhysicalToVirtualAddress(paddr);
-    return vaddr && IsValidVirtualAddress(*vaddr);
+    return GetPhysicalPointer(paddr) != nullptr;
 }
 
 u8* GetPointer(const VAddr vaddr) {
@@ -306,9 +247,63 @@ std::string ReadCString(VAddr vaddr, std::size_t max_length) {
 }
 
 u8* GetPhysicalPointer(PAddr address) {
-    // TODO(Subv): This call should not go through the application's memory mapping.
-    boost::optional<VAddr> vaddr = PhysicalToVirtualAddress(address);
-    return vaddr ? GetPointer(*vaddr) : nullptr;
+    struct MemoryArea {
+        PAddr paddr_base;
+        u32 size;
+    };
+
+    static constexpr MemoryArea memory_areas[] = {
+        {VRAM_PADDR, VRAM_SIZE},
+        {IO_AREA_PADDR, IO_AREA_SIZE},
+        {DSP_RAM_PADDR, DSP_RAM_SIZE},
+        {FCRAM_PADDR, FCRAM_N3DS_SIZE},
+        {N3DS_EXTRA_RAM_PADDR, N3DS_EXTRA_RAM_SIZE},
+    };
+
+    const auto area =
+        std::find_if(std::begin(memory_areas), std::end(memory_areas), [&](const auto& area) {
+            return address >= area.paddr_base && address < area.paddr_base + area.size;
+        });
+
+    if (area == std::end(memory_areas)) {
+        LOG_ERROR(HW_Memory, "unknown GetPhysicalPointer @ 0x%08X", address);
+        return nullptr;
+    }
+
+    if (area->paddr_base == IO_AREA_PADDR) {
+        LOG_ERROR(HW_Memory, "MMIO mappings are not supported yet. phys_addr=0x%08X", address);
+        return nullptr;
+    }
+
+    u32 offset_into_region = address - area->paddr_base;
+
+    u8* target_pointer = nullptr;
+    switch (area->paddr_base) {
+    case VRAM_PADDR:
+        target_pointer = vram.data() + offset_into_region;
+        break;
+    case DSP_RAM_PADDR:
+        target_pointer = AudioCore::GetDspMemory().data() + offset_into_region;
+        break;
+    case FCRAM_PADDR:
+        for (const auto& region : Kernel::memory_regions) {
+            if (offset_into_region >= region.base &&
+                offset_into_region < region.base + region.size) {
+                target_pointer =
+                    region.linear_heap_memory->data() + offset_into_region - region.base;
+                break;
+            }
+        }
+        ASSERT_MSG(target_pointer != nullptr, "Invalid FCRAM address");
+        break;
+    case N3DS_EXTRA_RAM_PADDR:
+        target_pointer = n3ds_extra_ram.data() + offset_into_region;
+        break;
+    default:
+        UNREACHABLE();
+    }
+
+    return target_pointer;
 }
 
 void RasterizerMarkRegionCached(PAddr start, u32 size, int count_delta) {
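The rewritten GetPhysicalPointer resolves a physical address directly against the table of physical regions and their host backing stores instead of translating through the current process' virtual mapping, which is what makes the IsValidPhysicalAddress change above a plain null check and keeps physical accesses correct no matter which page table is installed. A short usage illustration; the offset is an arbitrary example value:

// Usage sketch: physical pointers are now independent of the current page table.
u8* vram_byte = Memory::GetPhysicalPointer(Memory::VRAM_PADDR + 0x1000); // example offset
if (vram_byte != nullptr) {
    *vram_byte = 0xFF; // writes straight into the emulated VRAM backing array
}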