Diffstat (limited to 'src')
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic.cpp      4
-rw-r--r--  src/core/core.cpp                            1
-rw-r--r--  src/core/hle/kernel/memory.cpp              30
-rw-r--r--  src/core/hle/kernel/memory.h                 2
-rw-r--r--  src/core/hle/kernel/thread.cpp              12
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp          13
-rw-r--r--  src/core/hle/kernel/vm_manager.h             6
-rw-r--r--  src/core/loader/3dsx.cpp                     1
-rw-r--r--  src/core/loader/elf.cpp                      1
-rw-r--r--  src/core/loader/ncch.cpp                     1
-rw-r--r--  src/core/memory.cpp                        153
-rw-r--r--  src/core/memory.h                           62
-rw-r--r--  src/core/memory_setup.h                     10
-rw-r--r--  src/tests/core/arm/arm_test_common.cpp      18
14 files changed, 191 insertions, 123 deletions
diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp
index 0a0b91590..34c5aa381 100644
--- a/src/core/arm/dynarmic/arm_dynarmic.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic.cpp
@@ -56,7 +56,9 @@ static Dynarmic::UserCallbacks GetUserCallbacks(
     user_callbacks.memory.Write16 = &Memory::Write16;
     user_callbacks.memory.Write32 = &Memory::Write32;
     user_callbacks.memory.Write64 = &Memory::Write64;
-    user_callbacks.page_table = Memory::GetCurrentPageTablePointers();
+    // TODO(Subv): Re-add the page table pointers once dynarmic supports switching page tables at
+    // runtime.
+    user_callbacks.page_table = nullptr;
     user_callbacks.coprocessors[15] = std::make_shared<DynarmicCP15>(interpeter_state);
     return user_callbacks;
 }
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 5332318cf..59b8768e7 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -137,7 +137,6 @@ void System::Reschedule() {
 }
 
 System::ResultStatus System::Init(EmuWindow* emu_window, u32 system_mode) {
-    Memory::InitMemoryMap();
     LOG_DEBUG(HW_Memory, "initialized OK");
 
     if (Settings::values.use_cpu_jit) {
diff --git a/src/core/hle/kernel/memory.cpp b/src/core/hle/kernel/memory.cpp
index 496d07cb5..7f27e9655 100644
--- a/src/core/hle/kernel/memory.cpp
+++ b/src/core/hle/kernel/memory.cpp
@@ -8,7 +8,6 @@
 #include <memory>
 #include <utility>
 #include <vector>
-#include "audio_core/audio_core.h"
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/logging/log.h"
@@ -24,7 +23,7 @@
 
 namespace Kernel {
 
-static MemoryRegionInfo memory_regions[3];
+MemoryRegionInfo memory_regions[3];
 
 /// Size of the APPLICATION, SYSTEM and BASE memory regions (respectively) for each system
 /// memory configuration type.
@@ -96,9 +95,6 @@ MemoryRegionInfo* GetMemoryRegion(MemoryRegion region) {
     }
 }
 
-std::array<u8, Memory::VRAM_SIZE> vram;
-std::array<u8, Memory::N3DS_EXTRA_RAM_SIZE> n3ds_extra_ram;
-
 void HandleSpecialMapping(VMManager& address_space, const AddressMapping& mapping) {
     using namespace Memory;
 
@@ -143,30 +139,14 @@ void HandleSpecialMapping(VMManager& address_space, const AddressMapping& mappin
         return;
     }
 
-    // TODO(yuriks): Use GetPhysicalPointer when that becomes independent of the virtual
-    // mappings.
-    u8* target_pointer = nullptr;
-    switch (area->paddr_base) {
-    case VRAM_PADDR:
-        target_pointer = vram.data();
-        break;
-    case DSP_RAM_PADDR:
-        target_pointer = AudioCore::GetDspMemory().data();
-        break;
-    case N3DS_EXTRA_RAM_PADDR:
-        target_pointer = n3ds_extra_ram.data();
-        break;
-    default:
-        UNREACHABLE();
-    }
+    u8* target_pointer = Memory::GetPhysicalPointer(area->paddr_base + offset_into_region);
 
     // TODO(yuriks): This flag seems to have some other effect, but it's unknown what
     MemoryState memory_state = mapping.unk_flag ? MemoryState::Static : MemoryState::IO;
 
-    auto vma = address_space
-                   .MapBackingMemory(mapping.address, target_pointer + offset_into_region,
-                                     mapping.size, memory_state)
-                   .Unwrap();
+    auto vma =
+        address_space.MapBackingMemory(mapping.address, target_pointer, mapping.size, memory_state)
+            .Unwrap();
     address_space.Reprotect(vma,
                             mapping.read_only ? VMAPermission::Read : VMAPermission::ReadWrite);
 }
diff --git a/src/core/hle/kernel/memory.h b/src/core/hle/kernel/memory.h
index 08c1a9989..da6bb3563 100644
--- a/src/core/hle/kernel/memory.h
+++ b/src/core/hle/kernel/memory.h
@@ -26,4 +26,6 @@ MemoryRegionInfo* GetMemoryRegion(MemoryRegion region);
 
 void HandleSpecialMapping(VMManager& address_space, const AddressMapping& mapping);
 void MapSharedPages(VMManager& address_space);
+
+extern MemoryRegionInfo memory_regions[3];
 } // namespace Kernel
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index b957c45dd..324415a36 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -171,6 +171,8 @@ static void SwitchContext(Thread* new_thread) {
         // Cancel any outstanding wakeup events for this thread
         CoreTiming::UnscheduleEvent(ThreadWakeupEventType, new_thread->callback_handle);
 
+        auto previous_process = Kernel::g_current_process;
+
         current_thread = new_thread;
 
         ready_queue.remove(new_thread->current_priority, new_thread);
@@ -178,8 +180,18 @@ static void SwitchContext(Thread* new_thread) {
 
         Core::CPU().LoadContext(new_thread->context);
         Core::CPU().SetCP15Register(CP15_THREAD_URO, new_thread->GetTLSAddress());
+
+        if (previous_process != current_thread->owner_process) {
+            Kernel::g_current_process = current_thread->owner_process;
+            Memory::current_page_table = &Kernel::g_current_process->vm_manager.page_table;
+            // We have switched processes and thus, page tables, clear the instruction cache so we
+            // don't keep stale data from the previous process.
+            Core::CPU().ClearInstructionCache();
+        }
     } else {
         current_thread = nullptr;
+        // Note: We do not reset the current process and current page table when idling because
+        // technically we haven't changed processes, our threads are just paused.
     }
 }
 
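The comments added to SwitchContext above carry the key rule: the page table and instruction cache only need to change when the incoming thread belongs to a different process. A standalone sketch of that guard follows; Thread, Process, PageTable and ClearInstructionCache() are reduced stand-ins for the real Kernel/Core types, not Citra code.

#include <array>
#include <cstdint>

// Reduced stand-ins for Kernel::Process, Kernel::Thread and Memory::PageTable;
// ClearInstructionCache() stands in for Core::CPU().ClearInstructionCache().
struct PageTable {
    std::array<std::uint8_t*, 256> pointers{};
};
struct Process {
    PageTable page_table;
};
struct Thread {
    Process* owner_process;
};

static Process* g_current_process = nullptr;
static PageTable* current_page_table = nullptr;

static void ClearInstructionCache() {}

static void SwitchContext(Thread* new_thread, Process* previous_process) {
    if (new_thread == nullptr) {
        return; // idling: process and page table are unchanged
    }
    if (previous_process != new_thread->owner_process) {
        // Cross-process switch: swap the active page table and flush the icache so
        // no translations from the previous address space survive.
        g_current_process = new_thread->owner_process;
        current_page_table = &g_current_process->page_table;
        ClearInstructionCache();
    }
}

int main() {
    Process p1{}, p2{};
    Thread t1{&p1}, t2{&p2};
    SwitchContext(&t1, nullptr); // installs p1's page table
    SwitchContext(&t2, &p1);     // cross-process: swaps tables and flushes icache
}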
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index cef1f7fa8..7a007c065 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -56,6 +56,10 @@ void VMManager::Reset() {
     initial_vma.size = MAX_ADDRESS;
     vma_map.emplace(initial_vma.base, initial_vma);
 
+    page_table.pointers.fill(nullptr);
+    page_table.attributes.fill(Memory::PageType::Unmapped);
+    page_table.cached_res_count.fill(0);
+
     UpdatePageTableForVMA(initial_vma);
 }
 
@@ -328,16 +332,17 @@ VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
 void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
     switch (vma.type) {
     case VMAType::Free:
-        Memory::UnmapRegion(vma.base, vma.size);
+        Memory::UnmapRegion(page_table, vma.base, vma.size);
         break;
     case VMAType::AllocatedMemoryBlock:
-        Memory::MapMemoryRegion(vma.base, vma.size, vma.backing_block->data() + vma.offset);
+        Memory::MapMemoryRegion(page_table, vma.base, vma.size,
+                                vma.backing_block->data() + vma.offset);
         break;
     case VMAType::BackingMemory:
-        Memory::MapMemoryRegion(vma.base, vma.size, vma.backing_memory);
+        Memory::MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory);
         break;
     case VMAType::MMIO:
-        Memory::MapIoRegion(vma.base, vma.size, vma.mmio_handler);
+        Memory::MapIoRegion(page_table, vma.base, vma.size, vma.mmio_handler);
         break;
     }
 }
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 38e0d74d0..1302527bb 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -9,6 +9,7 @@
 #include <vector>
 #include "common/common_types.h"
 #include "core/hle/result.h"
+#include "core/memory.h"
 #include "core/mmio.h"
 
 namespace Kernel {
@@ -102,7 +103,6 @@ struct VirtualMemoryArea {
  * - http://duartes.org/gustavo/blog/post/page-cache-the-affair-between-memory-and-files/
  */
 class VMManager final {
-    // TODO(yuriks): Make page tables switchable to support multiple VMManagers
 public:
     /**
      * The maximum amount of address space managed by the kernel. Addresses above this are never
@@ -184,6 +184,10 @@ public:
     /// Dumps the address space layout to the log, for debugging
     void LogLayout(Log::Level log_level) const;
 
+    /// Each VMManager has its own page table, which is set as the main one when the owning process
+    /// is scheduled.
+    Memory::PageTable page_table;
+
 private:
     using VMAIter = decltype(vma_map)::iterator;
 
diff --git a/src/core/loader/3dsx.cpp b/src/core/loader/3dsx.cpp
index 74e336487..69cdc0867 100644
--- a/src/core/loader/3dsx.cpp
+++ b/src/core/loader/3dsx.cpp
@@ -270,6 +270,7 @@ ResultStatus AppLoader_THREEDSX::Load() {
     Kernel::g_current_process = Kernel::Process::Create(std::move(codeset));
     Kernel::g_current_process->svc_access_mask.set();
     Kernel::g_current_process->address_mappings = default_address_mappings;
+    Memory::current_page_table = &Kernel::g_current_process->vm_manager.page_table;
 
     // Attach the default resource limit (APPLICATION) to the process
     Kernel::g_current_process->resource_limit =
diff --git a/src/core/loader/elf.cpp b/src/core/loader/elf.cpp
index cfcde9167..2f27606a1 100644
--- a/src/core/loader/elf.cpp
+++ b/src/core/loader/elf.cpp
@@ -397,6 +397,7 @@ ResultStatus AppLoader_ELF::Load() {
     Kernel::g_current_process = Kernel::Process::Create(std::move(codeset));
     Kernel::g_current_process->svc_access_mask.set();
     Kernel::g_current_process->address_mappings = default_address_mappings;
+    Memory::current_page_table = &Kernel::g_current_process->vm_manager.page_table;
 
     // Attach the default resource limit (APPLICATION) to the process
     Kernel::g_current_process->resource_limit =
diff --git a/src/core/loader/ncch.cpp b/src/core/loader/ncch.cpp
index 7aff7f29b..79ea50147 100644
--- a/src/core/loader/ncch.cpp
+++ b/src/core/loader/ncch.cpp
@@ -172,6 +172,7 @@ ResultStatus AppLoader_NCCH::LoadExec() {
     codeset->memory = std::make_shared<std::vector<u8>>(std::move(code));
 
     Kernel::g_current_process = Kernel::Process::Create(std::move(codeset));
+    Memory::current_page_table = &Kernel::g_current_process->vm_manager.page_table;
 
     // Attach a resource limit to the process based on the resource limit category
     Kernel::g_current_process->resource_limit =
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 097bc5b47..68a6b1ac2 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -4,83 +4,31 @@
 
 #include <array>
 #include <cstring>
+#include "audio_core/audio_core.h"
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "common/swap.h"
+#include "core/hle/kernel/memory.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/lock.h"
 #include "core/memory.h"
 #include "core/memory_setup.h"
-#include "core/mmio.h"
 #include "video_core/renderer_base.h"
 #include "video_core/video_core.h"
 
 namespace Memory {
 
-enum class PageType {
-    /// Page is unmapped and should cause an access error.
-    Unmapped,
-    /// Page is mapped to regular memory. This is the only type you can get pointers to.
-    Memory,
-    /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
-    /// invalidation
-    RasterizerCachedMemory,
-    /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions.
-    Special,
-    /// Page is mapped to a I/O region, but also needs to check for rasterizer cache flushing and
-    /// invalidation
-    RasterizerCachedSpecial,
-};
-
-struct SpecialRegion {
-    VAddr base;
-    u32 size;
-    MMIORegionPointer handler;
-};
+static std::array<u8, Memory::VRAM_SIZE> vram;
+static std::array<u8, Memory::N3DS_EXTRA_RAM_SIZE> n3ds_extra_ram;
 
-/**
- * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely
- * mimics the way a real CPU page table works, but instead is optimized for minimal decoding and
- * fetching requirements when accessing. In the usual case of an access to regular memory, it only
- * requires an indexed fetch and a check for NULL.
- */
-struct PageTable {
-    /**
-     * Array of memory pointers backing each page. An entry can only be non-null if the
-     * corresponding entry in the `attributes` array is of type `Memory`.
-     */
-    std::array<u8*, PAGE_TABLE_NUM_ENTRIES> pointers;
-
-    /**
-     * Contains MMIO handlers that back memory regions whose entries in the `attribute` array is of
-     * type `Special`.
-     */
-    std::vector<SpecialRegion> special_regions;
-
-    /**
-     * Array of fine grained page attributes. If it is set to any value other than `Memory`, then
-     * the corresponding entry in `pointers` MUST be set to null.
-     */
-    std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes;
-
-    /**
-     * Indicates the number of externally cached resources touching a page that should be
-     * flushed before the memory is accessed
-     */
-    std::array<u8, PAGE_TABLE_NUM_ENTRIES> cached_res_count;
-};
-
-/// Singular page table used for the singleton process
-static PageTable main_page_table;
-/// Currently active page table
-static PageTable* current_page_table = &main_page_table;
+PageTable* current_page_table = nullptr;
 
 std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetCurrentPageTablePointers() {
     return &current_page_table->pointers;
 }
 
-static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
+static void MapPages(PageTable& page_table, u32 base, u32 size, u8* memory, PageType type) {
     LOG_DEBUG(HW_Memory, "Mapping %p onto %08X-%08X", memory, base * PAGE_SIZE,
               (base + size) * PAGE_SIZE);
 
@@ -91,9 +39,9 @@ static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
     while (base != end) {
         ASSERT_MSG(base < PAGE_TABLE_NUM_ENTRIES, "out of range mapping at %08X", base);
 
-        current_page_table->attributes[base] = type;
-        current_page_table->pointers[base] = memory;
-        current_page_table->cached_res_count[base] = 0;
+        page_table.attributes[base] = type;
+        page_table.pointers[base] = memory;
+        page_table.cached_res_count[base] = 0;
 
         base += 1;
         if (memory != nullptr)
@@ -101,30 +49,24 @@ static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
     }
 }
 
-void InitMemoryMap() {
-    main_page_table.pointers.fill(nullptr);
-    main_page_table.attributes.fill(PageType::Unmapped);
-    main_page_table.cached_res_count.fill(0);
-}
-
-void MapMemoryRegion(VAddr base, u32 size, u8* target) {
+void MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, u8* target) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
-    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
 }
 
-void MapIoRegion(VAddr base, u32 size, MMIORegionPointer mmio_handler) {
+void MapIoRegion(PageTable& page_table, VAddr base, u32 size, MMIORegionPointer mmio_handler) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
-    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
 
-    current_page_table->special_regions.emplace_back(SpecialRegion{base, size, mmio_handler});
+    page_table.special_regions.emplace_back(SpecialRegion{base, size, mmio_handler});
 }
 
-void UnmapRegion(VAddr base, u32 size) {
+void UnmapRegion(PageTable& page_table, VAddr base, u32 size) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
-    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
 }
 
 /**
@@ -273,8 +215,7 @@ bool IsValidVirtualAddress(const VAddr vaddr) {
 }
 
 bool IsValidPhysicalAddress(const PAddr paddr) {
-    boost::optional<VAddr> vaddr = PhysicalToVirtualAddress(paddr);
-    return vaddr && IsValidVirtualAddress(*vaddr);
+    return GetPhysicalPointer(paddr) != nullptr;
 }
 
 u8* GetPointer(const VAddr vaddr) {
@@ -306,9 +247,63 @@ std::string ReadCString(VAddr vaddr, std::size_t max_length) {
 }
 
 u8* GetPhysicalPointer(PAddr address) {
-    // TODO(Subv): This call should not go through the application's memory mapping.
-    boost::optional<VAddr> vaddr = PhysicalToVirtualAddress(address);
-    return vaddr ? GetPointer(*vaddr) : nullptr;
+    struct MemoryArea {
+        PAddr paddr_base;
+        u32 size;
+    };
+
+    static constexpr MemoryArea memory_areas[] = {
+        {VRAM_PADDR, VRAM_SIZE},
+        {IO_AREA_PADDR, IO_AREA_SIZE},
+        {DSP_RAM_PADDR, DSP_RAM_SIZE},
+        {FCRAM_PADDR, FCRAM_N3DS_SIZE},
+        {N3DS_EXTRA_RAM_PADDR, N3DS_EXTRA_RAM_SIZE},
+    };
+
+    const auto area =
+        std::find_if(std::begin(memory_areas), std::end(memory_areas), [&](const auto& area) {
+            return address >= area.paddr_base && address < area.paddr_base + area.size;
+        });
+
+    if (area == std::end(memory_areas)) {
+        LOG_ERROR(HW_Memory, "unknown GetPhysicalPointer @ 0x%08X", address);
+        return nullptr;
+    }
+
+    if (area->paddr_base == IO_AREA_PADDR) {
+        LOG_ERROR(HW_Memory, "MMIO mappings are not supported yet. phys_addr=0x%08X", address);
+        return nullptr;
+    }
+
+    u32 offset_into_region = address - area->paddr_base;
+
+    u8* target_pointer = nullptr;
+    switch (area->paddr_base) {
+    case VRAM_PADDR:
+        target_pointer = vram.data() + offset_into_region;
+        break;
+    case DSP_RAM_PADDR:
+        target_pointer = AudioCore::GetDspMemory().data() + offset_into_region;
+        break;
+    case FCRAM_PADDR:
+        for (const auto& region : Kernel::memory_regions) {
+            if (offset_into_region >= region.base &&
+                offset_into_region < region.base + region.size) {
+                target_pointer =
+                    region.linear_heap_memory->data() + offset_into_region - region.base;
+                break;
+            }
+        }
+        ASSERT_MSG(target_pointer != nullptr, "Invalid FCRAM address");
+        break;
+    case N3DS_EXTRA_RAM_PADDR:
+        target_pointer = n3ds_extra_ram.data() + offset_into_region;
+        break;
+    default:
+        UNREACHABLE();
+    }
+
+    return target_pointer;
 }
 
 void RasterizerMarkRegionCached(PAddr start, u32 size, int count_delta) {
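The reworked GetPhysicalPointer resolves a physical address in two steps: find the physical area containing the address, then offset into that area's host backing buffer. A self-contained model of that lookup follows; the base addresses, sizes and buffers are illustrative values, not the emulator's real constants.

#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdio>
#include <iterator>

using PAddr = std::uint32_t;

// Illustrative backing buffers; the real code uses VRAM, DSP RAM, FCRAM, etc.
static std::array<std::uint8_t, 0x1000> fake_vram{};
static std::array<std::uint8_t, 0x1000> fake_dsp_ram{};

struct MemoryArea {
    PAddr paddr_base;
    std::uint32_t size;
    std::uint8_t* backing; // the real function selects the buffer in a switch
};

std::uint8_t* GetPhysicalPointerModel(PAddr address) {
    static const MemoryArea areas[] = {
        {0x18000000, 0x1000, fake_vram.data()},    // illustrative "VRAM" base
        {0x1FF00000, 0x1000, fake_dsp_ram.data()}, // illustrative "DSP RAM" base
    };
    const auto area = std::find_if(std::begin(areas), std::end(areas), [&](const MemoryArea& a) {
        return address >= a.paddr_base && address < a.paddr_base + a.size;
    });
    if (area == std::end(areas))
        return nullptr; // unknown physical address: the real code logs and bails out
    return area->backing + (address - area->paddr_base);
}

int main() {
    std::printf("mapped: %s\n", GetPhysicalPointerModel(0x18000010) ? "yes" : "no");
}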
diff --git a/src/core/memory.h b/src/core/memory.h
index c8c56babd..b228a48c2 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -7,8 +7,10 @@
 #include <array>
 #include <cstddef>
 #include <string>
+#include <vector>
 #include <boost/optional.hpp>
 #include "common/common_types.h"
+#include "core/mmio.h"
 
 namespace Memory {
 
@@ -21,6 +23,59 @@ const u32 PAGE_MASK = PAGE_SIZE - 1;
 const int PAGE_BITS = 12;
 const size_t PAGE_TABLE_NUM_ENTRIES = 1 << (32 - PAGE_BITS);
 
+enum class PageType {
+    /// Page is unmapped and should cause an access error.
+    Unmapped,
+    /// Page is mapped to regular memory. This is the only type you can get pointers to.
+    Memory,
+    /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
+    /// invalidation
+    RasterizerCachedMemory,
+    /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions.
+    Special,
+    /// Page is mapped to a I/O region, but also needs to check for rasterizer cache flushing and
+    /// invalidation
+    RasterizerCachedSpecial,
+};
+
+struct SpecialRegion {
+    VAddr base;
+    u32 size;
+    MMIORegionPointer handler;
+};
+
+/**
+ * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely
+ * mimics the way a real CPU page table works, but instead is optimized for minimal decoding and
+ * fetching requirements when accessing. In the usual case of an access to regular memory, it only
+ * requires an indexed fetch and a check for NULL.
+ */
+struct PageTable {
+    /**
+     * Array of memory pointers backing each page. An entry can only be non-null if the
+     * corresponding entry in the `attributes` array is of type `Memory`.
+     */
+    std::array<u8*, PAGE_TABLE_NUM_ENTRIES> pointers;
+
+    /**
+     * Contains MMIO handlers that back memory regions whose entries in the `attribute` array is of
+     * type `Special`.
+     */
+    std::vector<SpecialRegion> special_regions;
+
+    /**
+     * Array of fine grained page attributes. If it is set to any value other than `Memory`, then
+     * the corresponding entry in `pointers` MUST be set to null.
+     */
+    std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes;
+
+    /**
+     * Indicates the number of externally cached resources touching a page that should be
+     * flushed before the memory is accessed
+     */
+    std::array<u8, PAGE_TABLE_NUM_ENTRIES> cached_res_count;
+};
+
 /// Physical memory regions as seen from the ARM11
 enum : PAddr {
     /// IO register area
@@ -126,6 +181,9 @@ enum : VAddr {
     NEW_LINEAR_HEAP_VADDR_END = NEW_LINEAR_HEAP_VADDR + NEW_LINEAR_HEAP_SIZE,
 };
 
+/// Currently active page table
+extern PageTable* current_page_table;
+
 bool IsValidVirtualAddress(const VAddr addr);
 bool IsValidPhysicalAddress(const PAddr addr);
 
@@ -169,8 +227,6 @@ boost::optional<VAddr> PhysicalToVirtualAddress(PAddr addr);
 
 /**
  * Gets a pointer to the memory region beginning at the specified physical address.
- *
- * @note This is currently implemented using PhysicalToVirtualAddress().
  */
 u8* GetPhysicalPointer(PAddr address);
 
@@ -209,4 +265,4 @@ void RasterizerFlushVirtualRegion(VAddr start, u32 size, FlushMode mode);
  * retrieve the current page table for that purpose.
  */
 std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetCurrentPageTablePointers();
-}
+} // namespace Memory
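The PageTable comment above describes the common case as "an indexed fetch and a check for NULL". The following standalone model shows that fast path (page index from the top 20 bits, byte offset from the low 12); the types and helpers are simplified stand-ins, not the real Memory:: API.

#include <array>
#include <cstddef>
#include <cstdint>

using VAddr = std::uint32_t;

constexpr std::uint32_t PAGE_SIZE = 0x1000;
constexpr std::uint32_t PAGE_MASK = PAGE_SIZE - 1;
constexpr int PAGE_BITS = 12;
constexpr std::size_t NUM_ENTRIES = std::size_t{1} << (32 - PAGE_BITS);

struct PageTableModel {
    // One host pointer per 4 KiB guest page; null means the page is not directly
    // mapped (unmapped, MMIO or rasterizer-cached in the real implementation).
    std::array<std::uint8_t*, NUM_ENTRIES> pointers{};
};

std::uint8_t* GetPointerModel(const PageTableModel& table, VAddr vaddr) {
    std::uint8_t* page = table.pointers[vaddr >> PAGE_BITS]; // indexed fetch
    if (page == nullptr)
        return nullptr; // slow path would consult attributes / special_regions
    return page + (vaddr & PAGE_MASK); // byte offset within the page
}

int main() {
    static PageTableModel table; // ~8 MiB of pointers, so keep it off the stack
    static std::uint8_t backing[PAGE_SIZE] = {};
    const VAddr vaddr = 0x00100000;
    table.pointers[vaddr >> PAGE_BITS] = backing; // map a single page
    return GetPointerModel(table, vaddr + 0x10) == backing + 0x10 ? 0 : 1;
}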
diff --git a/src/core/memory_setup.h b/src/core/memory_setup.h
index 3fdf3a87d..c58baa50b 100644
--- a/src/core/memory_setup.h
+++ b/src/core/memory_setup.h
@@ -9,24 +9,24 @@
 
 namespace Memory {
 
-void InitMemoryMap();
-
 /**
  * Maps an allocated buffer onto a region of the emulated process address space.
  *
+ * @param page_table The page table of the emulated process.
  * @param base The address to start mapping at. Must be page-aligned.
  * @param size The amount of bytes to map. Must be page-aligned.
  * @param target Buffer with the memory backing the mapping. Must be of length at least `size`.
  */
-void MapMemoryRegion(VAddr base, u32 size, u8* target);
+void MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, u8* target);
 
 /**
  * Maps a region of the emulated process address space as a IO region.
+ * @param page_table The page table of the emulated process.
  * @param base The address to start mapping at. Must be page-aligned.
  * @param size The amount of bytes to map. Must be page-aligned.
  * @param mmio_handler The handler that backs the mapping.
  */
-void MapIoRegion(VAddr base, u32 size, MMIORegionPointer mmio_handler);
+void MapIoRegion(PageTable& page_table, VAddr base, u32 size, MMIORegionPointer mmio_handler);
 
-void UnmapRegion(VAddr base, u32 size);
+void UnmapRegion(PageTable& page_table, VAddr base, u32 size);
 }
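With the explicit page_table parameter, callers map regions into a specific table instead of a hidden global, which is exactly what the updated test fixture below does. A hedged usage sketch against the new signatures; it assumes the Citra source tree for the two includes, and the buffer, function name and base address are arbitrary illustration values.

#include <array>
#include "core/memory.h"
#include "core/memory_setup.h"

// Arbitrary page-aligned backing buffer (two 4 KiB pages), for illustration only.
static std::array<u8, 0x2000> backing{};

void MapExampleRegion() {
    // PageTable is large (one pointer per 4 KiB page of the 32-bit space), so keep
    // it out of automatic storage, as the tests do.
    static Memory::PageTable table;
    table.pointers.fill(nullptr);
    table.attributes.fill(Memory::PageType::Unmapped);
    table.cached_res_count.fill(0);

    // Map the buffer at an arbitrary page-aligned virtual address.
    Memory::MapMemoryRegion(table, 0x10000000, static_cast<u32>(backing.size()),
                            backing.data());

    // ... install it with Memory::current_page_table = &table; and use it ...

    Memory::UnmapRegion(table, 0x10000000, static_cast<u32>(backing.size()));
}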
diff --git a/src/tests/core/arm/arm_test_common.cpp b/src/tests/core/arm/arm_test_common.cpp
index 1df6c5677..8384ce744 100644
--- a/src/tests/core/arm/arm_test_common.cpp
+++ b/src/tests/core/arm/arm_test_common.cpp
@@ -3,20 +3,30 @@
 // Refer to the license.txt file included.
 
 #include "core/core.h"
+#include "core/memory.h"
 #include "core/memory_setup.h"
 #include "tests/core/arm/arm_test_common.h"
 
 namespace ArmTests {
 
+static Memory::PageTable page_table;
+
 TestEnvironment::TestEnvironment(bool mutable_memory_)
     : mutable_memory(mutable_memory_), test_memory(std::make_shared<TestMemory>(this)) {
-    Memory::MapIoRegion(0x00000000, 0x80000000, test_memory);
-    Memory::MapIoRegion(0x80000000, 0x80000000, test_memory);
+
+    page_table.pointers.fill(nullptr);
+    page_table.attributes.fill(Memory::PageType::Unmapped);
+    page_table.cached_res_count.fill(0);
+
+    Memory::MapIoRegion(page_table, 0x00000000, 0x80000000, test_memory);
+    Memory::MapIoRegion(page_table, 0x80000000, 0x80000000, test_memory);
+
+    Memory::current_page_table = &page_table;
 }
 
 TestEnvironment::~TestEnvironment() {
-    Memory::UnmapRegion(0x80000000, 0x80000000);
-    Memory::UnmapRegion(0x00000000, 0x80000000);
+    Memory::UnmapRegion(page_table, 0x80000000, 0x80000000);
+    Memory::UnmapRegion(page_table, 0x00000000, 0x80000000);
 }
 
 void TestEnvironment::SetMemory64(VAddr vaddr, u64 value) {