author    bunnei    2019-03-17 14:42:57 -0400
committer GitHub    2019-03-17 14:42:57 -0400
commit    57ca1e3e6942d1ef1b59c458e76ba969f0b739d5
tree      1d7a026c695a73932030048329a2c0707656666e
parent    Merge pull request #2251 from bunnei/skip-zero-flush
parent    core: Move PageTable struct into Common.
Merge pull request #2252 from bunnei/move-page-table
core: Move PageTable struct into Common.
Diffstat
 src/common/CMakeLists.txt                                           |   4
 src/common/memory_hook.cpp (renamed from src/core/memory_hook.cpp)  |   6
 src/common/memory_hook.h (renamed from src/core/memory_hook.h)      |   4
 src/common/page_table.cpp                                           |  29
 src/common/page_table.h                                             |  80
 src/core/CMakeLists.txt                                             |   2
 src/core/arm/dynarmic/arm_dynarmic.h                                |   4
 src/core/hle/kernel/process.cpp                                     |   2
 src/core/hle/kernel/scheduler.cpp                                   |   2
 src/core/hle/kernel/vm_manager.cpp                                  |   6
 src/core/hle/kernel/vm_manager.h                                    |   9
 src/core/memory.cpp                                                 | 134
 src/core/memory.h                                                   |  74
 src/core/memory_setup.h                                             |  19
 src/tests/core/arm/arm_test_common.cpp                              |   3
 src/tests/core/arm/arm_test_common.h                                |   8
16 files changed, 215 insertions, 171 deletions
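
The substance of the merge: the page-table machinery (PageType, SpecialRegion, PageTable) and MemoryHook leave the Memory namespace in src/core and become Common types in src/common, and PageTable is now constructed from a page size in bits (VMManager passes Memory::PAGE_BITS) and resized separately for the address-space width, rather than taking the address-space width in its constructor. A minimal standalone sketch of that construct-then-Resize shape, using a trimmed stand-in struct rather than the real header:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Trimmed stand-in mirroring the Common::PageTable interface introduced here (illustrative only).
struct PageTable {
    explicit PageTable(std::size_t page_size_in_bits) : page_size_in_bits{page_size_in_bits} {}

    void Resize(std::size_t address_space_width_in_bits) {
        const std::size_t num_entries = 1ULL << (address_space_width_in_bits - page_size_in_bits);
        pointers.resize(num_entries);
        attributes.resize(num_entries);
        pointers.shrink_to_fit(); // drop stranded capacity when shrinking (see page_table.cpp below)
        attributes.shrink_to_fit();
    }

    std::vector<std::uint8_t*> pointers;  // host pointer per page, or nullptr
    std::vector<std::uint8_t> attributes; // stands in for std::vector<PageType>
    const std::size_t page_size_in_bits;
};

int main() {
    PageTable table{12}; // 4 KiB pages, as VMManager now passes Memory::PAGE_BITS
    table.Resize(39);    // default 39-bit process address space
    std::cout << table.pointers.size() << " pages\n"; // 134217728 = 2^27
    table.Resize(36);    // a 36-bit title shrinks the table
    std::cout << table.pointers.size() << " pages\n"; // 16777216 = 2^24
}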
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index c538c6415..43ae8a9e7 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -92,10 +92,14 @@ add_library(common STATIC
     logging/text_formatter.cpp
     logging/text_formatter.h
     math_util.h
+    memory_hook.cpp
+    memory_hook.h
     microprofile.cpp
     microprofile.h
     microprofileui.h
     misc.cpp
+    page_table.cpp
+    page_table.h
     param_package.cpp
     param_package.h
     quaternion.h
diff --git a/src/core/memory_hook.cpp b/src/common/memory_hook.cpp
index c61c6c1fb..3986986d6 100644
--- a/src/core/memory_hook.cpp
+++ b/src/common/memory_hook.cpp
@@ -2,10 +2,10 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-#include "core/memory_hook.h"
+#include "common/memory_hook.h"
 
-namespace Memory {
+namespace Common {
 
 MemoryHook::~MemoryHook() = default;
 
-} // namespace Memory
+} // namespace Common
diff --git a/src/core/memory_hook.h b/src/common/memory_hook.h
index 940777107..adaa4c2c5 100644
--- a/src/core/memory_hook.h
+++ b/src/common/memory_hook.h
@@ -9,7 +9,7 @@
 
 #include "common/common_types.h"
 
-namespace Memory {
+namespace Common {
 
 /**
  * Memory hooks have two purposes:
@@ -44,4 +44,4 @@ public:
 };
 
 using MemoryHookPointer = std::shared_ptr<MemoryHook>;
-} // namespace Memory
+} // namespace Common
diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp
new file mode 100644
index 000000000..8eba1c3f1
--- /dev/null
+++ b/src/common/page_table.cpp
@@ -0,0 +1,29 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/page_table.h"
+
+namespace Common {
+
+PageTable::PageTable(std::size_t page_size_in_bits) : page_size_in_bits{page_size_in_bits} {}
+
+PageTable::~PageTable() = default;
+
+void PageTable::Resize(std::size_t address_space_width_in_bits) {
+    const std::size_t num_page_table_entries = 1ULL
+                                               << (address_space_width_in_bits - page_size_in_bits);
+
+    pointers.resize(num_page_table_entries);
+    attributes.resize(num_page_table_entries);
+
+    // The default is a 39-bit address space, which causes an initial 1GB allocation size. If the
+    // vector size is subsequently decreased (via resize), the vector might not automatically
+    // actually reallocate/resize its underlying allocation, which wastes up to ~800 MB for
+    // 36-bit titles. Call shrink_to_fit to reduce capacity to what's actually in use.
+
+    pointers.shrink_to_fit();
+    attributes.shrink_to_fit();
+}
+
+} // namespace Common
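
The allocation figures in the Resize comment above follow from the entry count: with 12-bit pages, a 39-bit address space needs 2^27 entries and a 36-bit one 2^24, and each entry costs one pointer in `pointers` plus one byte in `attributes`. A quick standalone check, assuming the usual 8-byte pointers of a 64-bit host:

#include <cstddef>
#include <iostream>

int main() {
    constexpr std::size_t page_bits = 12;
    constexpr std::size_t entries_39 = 1ULL << (39 - page_bits); // 134217728
    constexpr std::size_t entries_36 = 1ULL << (36 - page_bits); // 16777216
    constexpr std::size_t bytes_per_entry = sizeof(void*) + 1;   // u8* in pointers + PageType in attributes
    std::cout << "39-bit: " << entries_39 * bytes_per_entry / (1024 * 1024) << " MiB\n"; // 1152 MiB
    std::cout << "36-bit: " << entries_36 * bytes_per_entry / (1024 * 1024) << " MiB\n"; // 144 MiB
}

Resizing from 39 bits down to 36 without the shrink_to_fit calls would leave most of that initial allocation stranded as unused vector capacity, which is the waste the comment is guarding against.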
diff --git a/src/common/page_table.h b/src/common/page_table.h
new file mode 100644
index 000000000..8339f2890
--- /dev/null
+++ b/src/common/page_table.h
@@ -0,0 +1,80 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <vector>
+#include <boost/icl/interval_map.hpp>
+#include "common/common_types.h"
+#include "common/memory_hook.h"
+
+namespace Common {
+
+enum class PageType : u8 {
+    /// Page is unmapped and should cause an access error.
+    Unmapped,
+    /// Page is mapped to regular memory. This is the only type you can get pointers to.
+    Memory,
+    /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
+    /// invalidation
+    RasterizerCachedMemory,
+    /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions.
+    Special,
+};
+
+struct SpecialRegion {
+    enum class Type {
+        DebugHook,
+        IODevice,
+    } type;
+
+    MemoryHookPointer handler;
+
+    bool operator<(const SpecialRegion& other) const {
+        return std::tie(type, handler) < std::tie(other.type, other.handler);
+    }
+
+    bool operator==(const SpecialRegion& other) const {
+        return std::tie(type, handler) == std::tie(other.type, other.handler);
+    }
+};
+
+/**
+ * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely
+ * mimics the way a real CPU page table works.
+ */
+struct PageTable {
+    explicit PageTable(std::size_t page_size_in_bits);
+    ~PageTable();
+
+    /**
+     * Resizes the page table to be able to accomodate enough pages within
+     * a given address space.
+     *
+     * @param address_space_width_in_bits The address size width in bits.
+     */
+    void Resize(std::size_t address_space_width_in_bits);
+
+    /**
+     * Vector of memory pointers backing each page. An entry can only be non-null if the
+     * corresponding entry in the `attributes` vector is of type `Memory`.
+     */
+    std::vector<u8*> pointers;
+
+    /**
+     * Contains MMIO handlers that back memory regions whose entries in the `attribute` vector is
+     * of type `Special`.
+     */
+    boost::icl::interval_map<VAddr, std::set<SpecialRegion>> special_regions;
+
+    /**
+     * Vector of fine grained page attributes. If it is set to any value other than `Memory`, then
+     * the corresponding entry in `pointers` MUST be set to null.
+     */
+    std::vector<PageType> attributes;
+
+    const std::size_t page_size_in_bits{};
+};
+
+} // namespace Common
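
One invariant the doc comments above pin down between the two vectors: an entry in `pointers` is non-null exactly when the matching `attributes` entry is PageType::Memory; Unmapped, Special, and RasterizerCachedMemory pages keep a null pointer and are serviced through `special_regions` or the rasterizer path instead. A standalone sketch of a lookup that honours that invariant (the enum mirrors the header above; the lookup helper itself is illustrative, not part of the commit):

#include <cstddef>
#include <cstdint>
#include <vector>

enum class PageType : std::uint8_t { Unmapped, Memory, RasterizerCachedMemory, Special };

struct MiniPageTable {
    std::vector<std::uint8_t*> pointers;
    std::vector<PageType> attributes;
};

// Hands out a host pointer only for plain Memory pages; every other page kind
// has a null pointer here and must be routed through hooks or the cache path.
std::uint8_t* LookupHostPointer(const MiniPageTable& table, std::size_t page_index,
                                std::size_t offset_in_page) {
    if (page_index >= table.attributes.size() ||
        table.attributes[page_index] != PageType::Memory) {
        return nullptr;
    }
    return table.pointers[page_index] + offset_in_page;
}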
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 8ccb2d5f0..aee8bc27d 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -437,8 +437,6 @@ add_library(core STATIC
     loader/xci.h
     memory.cpp
     memory.h
-    memory_hook.cpp
-    memory_hook.h
     memory_setup.h
     perf_stats.cpp
     perf_stats.h
diff --git a/src/core/arm/dynarmic/arm_dynarmic.h b/src/core/arm/dynarmic/arm_dynarmic.h
index 6cc458296..aada1e862 100644
--- a/src/core/arm/dynarmic/arm_dynarmic.h
+++ b/src/core/arm/dynarmic/arm_dynarmic.h
@@ -12,7 +12,7 @@
 #include "core/arm/exclusive_monitor.h"
 #include "core/arm/unicorn/arm_unicorn.h"
 
-namespace Memory {
+namespace Common {
 struct PageTable;
 }
 
@@ -70,7 +70,7 @@ private:
     Timing::CoreTiming& core_timing;
     DynarmicExclusiveMonitor& exclusive_monitor;
 
-    Memory::PageTable* current_page_table = nullptr;
+    Common::PageTable* current_page_table = nullptr;
 };
 
 class DynarmicExclusiveMonitor final : public ExclusiveMonitor {
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 49fced7b1..65c51003d 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -31,7 +31,7 @@ namespace {
  */
 void SetupMainThread(Process& owner_process, KernelCore& kernel, VAddr entry_point, u32 priority) {
     // Setup page table so we can write to memory
-    SetCurrentPageTable(&owner_process.VMManager().page_table);
+    Memory::SetCurrentPageTable(&owner_process.VMManager().page_table);
 
     // Initialize new "main" thread
     const VAddr stack_top = owner_process.VMManager().GetTLSIORegionEndAddress();
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index e524509df..cc189cc64 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -96,7 +96,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
         auto* const thread_owner_process = current_thread->GetOwnerProcess();
         if (previous_process != thread_owner_process) {
             system.Kernel().MakeCurrentProcess(thread_owner_process);
-            SetCurrentPageTable(&thread_owner_process->VMManager().page_table);
+            Memory::SetCurrentPageTable(&thread_owner_process->VMManager().page_table);
         }
 
         cpu_core.LoadContext(new_thread->GetContext());
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 05c59af34..3def3e52c 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -7,13 +7,13 @@
 #include <utility>
 #include "common/assert.h"
 #include "common/logging/log.h"
+#include "common/memory_hook.h"
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
 #include "core/file_sys/program_metadata.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/vm_manager.h"
 #include "core/memory.h"
-#include "core/memory_hook.h"
 #include "core/memory_setup.h"
 
 namespace Kernel {
@@ -177,7 +177,7 @@ ResultVal<VAddr> VMManager::FindFreeRegion(u64 size) const {
 
 ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size,
                                                    MemoryState state,
-                                                   Memory::MemoryHookPointer mmio_handler) {
+                                                   Common::MemoryHookPointer mmio_handler) {
     // This is the appropriately sized VMA that will turn into our allocation.
     CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
     VirtualMemoryArea& final_vma = vma_handle->second;
@@ -624,7 +624,7 @@ void VMManager::ClearPageTable() {
     std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr);
     page_table.special_regions.clear();
     std::fill(page_table.attributes.begin(), page_table.attributes.end(),
-              Memory::PageType::Unmapped);
+              Common::PageType::Unmapped);
 }
 
 VMManager::CheckResults VMManager::CheckRangeState(VAddr address, u64 size, MemoryState state_mask,
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 88e0b3c02..b96980f8f 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -9,9 +9,10 @@
 #include <tuple>
 #include <vector>
 #include "common/common_types.h"
+#include "common/memory_hook.h"
+#include "common/page_table.h"
 #include "core/hle/result.h"
 #include "core/memory.h"
-#include "core/memory_hook.h"
 
 namespace FileSys {
 enum class ProgramAddressSpaceType : u8;
@@ -290,7 +291,7 @@ struct VirtualMemoryArea {
     // Settings for type = MMIO
     /// Physical address of the register area this VMA maps to.
     PAddr paddr = 0;
-    Memory::MemoryHookPointer mmio_handler = nullptr;
+    Common::MemoryHookPointer mmio_handler = nullptr;
 
     /// Tests if this area can be merged to the right with `next`.
     bool CanBeMergedWith(const VirtualMemoryArea& next) const;
@@ -368,7 +369,7 @@ public:
      * @param mmio_handler The handler that will implement read and write for this MMIO region.
      */
     ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state,
-                                 Memory::MemoryHookPointer mmio_handler);
+                                 Common::MemoryHookPointer mmio_handler);
 
     /// Unmaps a range of addresses, splitting VMAs as necessary.
     ResultCode UnmapRange(VAddr target, u64 size);
@@ -509,7 +510,7 @@ public:
 
     /// Each VMManager has its own page table, which is set as the main one when the owning process
     /// is scheduled.
-    Memory::PageTable page_table;
+    Common::PageTable page_table{Memory::PAGE_BITS};
 
 private:
     using VMAIter = VMAMap::iterator;
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index e0cc5175f..365ac82b4 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -10,6 +10,7 @@
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/logging/log.h"
+#include "common/page_table.h"
 #include "common/swap.h"
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
@@ -23,9 +24,9 @@
 
 namespace Memory {
 
-static PageTable* current_page_table = nullptr;
+static Common::PageTable* current_page_table = nullptr;
 
-void SetCurrentPageTable(PageTable* page_table) {
+void SetCurrentPageTable(Common::PageTable* page_table) {
     current_page_table = page_table;
 
     auto& system = Core::System::GetInstance();
@@ -37,34 +38,12 @@ void SetCurrentPageTable(PageTable* page_table) {
     }
 }
 
-PageTable* GetCurrentPageTable() {
+Common::PageTable* GetCurrentPageTable() {
     return current_page_table;
 }
 
-PageTable::PageTable() = default;
-
-PageTable::PageTable(std::size_t address_space_width_in_bits) {
-    Resize(address_space_width_in_bits);
-}
-
-PageTable::~PageTable() = default;
-
-void PageTable::Resize(std::size_t address_space_width_in_bits) {
-    const std::size_t num_page_table_entries = 1ULL << (address_space_width_in_bits - PAGE_BITS);
-
-    pointers.resize(num_page_table_entries);
-    attributes.resize(num_page_table_entries);
-
-    // The default is a 39-bit address space, which causes an initial 1GB allocation size. If the
-    // vector size is subsequently decreased (via resize), the vector might not automatically
-    // actually reallocate/resize its underlying allocation, which wastes up to ~800 MB for
-    // 36-bit titles. Call shrink_to_fit to reduce capacity to what's actually in use.
-
-    pointers.shrink_to_fit();
-    attributes.shrink_to_fit();
-}
-
-static void MapPages(PageTable& page_table, VAddr base, u64 size, u8* memory, PageType type) {
+static void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory,
+                     Common::PageType type) {
     LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE,
               (base + size) * PAGE_SIZE);
 
@@ -92,41 +71,47 @@ static void MapPages(PageTable& page_table, VAddr base, u64 size, u8* memory, Pa
     }
 }
 
-void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target) {
+void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
-    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
 }
 
-void MapIoRegion(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer mmio_handler) {
+void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
+                 Common::MemoryHookPointer mmio_handler) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
-    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Special);
 
     auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
-    SpecialRegion region{SpecialRegion::Type::IODevice, std::move(mmio_handler)};
-    page_table.special_regions.add(std::make_pair(interval, std::set<SpecialRegion>{region}));
+    Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice, std::move(mmio_handler)};
+    page_table.special_regions.add(
+        std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
 }
 
-void UnmapRegion(PageTable& page_table, VAddr base, u64 size) {
+void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
-    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
+    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Unmapped);
 
     auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
     page_table.special_regions.erase(interval);
 }
 
-void AddDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook) {
+void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+                  Common::MemoryHookPointer hook) {
     auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
-    SpecialRegion region{SpecialRegion::Type::DebugHook, std::move(hook)};
-    page_table.special_regions.add(std::make_pair(interval, std::set<SpecialRegion>{region}));
+    Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
+    page_table.special_regions.add(
+        std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
 }
 
-void RemoveDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook) {
+void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+                     Common::MemoryHookPointer hook) {
     auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
-    SpecialRegion region{SpecialRegion::Type::DebugHook, std::move(hook)};
-    page_table.special_regions.subtract(std::make_pair(interval, std::set<SpecialRegion>{region}));
+    Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
+    page_table.special_regions.subtract(
+        std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
 }
 
 /**
@@ -175,15 +160,15 @@ T Read(const VAddr vaddr) {
         return value;
     }
 
-    PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
+    Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {
-    case PageType::Unmapped:
+    case Common::PageType::Unmapped:
         LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr);
         return 0;
-    case PageType::Memory:
+    case Common::PageType::Memory:
         ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
         break;
-    case PageType::RasterizerCachedMemory: {
+    case Common::PageType::RasterizerCachedMemory: {
         auto host_ptr{GetPointerFromVMA(vaddr)};
         Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), sizeof(T));
         T value;
@@ -205,16 +190,16 @@ void Write(const VAddr vaddr, const T data) {
         return;
     }
 
-    PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
+    Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {
-    case PageType::Unmapped:
+    case Common::PageType::Unmapped:
         LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
                   static_cast<u32>(data), vaddr);
         return;
-    case PageType::Memory:
+    case Common::PageType::Memory:
         ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
         break;
-    case PageType::RasterizerCachedMemory: {
+    case Common::PageType::RasterizerCachedMemory: {
         auto host_ptr{GetPointerFromVMA(vaddr)};
         Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(T));
         std::memcpy(host_ptr, &data, sizeof(T));
@@ -232,10 +217,10 @@ bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) {
     if (page_pointer)
         return true;
 
-    if (page_table.attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory)
+    if (page_table.attributes[vaddr >> PAGE_BITS] == Common::PageType::RasterizerCachedMemory)
        return true;
 
-    if (page_table.attributes[vaddr >> PAGE_BITS] != PageType::Special)
+    if (page_table.attributes[vaddr >> PAGE_BITS] != Common::PageType::Special)
        return false;
 
     return false;
@@ -255,7 +240,8 @@ u8* GetPointer(const VAddr vaddr) {
         return page_pointer + (vaddr & PAGE_MASK);
     }
 
-    if (current_page_table->attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory) {
+    if (current_page_table->attributes[vaddr >> PAGE_BITS] ==
+        Common::PageType::RasterizerCachedMemory) {
         return GetPointerFromVMA(vaddr);
     }
 
@@ -289,20 +275,20 @@ void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
 
     u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
     for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
-        PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
+        Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
 
         if (cached) {
             // Switch page type to cached if now cached
             switch (page_type) {
-            case PageType::Unmapped:
+            case Common::PageType::Unmapped:
                 // It is not necessary for a process to have this region mapped into its address
                 // space, for example, a system module need not have a VRAM mapping.
                 break;
-            case PageType::Memory:
-                page_type = PageType::RasterizerCachedMemory;
+            case Common::PageType::Memory:
+                page_type = Common::PageType::RasterizerCachedMemory;
                 current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
                 break;
-            case PageType::RasterizerCachedMemory:
+            case Common::PageType::RasterizerCachedMemory:
                 // There can be more than one GPU region mapped per CPU region, so it's common that
                 // this area is already marked as cached.
                 break;
@@ -312,23 +298,23 @@ void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
         } else {
             // Switch page type to uncached if now uncached
             switch (page_type) {
-            case PageType::Unmapped:
+            case Common::PageType::Unmapped:
                 // It is not necessary for a process to have this region mapped into its address
                 // space, for example, a system module need not have a VRAM mapping.
                 break;
-            case PageType::Memory:
+            case Common::PageType::Memory:
                 // There can be more than one GPU region mapped per CPU region, so it's common that
                 // this area is already unmarked as cached.
                 break;
-            case PageType::RasterizerCachedMemory: {
+            case Common::PageType::RasterizerCachedMemory: {
                 u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK);
                 if (pointer == nullptr) {
                     // It's possible that this function has been called while updating the pagetable
                     // after unmapping a VMA. In that case the underlying VMA will no longer exist,
                     // and we should just leave the pagetable entry blank.
-                    page_type = PageType::Unmapped;
+                    page_type = Common::PageType::Unmapped;
                 } else {
-                    page_type = PageType::Memory;
+                    page_type = Common::PageType::Memory;
                     current_page_table->pointers[vaddr >> PAGE_BITS] = pointer;
                 }
                 break;
@@ -370,21 +356,21 @@ void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
         switch (page_table.attributes[page_index]) {
-        case PageType::Unmapped: {
+        case Common::PageType::Unmapped: {
             LOG_ERROR(HW_Memory,
                       "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                       current_vaddr, src_addr, size);
             std::memset(dest_buffer, 0, copy_amount);
             break;
         }
-        case PageType::Memory: {
+        case Common::PageType::Memory: {
             DEBUG_ASSERT(page_table.pointers[page_index]);
 
             const u8* src_ptr = page_table.pointers[page_index] + page_offset;
             std::memcpy(dest_buffer, src_ptr, copy_amount);
             break;
         }
-        case PageType::RasterizerCachedMemory: {
+        case Common::PageType::RasterizerCachedMemory: {
             const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)};
             Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount);
             std::memcpy(dest_buffer, host_ptr, copy_amount);
@@ -434,20 +420,20 @@ void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const voi
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
         switch (page_table.attributes[page_index]) {
-        case PageType::Unmapped: {
+        case Common::PageType::Unmapped: {
             LOG_ERROR(HW_Memory,
                       "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                       current_vaddr, dest_addr, size);
             break;
         }
-        case PageType::Memory: {
+        case Common::PageType::Memory: {
             DEBUG_ASSERT(page_table.pointers[page_index]);
 
             u8* dest_ptr = page_table.pointers[page_index] + page_offset;
             std::memcpy(dest_ptr, src_buffer, copy_amount);
             break;
         }
-        case PageType::RasterizerCachedMemory: {
+        case Common::PageType::RasterizerCachedMemory: {
             const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)};
             Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount);
             std::memcpy(host_ptr, src_buffer, copy_amount);
@@ -480,20 +466,20 @@ void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std:
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
         switch (page_table.attributes[page_index]) {
-        case PageType::Unmapped: {
+        case Common::PageType::Unmapped: {
             LOG_ERROR(HW_Memory,
                       "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                       current_vaddr, dest_addr, size);
             break;
         }
-        case PageType::Memory: {
+        case Common::PageType::Memory: {
             DEBUG_ASSERT(page_table.pointers[page_index]);
 
             u8* dest_ptr = page_table.pointers[page_index] + page_offset;
             std::memset(dest_ptr, 0, copy_amount);
             break;
         }
-        case PageType::RasterizerCachedMemory: {
+        case Common::PageType::RasterizerCachedMemory: {
             const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)};
             Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount);
             std::memset(host_ptr, 0, copy_amount);
@@ -522,20 +508,20 @@ void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr,
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
         switch (page_table.attributes[page_index]) {
-        case PageType::Unmapped: {
+        case Common::PageType::Unmapped: {
             LOG_ERROR(HW_Memory,
                       "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
                       current_vaddr, src_addr, size);
             ZeroBlock(process, dest_addr, copy_amount);
             break;
         }
-        case PageType::Memory: {
+        case Common::PageType::Memory: {
             DEBUG_ASSERT(page_table.pointers[page_index]);
             const u8* src_ptr = page_table.pointers[page_index] + page_offset;
             WriteBlock(process, dest_addr, src_ptr, copy_amount);
             break;
         }
-        case PageType::RasterizerCachedMemory: {
+        case Common::PageType::RasterizerCachedMemory: {
             const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)};
             Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount);
             WriteBlock(process, dest_addr, host_ptr, copy_amount);
diff --git a/src/core/memory.h b/src/core/memory.h
index c2c6643ee..3f60d868c 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -10,7 +10,10 @@
 #include <vector>
 #include <boost/icl/interval_map.hpp>
 #include "common/common_types.h"
-#include "core/memory_hook.h"
+
+namespace Common {
+struct PageTable;
+}
 
 namespace Kernel {
 class Process;
@@ -26,71 +29,6 @@ constexpr std::size_t PAGE_BITS = 12;
 constexpr u64 PAGE_SIZE = 1ULL << PAGE_BITS;
 constexpr u64 PAGE_MASK = PAGE_SIZE - 1;
 
-enum class PageType : u8 {
-    /// Page is unmapped and should cause an access error.
-    Unmapped,
-    /// Page is mapped to regular memory. This is the only type you can get pointers to.
-    Memory,
-    /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
-    /// invalidation
-    RasterizerCachedMemory,
-    /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions.
-    Special,
-};
-
-struct SpecialRegion {
-    enum class Type {
-        DebugHook,
-        IODevice,
-    } type;
-
-    MemoryHookPointer handler;
-
-    bool operator<(const SpecialRegion& other) const {
-        return std::tie(type, handler) < std::tie(other.type, other.handler);
-    }
-
-    bool operator==(const SpecialRegion& other) const {
-        return std::tie(type, handler) == std::tie(other.type, other.handler);
-    }
-};
-
-/**
- * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely
- * mimics the way a real CPU page table works.
- */
-struct PageTable {
-    explicit PageTable();
-    explicit PageTable(std::size_t address_space_width_in_bits);
-    ~PageTable();
-
-    /**
-     * Resizes the page table to be able to accomodate enough pages within
-     * a given address space.
-     *
-     * @param address_space_width_in_bits The address size width in bits.
-     */
-    void Resize(std::size_t address_space_width_in_bits);
-
-    /**
-     * Vector of memory pointers backing each page. An entry can only be non-null if the
-     * corresponding entry in the `attributes` vector is of type `Memory`.
-     */
-    std::vector<u8*> pointers;
-
-    /**
-     * Contains MMIO handlers that back memory regions whose entries in the `attribute` vector is
-     * of type `Special`.
-     */
-    boost::icl::interval_map<VAddr, std::set<SpecialRegion>> special_regions;
-
-    /**
-     * Vector of fine grained page attributes. If it is set to any value other than `Memory`, then
-     * the corresponding entry in `pointers` MUST be set to null.
-     */
-    std::vector<PageType> attributes;
-};
-
 /// Virtual user-space memory regions
 enum : VAddr {
     /// Read-only page containing kernel and system configuration values.
@@ -116,8 +54,8 @@ enum : VAddr {
 };
 
 /// Currently active page table
-void SetCurrentPageTable(PageTable* page_table);
-PageTable* GetCurrentPageTable();
+void SetCurrentPageTable(Common::PageTable* page_table);
+Common::PageTable* GetCurrentPageTable();
 
 /// Determines if the given VAddr is valid for the specified process.
 bool IsValidVirtualAddress(const Kernel::Process& process, VAddr vaddr);
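
The constants that stay behind in memory.h drive all the index math above: PAGE_BITS = 12 gives PAGE_SIZE = 4096 and PAGE_MASK = 0xFFF, so `vaddr >> PAGE_BITS` picks the page-table entry and `vaddr & PAGE_MASK` the byte offset inside the page, exactly as memory.cpp does in Read/Write and the block copies. A standalone check of that split, with an arbitrary example address:

#include <cstdint>
#include <iostream>

int main() {
    constexpr std::uint64_t PAGE_BITS = 12;
    constexpr std::uint64_t PAGE_SIZE = 1ULL << PAGE_BITS; // 4096
    constexpr std::uint64_t PAGE_MASK = PAGE_SIZE - 1;     // 0xFFF

    const std::uint64_t vaddr = 0x12345678;
    std::cout << std::hex << "page index = 0x" << (vaddr >> PAGE_BITS) // 0x12345
              << ", offset = 0x" << (vaddr & PAGE_MASK) << '\n';       // 0x678
}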
diff --git a/src/core/memory_setup.h b/src/core/memory_setup.h
index 9a1a4f4be..5225ee8e2 100644
--- a/src/core/memory_setup.h
+++ b/src/core/memory_setup.h
@@ -5,7 +5,11 @@
 #pragma once
 
 #include "common/common_types.h"
-#include "core/memory_hook.h"
+#include "common/memory_hook.h"
+
+namespace Common {
+struct PageTable;
+}
 
 namespace Memory {
 
@@ -17,7 +21,7 @@ namespace Memory {
  * @param size The amount of bytes to map. Must be page-aligned.
  * @param target Buffer with the memory backing the mapping. Must be of length at least `size`.
  */
-void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target);
+void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target);
 
 /**
  * Maps a region of the emulated process address space as a IO region.
@@ -26,11 +30,14 @@ void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target);
  * @param size The amount of bytes to map. Must be page-aligned.
  * @param mmio_handler The handler that backs the mapping.
  */
-void MapIoRegion(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer mmio_handler);
+void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
+                 Common::MemoryHookPointer mmio_handler);
 
-void UnmapRegion(PageTable& page_table, VAddr base, u64 size);
+void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size);
 
-void AddDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook);
-void RemoveDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook);
+void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+                  Common::MemoryHookPointer hook);
+void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+                     Common::MemoryHookPointer hook);
 
 } // namespace Memory
diff --git a/src/tests/core/arm/arm_test_common.cpp b/src/tests/core/arm/arm_test_common.cpp
index 6fe56833d..3e1a735c3 100644
--- a/src/tests/core/arm/arm_test_common.cpp
+++ b/src/tests/core/arm/arm_test_common.cpp
@@ -4,6 +4,7 @@
 
 #include <algorithm>
 
+#include "common/page_table.h"
 #include "core/core.h"
 #include "core/hle/kernel/process.h"
 #include "core/memory.h"
@@ -22,7 +23,7 @@ TestEnvironment::TestEnvironment(bool mutable_memory_)
     std::fill(page_table->pointers.begin(), page_table->pointers.end(), nullptr);
     page_table->special_regions.clear();
     std::fill(page_table->attributes.begin(), page_table->attributes.end(),
-              Memory::PageType::Unmapped);
+              Common::PageType::Unmapped);
 
     Memory::MapIoRegion(*page_table, 0x00000000, 0x80000000, test_memory);
     Memory::MapIoRegion(*page_table, 0x80000000, 0x80000000, test_memory);
diff --git a/src/tests/core/arm/arm_test_common.h b/src/tests/core/arm/arm_test_common.h
index 0b7539601..d145dbfcc 100644
--- a/src/tests/core/arm/arm_test_common.h
+++ b/src/tests/core/arm/arm_test_common.h
@@ -9,10 +9,10 @@
 #include <vector>
 
 #include "common/common_types.h"
+#include "common/memory_hook.h"
 #include "core/hle/kernel/kernel.h"
-#include "core/memory_hook.h"
 
-namespace Memory {
+namespace Common {
 struct PageTable;
 }
 
@@ -58,7 +58,7 @@ public:
 
 private:
     friend struct TestMemory;
-    struct TestMemory final : Memory::MemoryHook {
+    struct TestMemory final : Common::MemoryHook {
         explicit TestMemory(TestEnvironment* env_) : env(env_) {}
         TestEnvironment* env;
 
@@ -86,7 +86,7 @@ private:
     bool mutable_memory;
     std::shared_ptr<TestMemory> test_memory;
    std::vector<WriteRecord> write_records;
-    Memory::PageTable* page_table = nullptr;
+    Common::PageTable* page_table = nullptr;
    Kernel::KernelCore kernel;
 };
 