summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGravatar Yuri Kunde Schlesner2015-05-21 00:37:07 -0300
committerGravatar Yuri Kunde Schlesner2015-05-27 03:24:12 -0300
commit0a60aa75c2b03b8ed6752e5c64462bf86c52fcfc (patch)
tree3113ce6d149d7adf9ab5c1eb189102869760e55a
parentMerge pull request #826 from lioncash/tables (diff)
downloadyuzu-0a60aa75c2b03b8ed6752e5c64462bf86c52fcfc.tar.gz
yuzu-0a60aa75c2b03b8ed6752e5c64462bf86c52fcfc.tar.xz
yuzu-0a60aa75c2b03b8ed6752e5c64462bf86c52fcfc.zip
Kernel: Add VMManager to manage process address spaces
This enables more dynamic management of the process address space, compared to just directly configuring the page table for major areas. This will serve as the foundation upon which the rest of the Kernel memory management functions will be built.
Diffstat (limited to '')
-rw-r--r--src/core/CMakeLists.txt2
-rw-r--r--src/core/hle/kernel/vm_manager.cpp245
-rw-r--r--src/core/hle/kernel/vm_manager.h200
-rw-r--r--src/core/mem_map.cpp42
-rw-r--r--src/core/memory.cpp12
-rw-r--r--src/core/memory_setup.h7
6 files changed, 492 insertions, 16 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 42b4be938..bbc285168 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -36,6 +36,7 @@ set(SRCS
36 hle/kernel/shared_memory.cpp 36 hle/kernel/shared_memory.cpp
37 hle/kernel/thread.cpp 37 hle/kernel/thread.cpp
38 hle/kernel/timer.cpp 38 hle/kernel/timer.cpp
39 hle/kernel/vm_manager.cpp
39 hle/service/ac_u.cpp 40 hle/service/ac_u.cpp
40 hle/service/act_u.cpp 41 hle/service/act_u.cpp
41 hle/service/am_app.cpp 42 hle/service/am_app.cpp
@@ -147,6 +148,7 @@ set(HEADERS
147 hle/kernel/shared_memory.h 148 hle/kernel/shared_memory.h
148 hle/kernel/thread.h 149 hle/kernel/thread.h
149 hle/kernel/timer.h 150 hle/kernel/timer.h
151 hle/kernel/vm_manager.h
150 hle/result.h 152 hle/result.h
151 hle/service/ac_u.h 153 hle/service/ac_u.h
152 hle/service/act_u.h 154 hle/service/act_u.h
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
new file mode 100644
index 000000000..b2dd21542
--- /dev/null
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -0,0 +1,245 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6
7#include "core/hle/kernel/vm_manager.h"
8#include "core/memory_setup.h"
9
10namespace Kernel {
11
// Returns true if `next` (which must start exactly where this VMA ends) has identical
// attributes AND contiguous backing storage, so the two regions can be coalesced into one.
12bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
// Caller contract: `next` is the VMA immediately to the right of this one.
13 ASSERT(base + size == next.base);
// Generic attributes must match regardless of the VMA type.
14 if (permissions != next.permissions ||
15 meminfo_state != next.meminfo_state ||
16 type != next.type) {
17 return false;
18 }
// Type-specific check: the backing storage must be contiguous across the boundary,
// i.e. this VMA's backing must end exactly where `next`'s backing begins.
19 if (type == VMAType::AllocatedMemoryBlock &&
20 (backing_block != next.backing_block || offset + size != next.offset)) {
21 return false;
22 }
23 if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) {
24 return false;
25 }
26 if (type == VMAType::MMIO && paddr + size != next.paddr) {
27 return false;
28 }
29 return true;
30}
31
// Constructing a VMManager initializes the address space to a single Free region
// covering everything; see Reset() for the details.
32VMManager::VMManager() {
33 Reset();
34}
35
// Discards all existing mappings and returns the address space to its pristine state:
// one Free VMA spanning [0, MAX_ADDRESS). Also unmaps the corresponding pages, since
// UpdatePageTableForVMA on a Free VMA calls Memory::UnmapRegion.
36void VMManager::Reset() {
37 vma_map.clear();
38
39 // Initialize the map with a single free region covering the entire managed space.
// `initial_vma.base` defaults to 0, so the region starts at the bottom of the space.
40 VirtualMemoryArea initial_vma;
41 initial_vma.size = MAX_ADDRESS;
42 vma_map.emplace(initial_vma.base, initial_vma);
43
44 UpdatePageTableForVMA(initial_vma);
45}
46
// Locates the VMA containing `target`. Since vma_map always covers [0, MAX_ADDRESS)
// contiguously, upper_bound(target) yields the first VMA starting strictly after
// `target`, and its predecessor is the VMA containing it.
// NOTE(review): for target >= MAX_ADDRESS this returns the *last* VMA rather than
// vma_map.end(); callers (e.g. CarveVMA) reject such addresses via the subsequent
// size check instead — confirm this is intentional.
47VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
48 return std::prev(vma_map.upper_bound(target));
49}
50
// Maps `size` bytes of the ref-counted `block`, starting at `offset` within it, at guest
// address `target`. On success the new region is ReadWrite, tagged with `state`, and the
// page table is updated to point at the block's storage. Fails (via CarveVMA) if the
// target range is not entirely Free.
51ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
52 std::shared_ptr<std::vector<u8>> block, u32 offset, u32 size, MemoryState state) {
53 ASSERT(block != nullptr);
// NOTE(review): `offset + size` is a u32 sum and could wrap for hostile values,
// making this assert pass spuriously — confirm callers guarantee sane ranges.
54 ASSERT(offset + size <= block->size());
55
56 // This is the appropriately sized VMA that will turn into our allocation.
57 CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
58 VirtualMemoryArea& final_vma = vma_handle->second;
59 ASSERT(final_vma.size == size);
60
61 final_vma.type = VMAType::AllocatedMemoryBlock;
62 final_vma.permissions = VMAPermission::ReadWrite;
63 final_vma.meminfo_state = state;
64 final_vma.backing_block = block;
65 final_vma.offset = offset;
// Sync the page table with the new attributes before merging neighbors.
66 UpdatePageTableForVMA(final_vma);
67
68 return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
69}
70
// Maps `size` bytes of raw, unmanaged host memory at guest address `target`. The VMManager
// does NOT take ownership of `memory`; the caller must keep it alive while mapped.
// On success the region is ReadWrite and tagged with `state`.
71ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8 * memory, u32 size, MemoryState state) {
72 ASSERT(memory != nullptr);
73
74 // This is the appropriately sized VMA that will turn into our allocation.
75 CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
76 VirtualMemoryArea& final_vma = vma_handle->second;
77 ASSERT(final_vma.size == size);
78
79 final_vma.type = VMAType::BackingMemory;
80 final_vma.permissions = VMAPermission::ReadWrite;
81 final_vma.meminfo_state = state;
82 final_vma.backing_memory = memory;
// Sync the page table with the new attributes before merging neighbors.
83 UpdatePageTableForVMA(final_vma);
84
85 return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
86}
87
// Maps an MMIO register region of `size` bytes, physically located at `paddr`, at guest
// address `target`. The page table is marked Special for this range (no backing pointer);
// actual MMIO read/write handlers are not implemented yet (see UpdatePageTableForVMA).
88ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state) {
89 // This is the appropriately sized VMA that will turn into our allocation.
90 CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
91 VirtualMemoryArea& final_vma = vma_handle->second;
92 ASSERT(final_vma.size == size);
93
94 final_vma.type = VMAType::MMIO;
95 final_vma.permissions = VMAPermission::ReadWrite;
96 final_vma.meminfo_state = state;
97 final_vma.paddr = paddr;
// Sync the page table with the new attributes before merging neighbors.
98 UpdatePageTableForVMA(final_vma);
99
100 return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
101}
102
// Unmaps the given VMA by converting it back to a Free region in place: attributes are
// reset, all backing fields are cleared (dropping the shared_ptr reference if any), the
// pages are unmapped, and the now-free region is merged with any free neighbors.
103void VMManager::Unmap(VMAHandle vma_handle) {
104 VMAIter iter = StripIterConstness(vma_handle);
105
106 VirtualMemoryArea& vma = iter->second;
107 vma.type = VMAType::Free;
108 vma.permissions = VMAPermission::None;
109 vma.meminfo_state = MemoryState::Free;
110
// Clear every type-specific field so a Free VMA carries no stale state; resetting
// backing_block here is what releases this VMA's reference on the memory block.
111 vma.backing_block = nullptr;
112 vma.offset = 0;
113 vma.backing_memory = nullptr;
114 vma.paddr = 0;
115
116 UpdatePageTableForVMA(vma);
117
118 MergeAdjacent(iter);
119}
120
// Changes the permissions of an existing VMA and re-syncs the page table. Merging is
// attempted afterwards because the new permissions may now match a neighbor's.
// NOTE(review): the current page table (see UpdatePageTableForVMA) does not appear to
// encode permissions, so this only records them in the VMA — confirm.
121void VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) {
122 VMAIter iter = StripIterConstness(vma_handle);
123
124 VirtualMemoryArea& vma = iter->second;
125 vma.permissions = new_perms;
126 UpdatePageTableForVMA(vma);
127
128 MergeAdjacent(iter);
129}
130
// Converts a public const_iterator handle into a mutable iterator without a lookup.
131VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle & iter) {
132 // This uses a neat C++ trick to convert a const_iterator to a regular iterator, given
133 // non-const access to its container.
// std::map::erase(first, last) accepts const_iterators and returns a (mutable)
// iterator; with first == last nothing is erased, so this is a pure conversion.
134 return vma_map.erase(iter, iter); // Erases an empty range of elements
135}
136
// Produces a VMA of exactly [base, base + size) by splitting the Free VMA that contains
// that range, returning an iterator to it. Errors if the range is not page-aligned, not
// inside the managed space, or overlaps any non-Free VMA. The returned VMA is still
// marked Free; the caller fills in its type/attributes.
137ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
138 ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: %8X", size);
139 ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: %08X", base);
140
141 VMAIter vma_handle = StripIterConstness(FindVMA(base));
142 if (vma_handle == vma_map.end()) {
143 // Target address is outside the range managed by the kernel
// NOTE(review): FindVMA never actually returns end() for the map invariant used
// here; out-of-range bases are instead caught by the size check below — confirm.
144 return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS,
145 ErrorSummary::InvalidArgument, ErrorLevel::Usage); // 0xE0E01BF5
146 }
147
148 VirtualMemoryArea& vma = vma_handle->second;
149 if (vma.type != VMAType::Free) {
150 // Region is already allocated
151 return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS,
152 ErrorSummary::InvalidState, ErrorLevel::Usage); // 0xE0A01BF5
153 }
154
// Express the requested range as offsets relative to the containing VMA.
155 u32 start_in_vma = base - vma.base;
156 u32 end_in_vma = start_in_vma + size;
157
158 if (end_in_vma > vma.size) {
159 // Requested allocation doesn't fit inside VMA
// (Also covers base addresses beyond MAX_ADDRESS, since the last VMA is returned.)
160 return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS,
161 ErrorSummary::InvalidState, ErrorLevel::Usage); // 0xE0A01BF5
162 }
163
// Split at the end first so `vma_handle` keeps pointing at the left piece, then split
// at the start; the right piece of the second split is exactly the requested region.
164 if (end_in_vma != vma.size) {
165 // Split VMA at the end of the allocated region
166 SplitVMA(vma_handle, end_in_vma);
167 }
168 if (start_in_vma != 0) {
169 // Split VMA at the start of the allocated region
170 vma_handle = SplitVMA(vma_handle, start_in_vma);
171 }
172
173 return MakeResult<VMAIter>(vma_handle);
174}
175
// Splits the VMA at `vma_handle` into two at `offset_in_vma` bytes from its start.
// The original entry is shrunk to become the left half; a new entry is inserted for the
// right half, whose backing reference (offset / pointer / paddr) is advanced to stay
// consistent. Returns an iterator to the right half.
176VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u32 offset_in_vma) {
177 VirtualMemoryArea& old_vma = vma_handle->second;
178 VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA
179
180 // For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably
181 // a bug. This restriction might be removed later.
182 ASSERT(offset_in_vma < old_vma.size);
183 ASSERT(offset_in_vma > 0);
184
185 old_vma.size = offset_in_vma;
186 new_vma.base += offset_in_vma;
187 new_vma.size -= offset_in_vma;
188
// Advance the type-specific backing reference of the right half past the left half.
189 switch (new_vma.type) {
190 case VMAType::Free:
191 break;
192 case VMAType::AllocatedMemoryBlock:
193 new_vma.offset += offset_in_vma;
194 break;
195 case VMAType::BackingMemory:
196 new_vma.backing_memory += offset_in_vma;
197 break;
198 case VMAType::MMIO:
199 new_vma.paddr += offset_in_vma;
200 break;
201 }
202
// Sanity check: a correct split always yields two re-mergeable halves.
203 ASSERT(old_vma.CanBeMergedWith(new_vma));
204
// emplace_hint with the next position gives O(1) insertion since the new key is
// known to sort immediately after the (shrunk) original.
205 return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
206}
207
// Coalesces `iter` with its right and then left neighbor when CanBeMergedWith allows it,
// maintaining the invariant that no two adjacent mergeable VMAs exist. Returns an
// iterator to the surviving (possibly enlarged) VMA.
208VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
// Try absorbing the right neighbor into `iter`.
209 VMAIter next_vma = std::next(iter);
210 if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
211 iter->second.size += next_vma->second.size;
212 vma_map.erase(next_vma);
213 }
214
// Then try being absorbed into the left neighbor; `iter` moves to the survivor.
215 if (iter != vma_map.begin()) {
216 VMAIter prev_vma = std::prev(iter);
217 if (prev_vma->second.CanBeMergedWith(iter->second)) {
218 prev_vma->second.size += iter->second.size;
219 vma_map.erase(iter);
220 iter = prev_vma;
221 }
222 }
223
224 return iter;
225}
226
// Re-syncs the global page table for the range covered by `vma`, dispatching on its
// type: Free regions are unmapped, memory-backed regions are mapped to their host
// storage, and MMIO regions are marked as special (handler dispatch not yet supported).
227void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
228 switch (vma.type) {
229 case VMAType::Free:
230 Memory::UnmapRegion(vma.base, vma.size);
231 break;
232 case VMAType::AllocatedMemoryBlock:
// Map to the slice of the shared block this VMA covers.
233 Memory::MapMemoryRegion(vma.base, vma.size, vma.backing_block->data() + vma.offset);
234 break;
235 case VMAType::BackingMemory:
236 Memory::MapMemoryRegion(vma.base, vma.size, vma.backing_memory);
237 break;
238 case VMAType::MMIO:
239 // TODO(yuriks): Add support for MMIO handlers.
240 Memory::MapIoRegion(vma.base, vma.size);
241 break;
242 }
243}
244
245}
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
new file mode 100644
index 000000000..22b724603
--- /dev/null
+++ b/src/core/hle/kernel/vm_manager.h
@@ -0,0 +1,200 @@
1// Copyright 2015 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <map>
8#include <memory>
9#include <string>
10#include <vector>
11
12#include "common/common_types.h"
13
14#include "core/hle/result.h"
15
16namespace Kernel {
17
/// Discriminates what (if anything) backs a VirtualMemoryArea.
18enum class VMAType : u8 {
19 /// VMA represents an unmapped region of the address space.
20 Free,
21 /// VMA is backed by a ref-counted allocated memory block.
22 AllocatedMemoryBlock,
23 /// VMA is backed by a raw, unmanaged pointer.
24 BackingMemory,
25 /// VMA is mapped to MMIO registers at a fixed PAddr.
26 MMIO,
27 // TODO(yuriks): Implement MemoryAlias to support MAP/UNMAP
28};
29
30/// Permissions for mapped memory blocks
/// Read/Write/Execute are individual bit flags; the named combinations below are
/// provided for convenience since enum class values don't OR together implicitly.
31enum class VMAPermission : u8 {
32 None = 0,
33 Read = 1,
34 Write = 2,
35 Execute = 4,
36
37 ReadWrite = Read | Write,
38 ReadExecute = Read | Execute,
39 WriteExecute = Write | Execute,
40 ReadWriteExecute = Read | Write | Execute,
41};
42
43/// Set of values returned in MemoryInfo.state by svcQueryMemory.
/// NOTE(review): these numeric values presumably mirror the 3DS kernel's MemoryState
/// enumeration and must not be renumbered — confirm against svcQueryMemory docs.
44enum class MemoryState : u8 {
45 Free = 0,
46 Reserved = 1,
47 IO = 2,
48 Static = 3,
49 Code = 4,
50 Private = 5,
51 Shared = 6,
52 Continuous = 7,
53 Aliased = 8,
54 Alias = 9,
55 AliasCode = 10,
56 Locked = 11,
57};
58
59/**
60 * Represents a VMA in an address space. A VMA is a contiguous region of virtual addressing space
61 * with homogeneous attributes across its extents. In this particular implementation each VMA is
62 * also backed by a single host memory allocation.
63 */
64struct VirtualMemoryArea {
65 /// Virtual base address of the region.
66 VAddr base = 0;
67 /// Size of the region.
68 u32 size = 0;
69
70 VMAType type = VMAType::Free;
71 VMAPermission permissions = VMAPermission::None;
72 /// Tag returned by svcQueryMemory. Not otherwise used.
73 MemoryState meminfo_state = MemoryState::Free;
74
75 // Settings for type = AllocatedMemoryBlock
76 /// Memory block backing this VMA.
77 std::shared_ptr<std::vector<u8>> backing_block = nullptr;
78 /// Offset into `backing_block` the mapping starts from.
79 u32 offset = 0;
80
81 // Settings for type = BackingMemory
82 /// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed.
83 u8* backing_memory = nullptr;
84
85 // Settings for type = MMIO
86 /// Physical address of the register area this VMA maps to.
87 PAddr paddr = 0;
88
89 /// Tests if this area can be merged to the right with `next`.
90 bool CanBeMergedWith(const VirtualMemoryArea& next) const;
91};
92
93/**
94 * Manages a process' virtual addressing space. This class maintains a list of allocated and free
95 * regions in the address space, along with their attributes, and allows kernel clients to
96 * manipulate it, adjusting the page table to match.
97 *
98 * This is similar in idea and purpose to the VM manager present in operating system kernels, with
99 * the main difference being that it doesn't have to support swapping or memory mapping of files.
100 * The implementation is also simplified by not having to allocate page frames. See these articles
101 * about the Linux kernel for an explanation of the concept and implementation:
102 * - http://duartes.org/gustavo/blog/post/how-the-kernel-manages-your-memory/
103 * - http://duartes.org/gustavo/blog/post/page-cache-the-affair-between-memory-and-files/
104 */
105class VMManager {
106 // TODO(yuriks): Make page tables switchable to support multiple VMManagers
107public:
108 /**
109 * The maximum amount of address space managed by the kernel. Addresses above this are never used.
110 * @note This is the limit used by the New 3DS kernel. Old 3DS used 0x20000000.
111 */
112 static const u32 MAX_ADDRESS = 0x40000000;
113
114 /**
115 * A map covering the entirety of the managed address space, keyed by the `base` field of each
116 * VMA. It must always be modified by splitting or merging VMAs, so that the invariant
117 * `elem.base + elem.size == next.base` is preserved, and mergeable regions must always be
118 * merged when possible so that no two similar and adjacent regions exist that have not been
119 * merged.
// NOTE(review): this member is public but its invariants can only be upheld by the
// class's own methods — consider making it private with a read-only accessor.
120 */
121 std::map<VAddr, VirtualMemoryArea> vma_map;
122 using VMAHandle = decltype(vma_map)::const_iterator;
123
124 VMManager();
125
126 /// Clears the address space map, re-initializing with a single free area.
127 void Reset();
128
129 /// Finds the VMA that includes the given address, or `vma_map.end()` if there is none.
130 VMAHandle FindVMA(VAddr target) const;
131
132 // TODO(yuriks): Should these functions actually return the handle?
133
134 /**
135 * Maps part of a ref-counted block of memory at a given address.
136 *
137 * @param target The guest address to start the mapping at.
138 * @param block The block to be mapped.
139 * @param offset Offset into `block` to map from.
140 * @param size Size of the mapping.
141 * @param state MemoryState tag to attach to the VMA.
142 */
143 ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block,
144 u32 offset, u32 size, MemoryState state);
145
146 /**
147 * Maps an unmanaged host memory pointer at a given address.
148 *
149 * @param target The guest address to start the mapping at.
150 * @param memory The memory to be mapped.
151 * @param size Size of the mapping.
152 * @param state MemoryState tag to attach to the VMA.
153 */
154 ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u32 size, MemoryState state);
155
156 /**
157 * Maps a memory-mapped IO region at a given address.
158 *
159 * @param target The guest address to start the mapping at.
160 * @param paddr The physical address where the registers are present.
161 * @param size Size of the mapping.
162 * @param state MemoryState tag to attach to the VMA.
163 */
164 ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state);
165
166 /// Unmaps the given VMA.
167 void Unmap(VMAHandle vma);
168
169 /// Changes the permissions of the given VMA.
170 void Reprotect(VMAHandle vma, VMAPermission new_perms);
171
172private:
173 using VMAIter = decltype(vma_map)::iterator;
174
175 /// Converts a VMAHandle to a mutable VMAIter.
176 VMAIter StripIterConstness(const VMAHandle& iter);
177
178 /**
179 * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing
180 * the appropriate error checking.
181 */
182 ResultVal<VMAIter> CarveVMA(VAddr base, u32 size);
183
184 /**
185 * Splits a VMA in two, at the specified offset.
186 * @returns the right side of the split, with the original iterator becoming the left side.
187 */
188 VMAIter SplitVMA(VMAIter vma, u32 offset_in_vma);
189
190 /**
191 * Checks for and merges the specified VMA with adjacent ones if possible.
192 * @returns the merged VMA or the original if no merging was possible.
193 */
194 VMAIter MergeAdjacent(VMAIter vma);
195
196 /// Updates the pages corresponding to this VMA so they match the VMA's attributes.
197 void UpdatePageTableForVMA(const VirtualMemoryArea& vma);
198};
199
200}
diff --git a/src/core/mem_map.cpp b/src/core/mem_map.cpp
index 5ecec9566..66d95ed27 100644
--- a/src/core/mem_map.cpp
+++ b/src/core/mem_map.cpp
@@ -8,6 +8,10 @@
8#include "common/logging/log.h" 8#include "common/logging/log.h"
9 9
10#include "core/hle/config_mem.h" 10#include "core/hle/config_mem.h"
11#include "core/hle/kernel/kernel.h"
12#include "core/hle/kernel/shared_memory.h"
13#include "core/hle/kernel/vm_manager.h"
14#include "core/hle/result.h"
11#include "core/hle/shared_page.h" 15#include "core/hle/shared_page.h"
12#include "core/mem_map.h" 16#include "core/mem_map.h"
13#include "core/memory.h" 17#include "core/memory.h"
@@ -31,17 +35,18 @@ struct MemoryArea {
31 u8** ptr; 35 u8** ptr;
32 u32 base; 36 u32 base;
33 u32 size; 37 u32 size;
38 const char* name;
34}; 39};
35 40
36// We don't declare the IO regions in here since its handled by other means. 41// We don't declare the IO regions in here since its handled by other means.
37static MemoryArea memory_areas[] = { 42static MemoryArea memory_areas[] = {
38 {&g_exefs_code, PROCESS_IMAGE_VADDR, PROCESS_IMAGE_MAX_SIZE}, 43 {&g_exefs_code, PROCESS_IMAGE_VADDR, PROCESS_IMAGE_MAX_SIZE, "Process Image"},
39 {&g_heap, HEAP_VADDR, HEAP_SIZE }, 44 {&g_heap, HEAP_VADDR, HEAP_SIZE, "Heap"},
40 {&g_shared_mem, SHARED_MEMORY_VADDR, SHARED_MEMORY_SIZE }, 45 {&g_shared_mem, SHARED_MEMORY_VADDR, SHARED_MEMORY_SIZE, "Shared Memory"},
41 {&g_heap_linear, LINEAR_HEAP_VADDR, LINEAR_HEAP_SIZE }, 46 {&g_heap_linear, LINEAR_HEAP_VADDR, LINEAR_HEAP_SIZE, "Linear Heap"},
42 {&g_vram, VRAM_VADDR, VRAM_SIZE }, 47 {&g_vram, VRAM_VADDR, VRAM_SIZE, "VRAM"},
43 {&g_dsp_mem, DSP_RAM_VADDR, DSP_RAM_SIZE }, 48 {&g_dsp_mem, DSP_RAM_VADDR, DSP_RAM_SIZE, "DSP RAM"},
44 {&g_tls_mem, TLS_AREA_VADDR, TLS_AREA_SIZE }, 49 {&g_tls_mem, TLS_AREA_VADDR, TLS_AREA_SIZE, "TLS Area"},
45}; 50};
46 51
47/// Represents a block of memory mapped by ControlMemory/MapMemoryBlock 52/// Represents a block of memory mapped by ControlMemory/MapMemoryBlock
@@ -135,15 +140,27 @@ VAddr PhysicalToVirtualAddress(const PAddr addr) {
135 return addr | 0x80000000; 140 return addr | 0x80000000;
136} 141}
137 142
143// TODO(yuriks): Move this into Process
144static Kernel::VMManager address_space;
145
138void Init() { 146void Init() {
147 using namespace Kernel;
148
139 InitMemoryMap(); 149 InitMemoryMap();
140 150
141 for (MemoryArea& area : memory_areas) { 151 for (MemoryArea& area : memory_areas) {
142 *area.ptr = new u8[area.size]; 152 auto block = std::make_shared<std::vector<u8>>(area.size);
143 MapMemoryRegion(area.base, area.size, *area.ptr); 153 *area.ptr = block->data(); // TODO(yuriks): Remove
154 address_space.MapMemoryBlock(area.base, std::move(block), 0, area.size, MemoryState::Private).Unwrap();
144 } 155 }
145 MapMemoryRegion(CONFIG_MEMORY_VADDR, CONFIG_MEMORY_SIZE, (u8*)&ConfigMem::config_mem); 156
146 MapMemoryRegion(SHARED_PAGE_VADDR, SHARED_PAGE_SIZE, (u8*)&SharedPage::shared_page); 157 auto cfg_mem_vma = address_space.MapBackingMemory(CONFIG_MEMORY_VADDR,
158 (u8*)&ConfigMem::config_mem, CONFIG_MEMORY_SIZE, MemoryState::Shared).MoveFrom();
159 address_space.Reprotect(cfg_mem_vma, VMAPermission::Read);
160
161 auto shared_page_vma = address_space.MapBackingMemory(SHARED_PAGE_VADDR,
162 (u8*)&SharedPage::shared_page, SHARED_PAGE_SIZE, MemoryState::Shared).MoveFrom();
163 address_space.Reprotect(shared_page_vma, VMAPermission::Read);
147 164
148 LOG_DEBUG(HW_Memory, "initialized OK, RAM at %p", g_heap); 165 LOG_DEBUG(HW_Memory, "initialized OK, RAM at %p", g_heap);
149} 166}
@@ -152,8 +169,9 @@ void Shutdown() {
152 heap_map.clear(); 169 heap_map.clear();
153 heap_linear_map.clear(); 170 heap_linear_map.clear();
154 171
172 address_space.Reset();
173
155 for (MemoryArea& area : memory_areas) { 174 for (MemoryArea& area : memory_areas) {
156 delete[] *area.ptr;
157 *area.ptr = nullptr; 175 *area.ptr = nullptr;
158 } 176 }
159 177
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 5d8069acd..28844a915 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -14,12 +14,10 @@
14#include "core/hw/hw.h" 14#include "core/hw/hw.h"
15#include "core/mem_map.h" 15#include "core/mem_map.h"
16#include "core/memory.h" 16#include "core/memory.h"
17#include "core/memory_setup.h"
17 18
18namespace Memory { 19namespace Memory {
19 20
20const u32 PAGE_MASK = PAGE_SIZE - 1;
21const int PAGE_BITS = 12;
22
23enum class PageType { 21enum class PageType {
24 /// Page is unmapped and should cause an access error. 22 /// Page is unmapped and should cause an access error.
25 Unmapped, 23 Unmapped,
@@ -64,7 +62,7 @@ static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
64 while (base != end) { 62 while (base != end) {
65 ASSERT_MSG(base < PageTable::NUM_ENTRIES, "out of range mapping at %08X", base); 63 ASSERT_MSG(base < PageTable::NUM_ENTRIES, "out of range mapping at %08X", base);
66 64
67 if (current_page_table->attributes[base] != PageType::Unmapped) { 65 if (current_page_table->attributes[base] != PageType::Unmapped && type != PageType::Unmapped) {
68 LOG_ERROR(HW_Memory, "overlapping memory ranges at %08X", base * PAGE_SIZE); 66 LOG_ERROR(HW_Memory, "overlapping memory ranges at %08X", base * PAGE_SIZE);
69 } 67 }
70 current_page_table->attributes[base] = type; 68 current_page_table->attributes[base] = type;
@@ -92,6 +90,12 @@ void MapIoRegion(VAddr base, u32 size) {
92 MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special); 90 MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
93} 91}
94 92
93void UnmapRegion(VAddr base, u32 size) {
94 ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
95 ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
96 MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
97}
98
95template <typename T> 99template <typename T>
96T Read(const VAddr vaddr) { 100T Read(const VAddr vaddr) {
97 const u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; 101 const u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
diff --git a/src/core/memory_setup.h b/src/core/memory_setup.h
index 46263495f..361bfc816 100644
--- a/src/core/memory_setup.h
+++ b/src/core/memory_setup.h
@@ -6,8 +6,13 @@
6 6
7#include "common/common_types.h" 7#include "common/common_types.h"
8 8
9#include "core/memory.h"
10
9namespace Memory { 11namespace Memory {
10 12
13const u32 PAGE_MASK = PAGE_SIZE - 1;
14const int PAGE_BITS = 12;
15
11void InitMemoryMap(); 16void InitMemoryMap();
12 17
13/** 18/**
@@ -26,4 +31,6 @@ void MapMemoryRegion(VAddr base, u32 size, u8* target);
26 */ 31 */
27void MapIoRegion(VAddr base, u32 size); 32void MapIoRegion(VAddr base, u32 size);
28 33
34void UnmapRegion(VAddr base, u32 size);
35
29} 36}