path: root/src/core/hle/kernel
author     Yuri Kunde Schlesner    2015-05-21 00:37:07 -0300
committer  Yuri Kunde Schlesner    2015-05-27 03:24:12 -0300
commit     0a60aa75c2b03b8ed6752e5c64462bf86c52fcfc (patch)
tree       3113ce6d149d7adf9ab5c1eb189102869760e55a /src/core/hle/kernel
parent     Merge pull request #826 from lioncash/tables (diff)
download   yuzu-0a60aa75c2b03b8ed6752e5c64462bf86c52fcfc.tar.gz
           yuzu-0a60aa75c2b03b8ed6752e5c64462bf86c52fcfc.tar.xz
           yuzu-0a60aa75c2b03b8ed6752e5c64462bf86c52fcfc.zip
Kernel: Add VMManager to manage process address spaces
This enables more dynamic management of the process address space, compared to just directly configuring the page table for major areas. This will serve as the foundation upon which the rest of the Kernel memory management functions will be built.
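For illustration, here is a minimal sketch of how kernel code might drive the new interface. The caller, address, and sizes are hypothetical, and it assumes ResultVal exposes the usual Succeeded()/operator* helpers from core/hle/result.h:

#include <memory>
#include <vector>
#include "core/hle/kernel/vm_manager.h"

// Hypothetical caller, not part of this commit: maps a freshly allocated block,
// marks it read/execute, and later unmaps it again.
static void ExampleUsage(Kernel::VMManager& address_space) {
    const VAddr target = 0x00100000; // example guest address, page aligned
    const u32 size = 16 * 0x1000;    // 16 pages

    auto block = std::make_shared<std::vector<u8>>(size);
    auto vma = address_space.MapMemoryBlock(target, block, 0, size, Kernel::MemoryState::Private);
    if (vma.Succeeded()) {
        // Mappings start out ReadWrite; restrict to read/execute once code has been copied in.
        address_space.Reprotect(*vma, Kernel::VMAPermission::ReadExecute);

        // ... later: look the region up again and release it.
        address_space.Unmap(address_space.FindVMA(target));
    }
}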
Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--   src/core/hle/kernel/vm_manager.cpp   245
-rw-r--r--   src/core/hle/kernel/vm_manager.h     200
2 files changed, 445 insertions, 0 deletions
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
new file mode 100644
index 000000000..b2dd21542
--- /dev/null
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -0,0 +1,245 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"

#include "core/hle/kernel/vm_manager.h"
#include "core/memory_setup.h"

namespace Kernel {

bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
    ASSERT(base + size == next.base);
    if (permissions != next.permissions ||
            meminfo_state != next.meminfo_state ||
            type != next.type) {
        return false;
    }
    if (type == VMAType::AllocatedMemoryBlock &&
            (backing_block != next.backing_block || offset + size != next.offset)) {
        return false;
    }
    if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) {
        return false;
    }
    if (type == VMAType::MMIO && paddr + size != next.paddr) {
        return false;
    }
    return true;
}

VMManager::VMManager() {
    Reset();
}

void VMManager::Reset() {
    vma_map.clear();

    // Initialize the map with a single free region covering the entire managed space.
    VirtualMemoryArea initial_vma;
    initial_vma.size = MAX_ADDRESS;
    vma_map.emplace(initial_vma.base, initial_vma);

    UpdatePageTableForVMA(initial_vma);
}

VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
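    // upper_bound gives the first VMA whose base is greater than target, so the entry before it
    // is the VMA whose range covers target (the map always has an entry starting at address 0).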
    return std::prev(vma_map.upper_bound(target));
}

ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
        std::shared_ptr<std::vector<u8>> block, u32 offset, u32 size, MemoryState state) {
    ASSERT(block != nullptr);
    ASSERT(offset + size <= block->size());

    // This is the appropriately sized VMA that will turn into our allocation.
    CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
    VirtualMemoryArea& final_vma = vma_handle->second;
    ASSERT(final_vma.size == size);

    final_vma.type = VMAType::AllocatedMemoryBlock;
    final_vma.permissions = VMAPermission::ReadWrite;
    final_vma.meminfo_state = state;
    final_vma.backing_block = block;
    final_vma.offset = offset;
    UpdatePageTableForVMA(final_vma);

    return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
}

ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* memory, u32 size, MemoryState state) {
    ASSERT(memory != nullptr);

    // This is the appropriately sized VMA that will turn into our allocation.
    CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
    VirtualMemoryArea& final_vma = vma_handle->second;
    ASSERT(final_vma.size == size);

    final_vma.type = VMAType::BackingMemory;
    final_vma.permissions = VMAPermission::ReadWrite;
    final_vma.meminfo_state = state;
    final_vma.backing_memory = memory;
    UpdatePageTableForVMA(final_vma);

    return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
}

ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state) {
    // This is the appropriately sized VMA that will turn into our allocation.
    CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
    VirtualMemoryArea& final_vma = vma_handle->second;
    ASSERT(final_vma.size == size);

    final_vma.type = VMAType::MMIO;
    final_vma.permissions = VMAPermission::ReadWrite;
    final_vma.meminfo_state = state;
    final_vma.paddr = paddr;
    UpdatePageTableForVMA(final_vma);

    return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
}

void VMManager::Unmap(VMAHandle vma_handle) {
    VMAIter iter = StripIterConstness(vma_handle);

    VirtualMemoryArea& vma = iter->second;
    vma.type = VMAType::Free;
    vma.permissions = VMAPermission::None;
    vma.meminfo_state = MemoryState::Free;

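    // Drop any references to backing storage so the shared_ptr (if any) can be released
    // and no stale pointers are kept in the now-free region.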
    vma.backing_block = nullptr;
    vma.offset = 0;
    vma.backing_memory = nullptr;
    vma.paddr = 0;

    UpdatePageTableForVMA(vma);

    MergeAdjacent(iter);
}

void VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) {
    VMAIter iter = StripIterConstness(vma_handle);

    VirtualMemoryArea& vma = iter->second;
    vma.permissions = new_perms;
    UpdatePageTableForVMA(vma);

    MergeAdjacent(iter);
}

VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) {
    // This uses a neat C++ trick to convert a const_iterator to a regular iterator, given
    // non-const access to its container.
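    // Since C++11, std::map::erase(const_iterator, const_iterator) returns a mutable iterator;
    // passing an empty range erases nothing but still yields the non-const equivalent of `iter`.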
    return vma_map.erase(iter, iter); // Erases an empty range of elements
}

ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: %08X", size);
    ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: %08X", base);

    VMAIter vma_handle = StripIterConstness(FindVMA(base));
    if (vma_handle == vma_map.end()) {
        // Target address is outside the range managed by the kernel
        return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS,
                ErrorSummary::InvalidArgument, ErrorLevel::Usage); // 0xE0E01BF5
    }

    VirtualMemoryArea& vma = vma_handle->second;
    if (vma.type != VMAType::Free) {
        // Region is already allocated
        return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS,
                ErrorSummary::InvalidState, ErrorLevel::Usage); // 0xE0A01BF5
    }

    u32 start_in_vma = base - vma.base;
    u32 end_in_vma = start_in_vma + size;

    if (end_in_vma > vma.size) {
        // Requested allocation doesn't fit inside VMA
        return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS,
                ErrorSummary::InvalidState, ErrorLevel::Usage); // 0xE0A01BF5
    }

    if (end_in_vma != vma.size) {
        // Split VMA at the end of the allocated region
        SplitVMA(vma_handle, end_in_vma);
    }
    if (start_in_vma != 0) {
        // Split VMA at the start of the allocated region
        vma_handle = SplitVMA(vma_handle, start_in_vma);
    }

    return MakeResult<VMAIter>(vma_handle);
}

VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u32 offset_in_vma) {
    VirtualMemoryArea& old_vma = vma_handle->second;
    VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA

    // For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably
    // a bug. This restriction might be removed later.
    ASSERT(offset_in_vma < old_vma.size);
    ASSERT(offset_in_vma > 0);

    old_vma.size = offset_in_vma;
    new_vma.base += offset_in_vma;
    new_vma.size -= offset_in_vma;

    switch (new_vma.type) {
    case VMAType::Free:
        break;
    case VMAType::AllocatedMemoryBlock:
        new_vma.offset += offset_in_vma;
        break;
    case VMAType::BackingMemory:
        new_vma.backing_memory += offset_in_vma;
        break;
    case VMAType::MMIO:
        new_vma.paddr += offset_in_vma;
        break;
    }

    ASSERT(old_vma.CanBeMergedWith(new_vma));

    return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
}

VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
    VMAIter next_vma = std::next(iter);
    if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
        iter->second.size += next_vma->second.size;
        vma_map.erase(next_vma);
    }

    if (iter != vma_map.begin()) {
        VMAIter prev_vma = std::prev(iter);
        if (prev_vma->second.CanBeMergedWith(iter->second)) {
            prev_vma->second.size += iter->second.size;
            vma_map.erase(iter);
            iter = prev_vma;
        }
    }

    return iter;
}

void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
    switch (vma.type) {
    case VMAType::Free:
        Memory::UnmapRegion(vma.base, vma.size);
        break;
    case VMAType::AllocatedMemoryBlock:
        Memory::MapMemoryRegion(vma.base, vma.size, vma.backing_block->data() + vma.offset);
        break;
    case VMAType::BackingMemory:
        Memory::MapMemoryRegion(vma.base, vma.size, vma.backing_memory);
        break;
    case VMAType::MMIO:
        // TODO(yuriks): Add support for MMIO handlers.
        Memory::MapIoRegion(vma.base, vma.size);
        break;
    }
}

}
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
new file mode 100644
index 000000000..22b724603
--- /dev/null
+++ b/src/core/hle/kernel/vm_manager.h
@@ -0,0 +1,200 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <map>
#include <memory>
#include <string>
#include <vector>

#include "common/common_types.h"

#include "core/hle/result.h"

namespace Kernel {

enum class VMAType : u8 {
    /// VMA represents an unmapped region of the address space.
    Free,
    /// VMA is backed by a ref-counted allocated memory block.
    AllocatedMemoryBlock,
    /// VMA is backed by a raw, unmanaged pointer.
    BackingMemory,
    /// VMA is mapped to MMIO registers at a fixed PAddr.
    MMIO,
    // TODO(yuriks): Implement MemoryAlias to support MAP/UNMAP
};

/// Permissions for mapped memory blocks
enum class VMAPermission : u8 {
    None = 0,
    Read = 1,
    Write = 2,
    Execute = 4,

    ReadWrite = Read | Write,
    ReadExecute = Read | Execute,
    WriteExecute = Write | Execute,
    ReadWriteExecute = Read | Write | Execute,
};

/// Set of values returned in MemoryInfo.state by svcQueryMemory.
enum class MemoryState : u8 {
    Free = 0,
    Reserved = 1,
    IO = 2,
    Static = 3,
    Code = 4,
    Private = 5,
    Shared = 6,
    Continuous = 7,
    Aliased = 8,
    Alias = 9,
    AliasCode = 10,
    Locked = 11,
};

/**
 * Represents a VMA in an address space. A VMA is a contiguous region of virtual addressing space
 * with homogeneous attributes across its extents. In this particular implementation each VMA is
 * also backed by a single host memory allocation.
 */
struct VirtualMemoryArea {
    /// Virtual base address of the region.
    VAddr base = 0;
    /// Size of the region.
    u32 size = 0;

    VMAType type = VMAType::Free;
    VMAPermission permissions = VMAPermission::None;
    /// Tag returned by svcQueryMemory. Not otherwise used.
    MemoryState meminfo_state = MemoryState::Free;

    // Settings for type = AllocatedMemoryBlock
    /// Memory block backing this VMA.
    std::shared_ptr<std::vector<u8>> backing_block = nullptr;
    /// Offset into the backing_block the mapping starts from.
    u32 offset = 0;

    // Settings for type = BackingMemory
    /// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed.
    u8* backing_memory = nullptr;

    // Settings for type = MMIO
    /// Physical address of the register area this VMA maps to.
    PAddr paddr = 0;

    /// Tests if this area can be merged to the right with `next`.
    bool CanBeMergedWith(const VirtualMemoryArea& next) const;
};

/**
 * Manages a process' virtual addressing space. This class maintains a list of allocated and free
 * regions in the address space, along with their attributes, and allows kernel clients to
 * manipulate it, adjusting the page table to match.
 *
 * This is similar in idea and purpose to the VM manager present in operating system kernels, with
 * the main difference being that it doesn't have to support swapping or memory mapping of files.
 * The implementation is also simplified by not having to allocate page frames. See these articles
 * about the Linux kernel for an explanation of the concept and implementation:
 * - http://duartes.org/gustavo/blog/post/how-the-kernel-manages-your-memory/
 * - http://duartes.org/gustavo/blog/post/page-cache-the-affair-between-memory-and-files/
 */
class VMManager {
    // TODO(yuriks): Make page tables switchable to support multiple VMManagers
public:
    /**
     * The maximum amount of address space managed by the kernel. Addresses above this are never used.
     * @note This is the limit used by the New 3DS kernel. Old 3DS used 0x20000000.
     */
    static const u32 MAX_ADDRESS = 0x40000000;

    /**
     * A map covering the entirety of the managed address space, keyed by the `base` field of each
     * VMA. It must always be modified by splitting or merging VMAs, so that the invariant
     * `elem.base + elem.size == next.base` is preserved, and mergeable regions must always be
     * merged when possible so that no two similar and adjacent regions exist that have not been
     * merged.
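     *
     * For example (illustrative only), after mapping one page of a block at 0x08000000 the map
     * might contain:
     *   0x00000000 -> { size 0x08000000, type Free }
     *   0x08000000 -> { size 0x00001000, type AllocatedMemoryBlock }
     *   0x08001000 -> { size 0x37FFF000, type Free }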
     */
    std::map<VAddr, VirtualMemoryArea> vma_map;
    using VMAHandle = decltype(vma_map)::const_iterator;

    VMManager();

    /// Clears the address space map, re-initializing with a single free area.
    void Reset();

    /// Finds the VMA that contains the given address, or `vma_map.end()` if there is none.
    VMAHandle FindVMA(VAddr target) const;

    // TODO(yuriks): Should these functions actually return the handle?

    /**
     * Maps part of a ref-counted block of memory at a given address.
     *
     * @param target The guest address to start the mapping at.
     * @param block The block to be mapped.
     * @param offset Offset into `block` to map from.
     * @param size Size of the mapping.
     * @param state MemoryState tag to attach to the VMA.
     */
    ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block,
            u32 offset, u32 size, MemoryState state);

    /**
     * Maps an unmanaged host memory pointer at a given address.
     *
     * @param target The guest address to start the mapping at.
     * @param memory The memory to be mapped.
     * @param size Size of the mapping.
     * @param state MemoryState tag to attach to the VMA.
     */
    ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u32 size, MemoryState state);

    /**
     * Maps a memory-mapped IO region at a given address.
     *
     * @param target The guest address to start the mapping at.
     * @param paddr The physical address where the registers are present.
     * @param size Size of the mapping.
     * @param state MemoryState tag to attach to the VMA.
     */
    ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state);

    /// Unmaps the given VMA.
    void Unmap(VMAHandle vma);

    /// Changes the permissions of the given VMA.
    void Reprotect(VMAHandle vma, VMAPermission new_perms);

private:
    using VMAIter = decltype(vma_map)::iterator;

    /// Converts a VMAHandle to a mutable VMAIter.
    VMAIter StripIterConstness(const VMAHandle& iter);

    /**
     * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing
     * the appropriate error checking.
     */
    ResultVal<VMAIter> CarveVMA(VAddr base, u32 size);

    /**
     * Splits a VMA in two, at the specified offset.
     * @returns the right side of the split, with the original iterator becoming the left side.
     */
    VMAIter SplitVMA(VMAIter vma, u32 offset_in_vma);

    /**
     * Checks for and merges the specified VMA with adjacent ones if possible.
     * @returns the merged VMA or the original if no merging was possible.
     */
    VMAIter MergeAdjacent(VMAIter vma);

    /// Updates the pages corresponding to this VMA so they match the VMA's attributes.
    void UpdatePageTableForVMA(const VirtualMemoryArea& vma);
};

}