path: root/src/core/hle/kernel/process.cpp
author     Yuri Kunde Schlesner    2015-08-05 21:26:52 -0300
committer  Yuri Kunde Schlesner    2015-08-16 01:03:47 -0300
commit     74d4bc0af1d2f22105bf3c00efcb85613d59cc19 (patch)
tree       171c5d0508d99f9ef4dcba2a0e3543eb9bdfa1db /src/core/hle/kernel/process.cpp
parent     HLE: Remove empty ConfigMem and SharedPage Shutdown functions (diff)
Kernel: Add more infrastructure to support different memory layouts
This adds some structures necessary to support multiple memory regions in the future. It also adds support for different system memory types and the new linear heap mapping at 0x30000000.
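Note: for context, below is a minimal sketch of the address computation this commit introduces. The per-process linear heap is carved out of a shared FCRAM region, and its virtual base depends on the kernel version requested in the ExHeader (the 0x30000000 mapping is only selected from kernel version 2.44, i.e. 0x22C, onward, as in the diff). The constant values and the MemoryRegionInfo shape are simplified assumptions for illustration, not the emulator's actual declarations.

#include <cstdint>

using u32 = std::uint32_t;
using VAddr = std::uint32_t;

// Assumed constants: values mirror the commonly documented 3DS layout
// (old linear heap at 0x14000000, new linear heap at 0x30000000).
constexpr VAddr LINEAR_HEAP_VADDR = 0x14000000;
constexpr VAddr NEW_LINEAR_HEAP_VADDR = 0x30000000;

// Simplified stand-in for the per-region bookkeeping added by this commit.
struct MemoryRegionInfo {
    u32 base; // offset of the region within FCRAM (assumption for this sketch)
    u32 size; // size of the region in bytes
};

// Processes built against kernel >= 2.44 (0x22C) get the new linear heap mapping.
VAddr LinearHeapBase(u32 kernel_version, const MemoryRegionInfo& region) {
    const VAddr va_base =
        (kernel_version < 0x22C) ? LINEAR_HEAP_VADDR : NEW_LINEAR_HEAP_VADDR;
    return va_base + region.base;
}

VAddr LinearHeapLimit(u32 kernel_version, const MemoryRegionInfo& region) {
    return LinearHeapBase(kernel_version, region) + region.size;
}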
Diffstat (limited to 'src/core/hle/kernel/process.cpp')
-rw-r--r--  src/core/hle/kernel/process.cpp  44
1 file changed, 28 insertions(+), 16 deletions(-)
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 2cd1cfc14..1f45e6cf8 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -96,7 +96,7 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
 
             int minor = kernel_version & 0xFF;
             int major = (kernel_version >> 8) & 0xFF;
-            LOG_DEBUG(Loader, "ExHeader kernel version: %d.%d", major, minor);
+            LOG_INFO(Loader, "ExHeader kernel version: %d.%d", major, minor);
         } else {
             LOG_ERROR(Loader, "Unhandled kernel caps descriptor: 0x%08X", descriptor);
         }
@@ -104,6 +104,8 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
 }
 
 void Process::Run(s32 main_thread_priority, u32 stack_size) {
+    memory_region = GetMemoryRegion(flags.memory_region);
+
     auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions, MemoryState memory_state) {
         auto vma = vm_manager.MapMemoryBlock(segment.addr, codeset->memory,
                                              segment.offset, segment.size, memory_state).Unwrap();
@@ -124,6 +126,15 @@ void Process::Run(s32 main_thread_priority, u32 stack_size) {
     Kernel::SetupMainThread(codeset->entrypoint, main_thread_priority);
 }
 
+VAddr Process::GetLinearHeapBase() const {
+    return (kernel_version < 0x22C ? Memory::LINEAR_HEAP_VADDR : Memory::NEW_LINEAR_HEAP_VADDR)
+        + memory_region->base;
+}
+
+VAddr Process::GetLinearHeapLimit() const {
+    return GetLinearHeapBase() + memory_region->size;
+}
+
 ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission perms) {
     if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END || target + size < target) {
         return ERR_INVALID_ADDRESS;
@@ -166,19 +177,16 @@ ResultCode Process::HeapFree(VAddr target, u32 size) {
 }
 
 ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission perms) {
-    if (linear_heap_memory == nullptr) {
-        // Initialize heap
-        linear_heap_memory = std::make_shared<std::vector<u8>>();
-    }
+    auto& linheap_memory = memory_region->linear_heap_memory;
 
-    VAddr heap_end = Memory::LINEAR_HEAP_VADDR + (u32)linear_heap_memory->size();
+    VAddr heap_end = GetLinearHeapBase() + (u32)linheap_memory->size();
     // Games and homebrew only ever seem to pass 0 here (which lets the kernel decide the address),
     // but explicit addresses are also accepted and respected.
     if (target == 0) {
         target = heap_end;
     }
 
-    if (target < Memory::LINEAR_HEAP_VADDR || target + size > Memory::LINEAR_HEAP_VADDR_END ||
+    if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() ||
         target > heap_end || target + size < target) {
 
         return ERR_INVALID_ADDRESS;
@@ -188,25 +196,29 @@ ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission p
     // end. It's possible to free gaps in the middle of the heap and then reallocate them later,
     // but expansions are only allowed at the end.
     if (target == heap_end) {
-        linear_heap_memory->insert(linear_heap_memory->end(), size, 0);
-        vm_manager.RefreshMemoryBlockMappings(linear_heap_memory.get());
+        linheap_memory->insert(linheap_memory->end(), size, 0);
+        vm_manager.RefreshMemoryBlockMappings(linheap_memory.get());
     }
 
-    size_t offset = target - Memory::LINEAR_HEAP_VADDR;
-    CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, linear_heap_memory, offset, size, MemoryState::Continuous));
+    // TODO(yuriks): As is, this lets processes map memory allocated by other processes from the
+    // same region. It is unknown if or how the 3DS kernel checks against this.
+    size_t offset = target - GetLinearHeapBase();
+    CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, linheap_memory, offset, size, MemoryState::Continuous));
     vm_manager.Reprotect(vma, perms);
 
     return MakeResult<VAddr>(target);
 }
 
 ResultCode Process::LinearFree(VAddr target, u32 size) {
-    if (linear_heap_memory == nullptr || target < Memory::LINEAR_HEAP_VADDR ||
-        target + size > Memory::LINEAR_HEAP_VADDR_END || target + size < target) {
+    auto& linheap_memory = memory_region->linear_heap_memory;
+
+    if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() ||
+        target + size < target) {
 
         return ERR_INVALID_ADDRESS;
     }
 
-    VAddr heap_end = Memory::LINEAR_HEAP_VADDR + (u32)linear_heap_memory->size();
+    VAddr heap_end = GetLinearHeapBase() + (u32)linheap_memory->size();
     if (target + size > heap_end) {
         return ERR_INVALID_ADDRESS_STATE;
     }
@@ -221,8 +233,8 @@ ResultCode Process::LinearFree(VAddr target, u32 size) {
         ASSERT(vma != vm_manager.vma_map.end());
         ASSERT(vma->second.type == VMAType::Free);
         VAddr new_end = vma->second.base;
-        if (new_end >= Memory::LINEAR_HEAP_VADDR) {
-            linear_heap_memory->resize(new_end - Memory::LINEAR_HEAP_VADDR);
+        if (new_end >= GetLinearHeapBase()) {
+            linheap_memory->resize(new_end - GetLinearHeapBase());
         }
     }
 
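A recurring idiom in the range checks above is target + size < target, which catches unsigned wrap-around when the requested size would overflow the 32-bit address space. A minimal standalone illustration follows; the type aliases are assumptions for this sketch, not taken from the emulator.

#include <cassert>
#include <cstdint>

using u32 = std::uint32_t;
using VAddr = std::uint32_t;

// Unsigned addition wraps modulo 2^32, so an overflowing target + size ends up
// smaller than target; the kernel range checks above rely on exactly this.
bool RangeOverflows(VAddr target, u32 size) {
    return target + size < target;
}

int main() {
    assert(!RangeOverflows(0x14000000, 0x1000)); // ordinary request
    assert(RangeOverflows(0xFFFFF000, 0x2000));  // wraps past 2^32, rejected
    return 0;
}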