Diffstat (limited to 'src/core/hle/kernel/process.cpp')
-rw-r--r--  src/core/hle/kernel/process.cpp  55
1 file changed, 32 insertions, 23 deletions
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 69302cc82..ba80fe7f8 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -3,11 +3,9 @@
 // Refer to the license.txt file included.
 
 #include <memory>
-
 #include "common/assert.h"
 #include "common/common_funcs.h"
 #include "common/logging/log.h"
-
 #include "core/hle/kernel/memory.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/resource_limit.h"
@@ -60,7 +58,8 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
 
             while (bits && index < svc_access_mask.size()) {
                 svc_access_mask.set(index, bits & 1);
-                ++index; bits >>= 1;
+                ++index;
+                bits >>= 1;
             }
         } else if ((type & 0xFF0) == 0xFE0) { // 0x00FF
             // Handle table size
@@ -70,11 +69,11 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
             flags.raw = descriptor & 0xFFFF;
         } else if ((type & 0xFFE) == 0xFF8) { // 0x001F
             // Mapped memory range
-            if (i+1 >= len || ((kernel_caps[i+1] >> 20) & 0xFFE) != 0xFF8) {
+            if (i + 1 >= len || ((kernel_caps[i + 1] >> 20) & 0xFFE) != 0xFF8) {
                 LOG_WARNING(Loader, "Incomplete exheader memory range descriptor ignored.");
                 continue;
             }
-            u32 end_desc = kernel_caps[i+1];
+            u32 end_desc = kernel_caps[i + 1];
             ++i; // Skip over the second descriptor on the next iteration
 
             AddressMapping mapping;
@@ -107,23 +106,28 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
 void Process::Run(s32 main_thread_priority, u32 stack_size) {
     memory_region = GetMemoryRegion(flags.memory_region);
 
-    auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions, MemoryState memory_state) {
-        auto vma = vm_manager.MapMemoryBlock(segment.addr, codeset->memory,
-            segment.offset, segment.size, memory_state).Unwrap();
+    auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions,
+                          MemoryState memory_state) {
+        auto vma = vm_manager
+                       .MapMemoryBlock(segment.addr, codeset->memory, segment.offset, segment.size,
+                                       memory_state)
+                       .Unwrap();
         vm_manager.Reprotect(vma, permissions);
         misc_memory_used += segment.size;
         memory_region->used += segment.size;
     };
 
     // Map CodeSet segments
     MapSegment(codeset->code, VMAPermission::ReadExecute, MemoryState::Code);
     MapSegment(codeset->rodata, VMAPermission::Read, MemoryState::Code);
     MapSegment(codeset->data, VMAPermission::ReadWrite, MemoryState::Private);
 
     // Allocate and map stack
-    vm_manager.MapMemoryBlock(Memory::HEAP_VADDR_END - stack_size,
-        std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size, MemoryState::Locked
-        ).Unwrap();
+    vm_manager
+        .MapMemoryBlock(Memory::HEAP_VADDR_END - stack_size,
+                        std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size,
+                        MemoryState::Locked)
+        .Unwrap();
     misc_memory_used += stack_size;
     memory_region->used += stack_size;
 
@@ -143,7 +147,8 @@ VAddr Process::GetLinearHeapLimit() const {
 }
 
 ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission perms) {
-    if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END || target + size < target) {
+    if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END ||
+        target + size < target) {
         return ERR_INVALID_ADDRESS;
     }
 
@@ -166,7 +171,8 @@ ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission per
     }
     ASSERT(heap_end - heap_start == heap_memory->size());
 
-    CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, heap_memory, target - heap_start, size, MemoryState::Private));
+    CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, heap_memory, target - heap_start,
+                                                       size, MemoryState::Private));
     vm_manager.Reprotect(vma, perms);
 
     heap_used += size;
@@ -176,7 +182,8 @@ ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission per
 }
 
 ResultCode Process::HeapFree(VAddr target, u32 size) {
-    if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END || target + size < target) {
+    if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END ||
+        target + size < target) {
         return ERR_INVALID_ADDRESS;
     }
 
@@ -185,7 +192,8 @@ ResultCode Process::HeapFree(VAddr target, u32 size) {
     }
 
     ResultCode result = vm_manager.UnmapRange(target, size);
-    if (result.IsError()) return result;
+    if (result.IsError())
+        return result;
 
     heap_used -= size;
     memory_region->used -= size;
@@ -203,8 +211,8 @@ ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission p
         target = heap_end;
     }
 
-    if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() ||
-        target > heap_end || target + size < target) {
+    if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() || target > heap_end ||
+        target + size < target) {
 
         return ERR_INVALID_ADDRESS;
     }
@@ -220,7 +228,8 @@ ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission p
     // TODO(yuriks): As is, this lets processes map memory allocated by other processes from the
    // same region. It is unknown if or how the 3DS kernel checks against this.
     size_t offset = target - GetLinearHeapBase();
-    CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, linheap_memory, offset, size, MemoryState::Continuous));
+    CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, linheap_memory, offset, size,
+                                                       MemoryState::Continuous));
     vm_manager.Reprotect(vma, perms);
 
     linear_heap_used += size;
@@ -248,7 +257,8 @@ ResultCode Process::LinearFree(VAddr target, u32 size) {
     }
 
     ResultCode result = vm_manager.UnmapRange(target, size);
-    if (result.IsError()) return result;
+    if (result.IsError())
+        return result;
 
     linear_heap_used -= size;
     memory_region->used -= size;
@@ -272,5 +282,4 @@ Kernel::Process::Process() {}
 Kernel::Process::~Process() {}
 
 SharedPtr<Process> g_current_process;
-
 }
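
Aside: the range checks reformatted in HeapAllocate, HeapFree and LinearAllocate above all end with a `target + size < target` clause, which uses unsigned wrap-around to reject ranges whose size would overflow past the end of the address space. Below is a minimal, self-contained sketch of that guard, not taken from this file; the bounds and the IsValidHeapRange helper are illustrative stand-ins for Memory::HEAP_VADDR / Memory::HEAP_VADDR_END and the in-function checks.

#include <cstdint>
#include <cstdio>

using u32 = std::uint32_t;
using VAddr = u32;

// Hypothetical heap bounds standing in for Memory::HEAP_VADDR and Memory::HEAP_VADDR_END.
constexpr VAddr HEAP_VADDR = 0x08000000;
constexpr VAddr HEAP_VADDR_END = 0x10000000;

// Mirrors the guard `target < HEAP_VADDR || target + size > HEAP_VADDR_END ||
// target + size < target`: the last clause only fires when `target + size` wrapped
// past 2^32, so an oversized `size` cannot slip past the upper-bound comparison.
bool IsValidHeapRange(VAddr target, u32 size) {
    return !(target < HEAP_VADDR || target + size > HEAP_VADDR_END || target + size < target);
}

int main() {
    std::printf("%d\n", IsValidHeapRange(0x08000000, 0x1000));     // 1: within bounds
    std::printf("%d\n", IsValidHeapRange(0x0FFFFFFF, 0xFFFFFFFF)); // 0: target + size wraps around
    return 0;
}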