Diffstat (limited to 'src/core/hle/kernel/process.cpp')
-rw-r--r--  src/core/hle/kernel/process.cpp | 65
1 file changed, 40 insertions(+), 25 deletions(-)
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 69302cc82..cc37e574c 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -26,8 +26,10 @@ SharedPtr<CodeSet> CodeSet::Create(std::string name, u64 program_id) {
     return codeset;
 }
 
-CodeSet::CodeSet() {}
-CodeSet::~CodeSet() {}
+CodeSet::CodeSet() {
+}
+CodeSet::~CodeSet() {
+}
 
 u32 Process::next_process_id;
 
@@ -60,7 +62,8 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
 
             while (bits && index < svc_access_mask.size()) {
                 svc_access_mask.set(index, bits & 1);
-                ++index; bits >>= 1;
+                ++index;
+                bits >>= 1;
             }
         } else if ((type & 0xFF0) == 0xFE0) { // 0x00FF
             // Handle table size
@@ -70,11 +73,11 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
             flags.raw = descriptor & 0xFFFF;
         } else if ((type & 0xFFE) == 0xFF8) { // 0x001F
             // Mapped memory range
-            if (i+1 >= len || ((kernel_caps[i+1] >> 20) & 0xFFE) != 0xFF8) {
+            if (i + 1 >= len || ((kernel_caps[i + 1] >> 20) & 0xFFE) != 0xFF8) {
                 LOG_WARNING(Loader, "Incomplete exheader memory range descriptor ignored.");
                 continue;
             }
-            u32 end_desc = kernel_caps[i+1];
+            u32 end_desc = kernel_caps[i + 1];
             ++i; // Skip over the second descriptor on the next iteration
 
             AddressMapping mapping;
@@ -107,23 +110,28 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
 void Process::Run(s32 main_thread_priority, u32 stack_size) {
     memory_region = GetMemoryRegion(flags.memory_region);
 
-    auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions, MemoryState memory_state) {
-        auto vma = vm_manager.MapMemoryBlock(segment.addr, codeset->memory,
-                segment.offset, segment.size, memory_state).Unwrap();
+    auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions,
+                          MemoryState memory_state) {
+        auto vma = vm_manager
+                       .MapMemoryBlock(segment.addr, codeset->memory, segment.offset, segment.size,
+                                       memory_state)
+                       .Unwrap();
         vm_manager.Reprotect(vma, permissions);
         misc_memory_used += segment.size;
         memory_region->used += segment.size;
     };
 
     // Map CodeSet segments
     MapSegment(codeset->code, VMAPermission::ReadExecute, MemoryState::Code);
     MapSegment(codeset->rodata, VMAPermission::Read, MemoryState::Code);
     MapSegment(codeset->data, VMAPermission::ReadWrite, MemoryState::Private);
 
     // Allocate and map stack
-    vm_manager.MapMemoryBlock(Memory::HEAP_VADDR_END - stack_size,
-            std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size, MemoryState::Locked
-            ).Unwrap();
+    vm_manager
+        .MapMemoryBlock(Memory::HEAP_VADDR_END - stack_size,
+                        std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size,
+                        MemoryState::Locked)
+        .Unwrap();
     misc_memory_used += stack_size;
     memory_region->used += stack_size;
 
@@ -143,7 +151,8 @@ VAddr Process::GetLinearHeapLimit() const {
 }
 
 ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission perms) {
-    if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END || target + size < target) {
+    if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END ||
+        target + size < target) {
         return ERR_INVALID_ADDRESS;
     }
 
@@ -166,7 +175,8 @@ ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission per
     }
     ASSERT(heap_end - heap_start == heap_memory->size());
 
-    CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, heap_memory, target - heap_start, size, MemoryState::Private));
+    CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, heap_memory, target - heap_start,
+                                                       size, MemoryState::Private));
     vm_manager.Reprotect(vma, perms);
 
     heap_used += size;
@@ -176,7 +186,8 @@ ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission per
 }
 
 ResultCode Process::HeapFree(VAddr target, u32 size) {
-    if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END || target + size < target) {
+    if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END ||
+        target + size < target) {
         return ERR_INVALID_ADDRESS;
     }
 
@@ -185,7 +196,8 @@ ResultCode Process::HeapFree(VAddr target, u32 size) {
     }
 
     ResultCode result = vm_manager.UnmapRange(target, size);
-    if (result.IsError()) return result;
+    if (result.IsError())
+        return result;
 
     heap_used -= size;
     memory_region->used -= size;
@@ -203,8 +215,8 @@ ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission p
         target = heap_end;
     }
 
-    if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() ||
-            target > heap_end || target + size < target) {
+    if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() || target > heap_end ||
+        target + size < target) {
 
         return ERR_INVALID_ADDRESS;
     }
@@ -220,7 +232,8 @@ ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission p
     // TODO(yuriks): As is, this lets processes map memory allocated by other processes from the
     // same region. It is unknown if or how the 3DS kernel checks against this.
     size_t offset = target - GetLinearHeapBase();
-    CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, linheap_memory, offset, size, MemoryState::Continuous));
+    CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, linheap_memory, offset, size,
+                                                       MemoryState::Continuous));
     vm_manager.Reprotect(vma, perms);
 
     linear_heap_used += size;
@@ -248,7 +261,8 @@ ResultCode Process::LinearFree(VAddr target, u32 size) {
     }
 
     ResultCode result = vm_manager.UnmapRange(target, size);
-    if (result.IsError()) return result;
+    if (result.IsError())
+        return result;
 
     linear_heap_used -= size;
     memory_region->used -= size;
@@ -268,9 +282,10 @@ ResultCode Process::LinearFree(VAddr target, u32 size) {
     return RESULT_SUCCESS;
 }
 
-Kernel::Process::Process() {}
-Kernel::Process::~Process() {}
+Kernel::Process::Process() {
+}
+Kernel::Process::~Process() {
+}
 
 SharedPtr<Process> g_current_process;
-
 }