summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGravatar Liam2023-10-21 16:47:43 -0400
committerGravatar Liam2023-10-21 20:03:41 -0400
commit8c59543ee32c8bff575bab7ec1e70f76f8eda437 (patch)
treeafeee77ba66daf7ec6bff18515c8fbf1bb8468e0
parentMerge pull request #11831 from liamwhite/hosversionbetween (diff)
downloadyuzu-8c59543ee32c8bff575bab7ec1e70f76f8eda437.tar.gz
yuzu-8c59543ee32c8bff575bab7ec1e70f76f8eda437.tar.xz
yuzu-8c59543ee32c8bff575bab7ec1e70f76f8eda437.zip
kernel: update KProcess
-rw-r--r--src/core/arm/arm_interface.cpp6
-rw-r--r--src/core/core.cpp8
-rw-r--r--src/core/debugger/debugger.cpp24
-rw-r--r--src/core/debugger/gdbstub.cpp42
-rw-r--r--src/core/file_sys/program_metadata.cpp10
-rw-r--r--src/core/file_sys/program_metadata.h15
-rw-r--r--src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp59
-rw-r--r--src/core/hle/kernel/board/nintendo/nx/k_system_control.h13
-rw-r--r--src/core/hle/kernel/k_capabilities.h4
-rw-r--r--src/core/hle/kernel/k_condition_variable.cpp22
-rw-r--r--src/core/hle/kernel/k_condition_variable.h9
-rw-r--r--src/core/hle/kernel/k_interrupt_manager.cpp2
-rw-r--r--src/core/hle/kernel/k_memory_manager.cpp125
-rw-r--r--src/core/hle/kernel/k_memory_manager.h12
-rw-r--r--src/core/hle/kernel/k_page_table.cpp14
-rw-r--r--src/core/hle/kernel/k_page_table.h4
-rw-r--r--src/core/hle/kernel/k_process.cpp1442
-rw-r--r--src/core/hle/kernel/k_process.h724
-rw-r--r--src/core/hle/kernel/k_scheduler.cpp4
-rw-r--r--src/core/hle/kernel/k_system_resource.cpp87
-rw-r--r--src/core/hle/kernel/k_thread.cpp16
-rw-r--r--src/core/hle/kernel/k_thread.h1
-rw-r--r--src/core/hle/kernel/kernel.cpp54
-rw-r--r--src/core/hle/kernel/kernel.h3
-rw-r--r--src/core/hle/kernel/svc.cpp2
-rw-r--r--src/core/hle/kernel/svc/svc_info.cpp28
-rw-r--r--src/core/hle/kernel/svc/svc_lock.cpp4
-rw-r--r--src/core/hle/kernel/svc/svc_physical_memory.cpp4
-rw-r--r--src/core/hle/kernel/svc/svc_synchronization.cpp2
-rw-r--r--src/core/hle/kernel/svc/svc_thread.cpp7
-rw-r--r--src/core/hle/kernel/svc_generator.py2
-rw-r--r--src/core/hle/kernel/svc_types.h46
-rw-r--r--src/core/hle/service/kernel_helpers.cpp6
-rw-r--r--src/core/hle/service/nvnflinger/nvnflinger.cpp15
-rw-r--r--src/core/hle/service/nvnflinger/nvnflinger.h3
-rw-r--r--src/core/hle/service/pm/pm.cpp2
-rw-r--r--src/core/reporter.cpp2
-rw-r--r--src/yuzu/debugger/wait_tree.cpp2
-rw-r--r--src/yuzu/main.cpp2
39 files changed, 1811 insertions, 1016 deletions
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp
index 0c012f094..5e27dde58 100644
--- a/src/core/arm/arm_interface.cpp
+++ b/src/core/arm/arm_interface.cpp
@@ -86,9 +86,9 @@ void ARM_Interface::SymbolicateBacktrace(Core::System& system, std::vector<Backt
86 86
87 std::map<std::string, Symbols::Symbols> symbols; 87 std::map<std::string, Symbols::Symbols> symbols;
88 for (const auto& module : modules) { 88 for (const auto& module : modules) {
89 symbols.insert_or_assign( 89 symbols.insert_or_assign(module.second,
90 module.second, Symbols::GetSymbols(module.first, system.ApplicationMemory(), 90 Symbols::GetSymbols(module.first, system.ApplicationMemory(),
91 system.ApplicationProcess()->Is64BitProcess())); 91 system.ApplicationProcess()->Is64Bit()));
92 } 92 }
93 93
94 for (auto& entry : out) { 94 for (auto& entry : out) {
diff --git a/src/core/core.cpp b/src/core/core.cpp
index d7e2efbd7..296727ed7 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -309,16 +309,8 @@ struct System::Impl {
309 309
310 telemetry_session->AddInitialInfo(*app_loader, fs_controller, *content_provider); 310 telemetry_session->AddInitialInfo(*app_loader, fs_controller, *content_provider);
311 311
312 // Create a resource limit for the process.
313 const auto physical_memory_size =
314 kernel.MemoryManager().GetSize(Kernel::KMemoryManager::Pool::Application);
315 auto* resource_limit = Kernel::CreateResourceLimitForProcess(system, physical_memory_size);
316
317 // Create the process. 312 // Create the process.
318 auto main_process = Kernel::KProcess::Create(system.Kernel()); 313 auto main_process = Kernel::KProcess::Create(system.Kernel());
319 ASSERT(Kernel::KProcess::Initialize(main_process, system, "main",
320 Kernel::KProcess::ProcessType::Userland, resource_limit)
321 .IsSuccess());
322 Kernel::KProcess::Register(system.Kernel(), main_process); 314 Kernel::KProcess::Register(system.Kernel(), main_process);
323 kernel.MakeApplicationProcess(main_process); 315 kernel.MakeApplicationProcess(main_process);
324 const auto [load_result, load_parameters] = app_loader->Load(*main_process, system); 316 const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
diff --git a/src/core/debugger/debugger.cpp b/src/core/debugger/debugger.cpp
index a1589fecb..0e270eb50 100644
--- a/src/core/debugger/debugger.cpp
+++ b/src/core/debugger/debugger.cpp
@@ -258,20 +258,20 @@ private:
258 Kernel::KScopedSchedulerLock sl{system.Kernel()}; 258 Kernel::KScopedSchedulerLock sl{system.Kernel()};
259 259
260 // Put all threads to sleep on next scheduler round. 260 // Put all threads to sleep on next scheduler round.
261 for (auto* thread : ThreadList()) { 261 for (auto& thread : ThreadList()) {
262 thread->RequestSuspend(Kernel::SuspendType::Debug); 262 thread.RequestSuspend(Kernel::SuspendType::Debug);
263 } 263 }
264 } 264 }
265 265
266 void ResumeEmulation(Kernel::KThread* except = nullptr) { 266 void ResumeEmulation(Kernel::KThread* except = nullptr) {
267 // Wake up all threads. 267 // Wake up all threads.
268 for (auto* thread : ThreadList()) { 268 for (auto& thread : ThreadList()) {
269 if (thread == except) { 269 if (std::addressof(thread) == except) {
270 continue; 270 continue;
271 } 271 }
272 272
273 thread->SetStepState(Kernel::StepState::NotStepping); 273 thread.SetStepState(Kernel::StepState::NotStepping);
274 thread->Resume(Kernel::SuspendType::Debug); 274 thread.Resume(Kernel::SuspendType::Debug);
275 } 275 }
276 } 276 }
277 277
@@ -283,13 +283,17 @@ private:
283 } 283 }
284 284
285 void UpdateActiveThread() { 285 void UpdateActiveThread() {
286 const auto& threads{ThreadList()}; 286 auto& threads{ThreadList()};
287 if (std::find(threads.begin(), threads.end(), state->active_thread) == threads.end()) { 287 for (auto& thread : threads) {
288 state->active_thread = threads.front(); 288 if (std::addressof(thread) == state->active_thread) {
289 // Thread is still alive, no need to update.
290 return;
291 }
289 } 292 }
293 state->active_thread = std::addressof(threads.front());
290 } 294 }
291 295
292 const std::list<Kernel::KThread*>& ThreadList() { 296 Kernel::KProcess::ThreadList& ThreadList() {
293 return system.ApplicationProcess()->GetThreadList(); 297 return system.ApplicationProcess()->GetThreadList();
294 } 298 }
295 299
diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp
index 2076aa8a2..6f5f5156b 100644
--- a/src/core/debugger/gdbstub.cpp
+++ b/src/core/debugger/gdbstub.cpp
@@ -109,7 +109,7 @@ static std::string EscapeXML(std::string_view data) {
109 109
110GDBStub::GDBStub(DebuggerBackend& backend_, Core::System& system_) 110GDBStub::GDBStub(DebuggerBackend& backend_, Core::System& system_)
111 : DebuggerFrontend(backend_), system{system_} { 111 : DebuggerFrontend(backend_), system{system_} {
112 if (system.ApplicationProcess()->Is64BitProcess()) { 112 if (system.ApplicationProcess()->Is64Bit()) {
113 arch = std::make_unique<GDBStubA64>(); 113 arch = std::make_unique<GDBStubA64>();
114 } else { 114 } else {
115 arch = std::make_unique<GDBStubA32>(); 115 arch = std::make_unique<GDBStubA32>();
@@ -446,10 +446,10 @@ void GDBStub::HandleBreakpointRemove(std::string_view command) {
446// See osdbg_thread_local_region.os.horizon.hpp and osdbg_thread_type.os.horizon.hpp 446// See osdbg_thread_local_region.os.horizon.hpp and osdbg_thread_type.os.horizon.hpp
447 447
448static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory& memory, 448static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory& memory,
449 const Kernel::KThread* thread) { 449 const Kernel::KThread& thread) {
450 // Read thread type from TLS 450 // Read thread type from TLS
451 const VAddr tls_thread_type{memory.Read32(thread->GetTlsAddress() + 0x1fc)}; 451 const VAddr tls_thread_type{memory.Read32(thread.GetTlsAddress() + 0x1fc)};
452 const VAddr argument_thread_type{thread->GetArgument()}; 452 const VAddr argument_thread_type{thread.GetArgument()};
453 453
454 if (argument_thread_type && tls_thread_type != argument_thread_type) { 454 if (argument_thread_type && tls_thread_type != argument_thread_type) {
455 // Probably not created by nnsdk, no name available. 455 // Probably not created by nnsdk, no name available.
@@ -477,10 +477,10 @@ static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory&
477} 477}
478 478
479static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory& memory, 479static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory& memory,
480 const Kernel::KThread* thread) { 480 const Kernel::KThread& thread) {
481 // Read thread type from TLS 481 // Read thread type from TLS
482 const VAddr tls_thread_type{memory.Read64(thread->GetTlsAddress() + 0x1f8)}; 482 const VAddr tls_thread_type{memory.Read64(thread.GetTlsAddress() + 0x1f8)};
483 const VAddr argument_thread_type{thread->GetArgument()}; 483 const VAddr argument_thread_type{thread.GetArgument()};
484 484
485 if (argument_thread_type && tls_thread_type != argument_thread_type) { 485 if (argument_thread_type && tls_thread_type != argument_thread_type) {
486 // Probably not created by nnsdk, no name available. 486 // Probably not created by nnsdk, no name available.
@@ -508,16 +508,16 @@ static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory&
508} 508}
509 509
510static std::optional<std::string> GetThreadName(Core::System& system, 510static std::optional<std::string> GetThreadName(Core::System& system,
511 const Kernel::KThread* thread) { 511 const Kernel::KThread& thread) {
512 if (system.ApplicationProcess()->Is64BitProcess()) { 512 if (system.ApplicationProcess()->Is64Bit()) {
513 return GetNameFromThreadType64(system.ApplicationMemory(), thread); 513 return GetNameFromThreadType64(system.ApplicationMemory(), thread);
514 } else { 514 } else {
515 return GetNameFromThreadType32(system.ApplicationMemory(), thread); 515 return GetNameFromThreadType32(system.ApplicationMemory(), thread);
516 } 516 }
517} 517}
518 518
519static std::string_view GetThreadWaitReason(const Kernel::KThread* thread) { 519static std::string_view GetThreadWaitReason(const Kernel::KThread& thread) {
520 switch (thread->GetWaitReasonForDebugging()) { 520 switch (thread.GetWaitReasonForDebugging()) {
521 case Kernel::ThreadWaitReasonForDebugging::Sleep: 521 case Kernel::ThreadWaitReasonForDebugging::Sleep:
522 return "Sleep"; 522 return "Sleep";
523 case Kernel::ThreadWaitReasonForDebugging::IPC: 523 case Kernel::ThreadWaitReasonForDebugging::IPC:
@@ -535,8 +535,8 @@ static std::string_view GetThreadWaitReason(const Kernel::KThread* thread) {
535 } 535 }
536} 536}
537 537
538static std::string GetThreadState(const Kernel::KThread* thread) { 538static std::string GetThreadState(const Kernel::KThread& thread) {
539 switch (thread->GetState()) { 539 switch (thread.GetState()) {
540 case Kernel::ThreadState::Initialized: 540 case Kernel::ThreadState::Initialized:
541 return "Initialized"; 541 return "Initialized";
542 case Kernel::ThreadState::Waiting: 542 case Kernel::ThreadState::Waiting:
@@ -604,7 +604,7 @@ void GDBStub::HandleQuery(std::string_view command) {
604 const auto& threads = system.ApplicationProcess()->GetThreadList(); 604 const auto& threads = system.ApplicationProcess()->GetThreadList();
605 std::vector<std::string> thread_ids; 605 std::vector<std::string> thread_ids;
606 for (const auto& thread : threads) { 606 for (const auto& thread : threads) {
607 thread_ids.push_back(fmt::format("{:x}", thread->GetThreadId())); 607 thread_ids.push_back(fmt::format("{:x}", thread.GetThreadId()));
608 } 608 }
609 SendReply(fmt::format("m{}", fmt::join(thread_ids, ","))); 609 SendReply(fmt::format("m{}", fmt::join(thread_ids, ",")));
610 } else if (command.starts_with("sThreadInfo")) { 610 } else if (command.starts_with("sThreadInfo")) {
@@ -616,14 +616,14 @@ void GDBStub::HandleQuery(std::string_view command) {
616 buffer += "<threads>"; 616 buffer += "<threads>";
617 617
618 const auto& threads = system.ApplicationProcess()->GetThreadList(); 618 const auto& threads = system.ApplicationProcess()->GetThreadList();
619 for (const auto* thread : threads) { 619 for (const auto& thread : threads) {
620 auto thread_name{GetThreadName(system, thread)}; 620 auto thread_name{GetThreadName(system, thread)};
621 if (!thread_name) { 621 if (!thread_name) {
622 thread_name = fmt::format("Thread {:d}", thread->GetThreadId()); 622 thread_name = fmt::format("Thread {:d}", thread.GetThreadId());
623 } 623 }
624 624
625 buffer += fmt::format(R"(<thread id="{:x}" core="{:d}" name="{}">{}</thread>)", 625 buffer += fmt::format(R"(<thread id="{:x}" core="{:d}" name="{}">{}</thread>)",
626 thread->GetThreadId(), thread->GetActiveCore(), 626 thread.GetThreadId(), thread.GetActiveCore(),
627 EscapeXML(*thread_name), GetThreadState(thread)); 627 EscapeXML(*thread_name), GetThreadState(thread));
628 } 628 }
629 629
@@ -850,10 +850,10 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
850} 850}
851 851
852Kernel::KThread* GDBStub::GetThreadByID(u64 thread_id) { 852Kernel::KThread* GDBStub::GetThreadByID(u64 thread_id) {
853 const auto& threads{system.ApplicationProcess()->GetThreadList()}; 853 auto& threads{system.ApplicationProcess()->GetThreadList()};
854 for (auto* thread : threads) { 854 for (auto& thread : threads) {
855 if (thread->GetThreadId() == thread_id) { 855 if (thread.GetThreadId() == thread_id) {
856 return thread; 856 return std::addressof(thread);
857 } 857 }
858 } 858 }
859 859
diff --git a/src/core/file_sys/program_metadata.cpp b/src/core/file_sys/program_metadata.cpp
index 8e291ff67..763a44fee 100644
--- a/src/core/file_sys/program_metadata.cpp
+++ b/src/core/file_sys/program_metadata.cpp
@@ -104,16 +104,16 @@ Loader::ResultStatus ProgramMetadata::Reload(VirtualFile file) {
104} 104}
105 105
106/*static*/ ProgramMetadata ProgramMetadata::GetDefault() { 106/*static*/ ProgramMetadata ProgramMetadata::GetDefault() {
107 // Allow use of cores 0~3 and thread priorities 1~63. 107 // Allow use of cores 0~3 and thread priorities 16~63.
108 constexpr u32 default_thread_info_capability = 0x30007F7; 108 constexpr u32 default_thread_info_capability = 0x30043F7;
109 109
110 ProgramMetadata result; 110 ProgramMetadata result;
111 111
112 result.LoadManual( 112 result.LoadManual(
113 true /*is_64_bit*/, FileSys::ProgramAddressSpaceType::Is39Bit /*address_space*/, 113 true /*is_64_bit*/, FileSys::ProgramAddressSpaceType::Is39Bit /*address_space*/,
114 0x2c /*main_thread_prio*/, 0 /*main_thread_core*/, 0x00100000 /*main_thread_stack_size*/, 114 0x2c /*main_thread_prio*/, 0 /*main_thread_core*/, 0x100000 /*main_thread_stack_size*/,
115 0 /*title_id*/, 0xFFFFFFFFFFFFFFFF /*filesystem_permissions*/, 115 0 /*title_id*/, 0xFFFFFFFFFFFFFFFF /*filesystem_permissions*/, 0 /*system_resource_size*/,
116 0x1FE00000 /*system_resource_size*/, {default_thread_info_capability} /*capabilities*/); 116 {default_thread_info_capability} /*capabilities*/);
117 117
118 return result; 118 return result;
119} 119}
diff --git a/src/core/file_sys/program_metadata.h b/src/core/file_sys/program_metadata.h
index 9f8e74b13..76ee97d78 100644
--- a/src/core/file_sys/program_metadata.h
+++ b/src/core/file_sys/program_metadata.h
@@ -73,6 +73,9 @@ public:
73 u64 GetFilesystemPermissions() const; 73 u64 GetFilesystemPermissions() const;
74 u32 GetSystemResourceSize() const; 74 u32 GetSystemResourceSize() const;
75 const KernelCapabilityDescriptors& GetKernelCapabilities() const; 75 const KernelCapabilityDescriptors& GetKernelCapabilities() const;
76 const std::array<u8, 0x10>& GetName() const {
77 return npdm_header.application_name;
78 }
76 79
77 void Print() const; 80 void Print() const;
78 81
@@ -164,14 +167,14 @@ private:
164 u32_le unk_size_2; 167 u32_le unk_size_2;
165 }; 168 };
166 169
167 Header npdm_header; 170 Header npdm_header{};
168 AciHeader aci_header; 171 AciHeader aci_header{};
169 AcidHeader acid_header; 172 AcidHeader acid_header{};
170 173
171 FileAccessControl acid_file_access; 174 FileAccessControl acid_file_access{};
172 FileAccessHeader aci_file_access; 175 FileAccessHeader aci_file_access{};
173 176
174 KernelCapabilityDescriptors aci_kernel_capabilities; 177 KernelCapabilityDescriptors aci_kernel_capabilities{};
175}; 178};
176 179
177} // namespace FileSys 180} // namespace FileSys
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
index 4cfdf4558..59364efa1 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -8,7 +8,11 @@
8 8
9#include "core/hle/kernel/board/nintendo/nx/k_system_control.h" 9#include "core/hle/kernel/board/nintendo/nx/k_system_control.h"
10#include "core/hle/kernel/board/nintendo/nx/secure_monitor.h" 10#include "core/hle/kernel/board/nintendo/nx/secure_monitor.h"
11#include "core/hle/kernel/k_memory_manager.h"
12#include "core/hle/kernel/k_page_table.h"
11#include "core/hle/kernel/k_trace.h" 13#include "core/hle/kernel/k_trace.h"
14#include "core/hle/kernel/kernel.h"
15#include "core/hle/kernel/svc_results.h"
12 16
13namespace Kernel::Board::Nintendo::Nx { 17namespace Kernel::Board::Nintendo::Nx {
14 18
@@ -30,6 +34,8 @@ constexpr const std::size_t RequiredNonSecureSystemMemorySize =
30constexpr const std::size_t RequiredNonSecureSystemMemorySizeWithFatal = 34constexpr const std::size_t RequiredNonSecureSystemMemorySizeWithFatal =
31 RequiredNonSecureSystemMemorySize + impl::RequiredNonSecureSystemMemorySizeViFatal; 35 RequiredNonSecureSystemMemorySize + impl::RequiredNonSecureSystemMemorySizeViFatal;
32 36
37constexpr const std::size_t SecureAlignment = 128_KiB;
38
33namespace { 39namespace {
34 40
35using namespace Common::Literals; 41using namespace Common::Literals;
@@ -183,4 +189,57 @@ u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
183 return GenerateUniformRange(min, max, GenerateRandomU64); 189 return GenerateUniformRange(min, max, GenerateRandomU64);
184} 190}
185 191
192size_t KSystemControl::CalculateRequiredSecureMemorySize(size_t size, u32 pool) {
193 if (pool == static_cast<u32>(KMemoryManager::Pool::Applet)) {
194 return 0;
195 } else {
196 // return KSystemControlBase::CalculateRequiredSecureMemorySize(size, pool);
197 return size;
198 }
199}
200
201Result KSystemControl::AllocateSecureMemory(KernelCore& kernel, KVirtualAddress* out, size_t size,
202 u32 pool) {
203 // Applet secure memory is handled separately.
204 UNIMPLEMENTED_IF(pool == static_cast<u32>(KMemoryManager::Pool::Applet));
205
206 // Ensure the size is aligned.
207 const size_t alignment =
208 (pool == static_cast<u32>(KMemoryManager::Pool::System) ? PageSize : SecureAlignment);
209 R_UNLESS(Common::IsAligned(size, alignment), ResultInvalidSize);
210
211 // Allocate the memory.
212 const size_t num_pages = size / PageSize;
213 const KPhysicalAddress paddr = kernel.MemoryManager().AllocateAndOpenContinuous(
214 num_pages, alignment / PageSize,
215 KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool),
216 KMemoryManager::Direction::FromFront));
217 R_UNLESS(paddr != 0, ResultOutOfMemory);
218
219 // Ensure we don't leak references to the memory on error.
220 ON_RESULT_FAILURE {
221 kernel.MemoryManager().Close(paddr, num_pages);
222 };
223
224 // We succeeded.
225 *out = KPageTable::GetHeapVirtualAddress(kernel.MemoryLayout(), paddr);
226 R_SUCCEED();
227}
228
229void KSystemControl::FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size,
230 u32 pool) {
231 // Applet secure memory is handled separately.
232 UNIMPLEMENTED_IF(pool == static_cast<u32>(KMemoryManager::Pool::Applet));
233
234 // Ensure the size is aligned.
235 const size_t alignment =
236 (pool == static_cast<u32>(KMemoryManager::Pool::System) ? PageSize : SecureAlignment);
237 ASSERT(Common::IsAligned(GetInteger(address), alignment));
238 ASSERT(Common::IsAligned(size, alignment));
239
240 // Close the secure region's pages.
241 kernel.MemoryManager().Close(KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), address),
242 size / PageSize);
243}
244
186} // namespace Kernel::Board::Nintendo::Nx 245} // namespace Kernel::Board::Nintendo::Nx
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
index b477e8193..ff1feec70 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
@@ -4,6 +4,11 @@
4#pragma once 4#pragma once
5 5
6#include "core/hle/kernel/k_typed_address.h" 6#include "core/hle/kernel/k_typed_address.h"
7#include "core/hle/result.h"
8
9namespace Kernel {
10class KernelCore;
11}
7 12
8namespace Kernel::Board::Nintendo::Nx { 13namespace Kernel::Board::Nintendo::Nx {
9 14
@@ -25,8 +30,16 @@ public:
25 static std::size_t GetMinimumNonSecureSystemPoolSize(); 30 static std::size_t GetMinimumNonSecureSystemPoolSize();
26 }; 31 };
27 32
33 // Randomness.
28 static u64 GenerateRandomRange(u64 min, u64 max); 34 static u64 GenerateRandomRange(u64 min, u64 max);
29 static u64 GenerateRandomU64(); 35 static u64 GenerateRandomU64();
36
37 // Secure Memory.
38 static size_t CalculateRequiredSecureMemorySize(size_t size, u32 pool);
39 static Result AllocateSecureMemory(KernelCore& kernel, KVirtualAddress* out, size_t size,
40 u32 pool);
41 static void FreeSecureMemory(KernelCore& kernel, KVirtualAddress address, size_t size,
42 u32 pool);
30}; 43};
31 44
32} // namespace Kernel::Board::Nintendo::Nx 45} // namespace Kernel::Board::Nintendo::Nx
diff --git a/src/core/hle/kernel/k_capabilities.h b/src/core/hle/kernel/k_capabilities.h
index de766c811..ebd4eedb1 100644
--- a/src/core/hle/kernel/k_capabilities.h
+++ b/src/core/hle/kernel/k_capabilities.h
@@ -200,8 +200,8 @@ private:
200 200
201 RawCapabilityValue raw; 201 RawCapabilityValue raw;
202 BitField<0, 15, CapabilityType> id; 202 BitField<0, 15, CapabilityType> id;
203 BitField<15, 4, u32> major_version; 203 BitField<15, 4, u32> minor_version;
204 BitField<19, 13, u32> minor_version; 204 BitField<19, 13, u32> major_version;
205 }; 205 };
206 206
207 union HandleTable { 207 union HandleTable {
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index efbac0e6a..7633a51fb 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -107,12 +107,12 @@ KConditionVariable::KConditionVariable(Core::System& system)
107 107
108KConditionVariable::~KConditionVariable() = default; 108KConditionVariable::~KConditionVariable() = default;
109 109
110Result KConditionVariable::SignalToAddress(KProcessAddress addr) { 110Result KConditionVariable::SignalToAddress(KernelCore& kernel, KProcessAddress addr) {
111 KThread* owner_thread = GetCurrentThreadPointer(m_kernel); 111 KThread* owner_thread = GetCurrentThreadPointer(kernel);
112 112
113 // Signal the address. 113 // Signal the address.
114 { 114 {
115 KScopedSchedulerLock sl(m_kernel); 115 KScopedSchedulerLock sl(kernel);
116 116
117 // Remove waiter thread. 117 // Remove waiter thread.
118 bool has_waiters{}; 118 bool has_waiters{};
@@ -133,7 +133,7 @@ Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
133 133
134 // Write the value to userspace. 134 // Write the value to userspace.
135 Result result{ResultSuccess}; 135 Result result{ResultSuccess};
136 if (WriteToUser(m_kernel, addr, std::addressof(next_value))) [[likely]] { 136 if (WriteToUser(kernel, addr, std::addressof(next_value))) [[likely]] {
137 result = ResultSuccess; 137 result = ResultSuccess;
138 } else { 138 } else {
139 result = ResultInvalidCurrentMemory; 139 result = ResultInvalidCurrentMemory;
@@ -148,28 +148,28 @@ Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
148 } 148 }
149} 149}
150 150
151Result KConditionVariable::WaitForAddress(Handle handle, KProcessAddress addr, u32 value) { 151Result KConditionVariable::WaitForAddress(KernelCore& kernel, Handle handle, KProcessAddress addr,
152 KThread* cur_thread = GetCurrentThreadPointer(m_kernel); 152 u32 value) {
153 ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(m_kernel); 153 KThread* cur_thread = GetCurrentThreadPointer(kernel);
154 ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
154 155
155 // Wait for the address. 156 // Wait for the address.
156 KThread* owner_thread{}; 157 KThread* owner_thread{};
157 { 158 {
158 KScopedSchedulerLock sl(m_kernel); 159 KScopedSchedulerLock sl(kernel);
159 160
160 // Check if the thread should terminate. 161 // Check if the thread should terminate.
161 R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested); 162 R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
162 163
163 // Read the tag from userspace. 164 // Read the tag from userspace.
164 u32 test_tag{}; 165 u32 test_tag{};
165 R_UNLESS(ReadFromUser(m_kernel, std::addressof(test_tag), addr), 166 R_UNLESS(ReadFromUser(kernel, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);
166 ResultInvalidCurrentMemory);
167 167
168 // If the tag isn't the handle (with wait mask), we're done. 168 // If the tag isn't the handle (with wait mask), we're done.
169 R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask)); 169 R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));
170 170
171 // Get the lock owner thread. 171 // Get the lock owner thread.
172 owner_thread = GetCurrentProcess(m_kernel) 172 owner_thread = GetCurrentProcess(kernel)
173 .GetHandleTable() 173 .GetHandleTable()
174 .GetObjectWithoutPseudoHandle<KThread>(handle) 174 .GetObjectWithoutPseudoHandle<KThread>(handle)
175 .ReleasePointerUnsafe(); 175 .ReleasePointerUnsafe();
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
index 8c2f3ae51..2620c8e39 100644
--- a/src/core/hle/kernel/k_condition_variable.h
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -24,11 +24,12 @@ public:
24 explicit KConditionVariable(Core::System& system); 24 explicit KConditionVariable(Core::System& system);
25 ~KConditionVariable(); 25 ~KConditionVariable();
26 26
27 // Arbitration 27 // Arbitration.
28 Result SignalToAddress(KProcessAddress addr); 28 static Result SignalToAddress(KernelCore& kernel, KProcessAddress addr);
29 Result WaitForAddress(Handle handle, KProcessAddress addr, u32 value); 29 static Result WaitForAddress(KernelCore& kernel, Handle handle, KProcessAddress addr,
30 u32 value);
30 31
31 // Condition variable 32 // Condition variable.
32 void Signal(u64 cv_key, s32 count); 33 void Signal(u64 cv_key, s32 count);
33 Result Wait(KProcessAddress addr, u64 key, u32 value, s64 timeout); 34 Result Wait(KProcessAddress addr, u64 key, u32 value, s64 timeout);
34 35
diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp
index fe6a20168..22d79569a 100644
--- a/src/core/hle/kernel/k_interrupt_manager.cpp
+++ b/src/core/hle/kernel/k_interrupt_manager.cpp
@@ -22,7 +22,7 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) {
22 KScopedSchedulerLock sl{kernel}; 22 KScopedSchedulerLock sl{kernel};
23 23
24 // Pin the current thread. 24 // Pin the current thread.
25 process->PinCurrentThread(core_id); 25 process->PinCurrentThread();
26 26
27 // Set the interrupt flag for the thread. 27 // Set the interrupt flag for the thread.
28 GetCurrentThread(kernel).SetInterruptFlag(); 28 GetCurrentThread(kernel).SetInterruptFlag();
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 637558e10..cdc5572d8 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -11,6 +11,7 @@
11#include "core/hle/kernel/initial_process.h" 11#include "core/hle/kernel/initial_process.h"
12#include "core/hle/kernel/k_memory_manager.h" 12#include "core/hle/kernel/k_memory_manager.h"
13#include "core/hle/kernel/k_page_group.h" 13#include "core/hle/kernel/k_page_group.h"
14#include "core/hle/kernel/k_page_table.h"
14#include "core/hle/kernel/kernel.h" 15#include "core/hle/kernel/kernel.h"
15#include "core/hle/kernel/svc_results.h" 16#include "core/hle/kernel/svc_results.h"
16 17
@@ -168,11 +169,37 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
168} 169}
169 170
170Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) { 171Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) {
171 UNREACHABLE(); 172 const u32 pool_index = static_cast<u32>(pool);
173
174 // Lock the pool.
175 KScopedLightLock lk(m_pool_locks[pool_index]);
176
177 // Check that we don't already have an optimized process.
178 R_UNLESS(!m_has_optimized_process[pool_index], ResultBusy);
179
180 // Set the optimized process id.
181 m_optimized_process_ids[pool_index] = process_id;
182 m_has_optimized_process[pool_index] = true;
183
184 // Clear the management area for the optimized process.
185 for (auto* manager = this->GetFirstManager(pool, Direction::FromFront); manager != nullptr;
186 manager = this->GetNextManager(manager, Direction::FromFront)) {
187 manager->InitializeOptimizedMemory(m_system.Kernel());
188 }
189
190 R_SUCCEED();
172} 191}
173 192
174void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) { 193void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
175 UNREACHABLE(); 194 const u32 pool_index = static_cast<u32>(pool);
195
196 // Lock the pool.
197 KScopedLightLock lk(m_pool_locks[pool_index]);
198
199 // If the process was optimized, clear it.
200 if (m_has_optimized_process[pool_index] && m_optimized_process_ids[pool_index] == process_id) {
201 m_has_optimized_process[pool_index] = false;
202 }
176} 203}
177 204
178KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, 205KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages,
@@ -207,7 +234,7 @@ KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, siz
207 234
208 // Maintain the optimized memory bitmap, if we should. 235 // Maintain the optimized memory bitmap, if we should.
209 if (m_has_optimized_process[static_cast<size_t>(pool)]) { 236 if (m_has_optimized_process[static_cast<size_t>(pool)]) {
210 UNIMPLEMENTED(); 237 chosen_manager->TrackUnoptimizedAllocation(m_system.Kernel(), allocated_block, num_pages);
211 } 238 }
212 239
213 // Open the first reference to the pages. 240 // Open the first reference to the pages.
@@ -255,7 +282,8 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
255 282
256 // Maintain the optimized memory bitmap, if we should. 283 // Maintain the optimized memory bitmap, if we should.
257 if (unoptimized) { 284 if (unoptimized) {
258 UNIMPLEMENTED(); 285 cur_manager->TrackUnoptimizedAllocation(m_system.Kernel(), allocated_block,
286 pages_per_alloc);
259 } 287 }
260 288
261 num_pages -= pages_per_alloc; 289 num_pages -= pages_per_alloc;
@@ -358,8 +386,8 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
358 // Process part or all of the block. 386 // Process part or all of the block.
359 const size_t cur_pages = 387 const size_t cur_pages =
360 std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); 388 std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
361 any_new = 389 any_new = manager.ProcessOptimizedAllocation(m_system.Kernel(), cur_address,
362 manager.ProcessOptimizedAllocation(cur_address, cur_pages, fill_pattern); 390 cur_pages, fill_pattern);
363 391
364 // Advance. 392 // Advance.
365 cur_address += cur_pages * PageSize; 393 cur_address += cur_pages * PageSize;
@@ -382,7 +410,7 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32
382 // Track some or all of the current pages. 410 // Track some or all of the current pages.
383 const size_t cur_pages = 411 const size_t cur_pages =
384 std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); 412 std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
385 manager.TrackOptimizedAllocation(cur_address, cur_pages); 413 manager.TrackOptimizedAllocation(m_system.Kernel(), cur_address, cur_pages);
386 414
387 // Advance. 415 // Advance.
388 cur_address += cur_pages * PageSize; 416 cur_address += cur_pages * PageSize;
@@ -427,17 +455,86 @@ size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size,
427 return total_management_size; 455 return total_management_size;
428} 456}
429 457
430void KMemoryManager::Impl::TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages) { 458void KMemoryManager::Impl::InitializeOptimizedMemory(KernelCore& kernel) {
431 UNREACHABLE(); 459 auto optimize_pa =
460 KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
461 auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
462
463 std::memset(optimize_map, 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize()));
432} 464}
433 465
434void KMemoryManager::Impl::TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages) { 466void KMemoryManager::Impl::TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
435 UNREACHABLE(); 467 size_t num_pages) {
468 auto optimize_pa =
469 KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
470 auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
471
472 // Get the range we're tracking.
473 size_t offset = this->GetPageOffset(block);
474 const size_t last = offset + num_pages - 1;
475
476 // Track.
477 while (offset <= last) {
478 // Mark the page as not being optimized-allocated.
479 optimize_map[offset / Common::BitSize<u64>()] &=
480 ~(u64(1) << (offset % Common::BitSize<u64>()));
481
482 offset++;
483 }
484}
485
486void KMemoryManager::Impl::TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
487 size_t num_pages) {
488 auto optimize_pa =
489 KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
490 auto* optimize_map = kernel.System().DeviceMemory().GetPointer<u64>(optimize_pa);
491
492 // Get the range we're tracking.
493 size_t offset = this->GetPageOffset(block);
494 const size_t last = offset + num_pages - 1;
495
496 // Track.
497 while (offset <= last) {
498 // Mark the page as being optimized-allocated.
499 optimize_map[offset / Common::BitSize<u64>()] |=
500 (u64(1) << (offset % Common::BitSize<u64>()));
501
502 offset++;
503 }
436} 504}
437 505
438bool KMemoryManager::Impl::ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages, 506bool KMemoryManager::Impl::ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
439 u8 fill_pattern) { 507 size_t num_pages, u8 fill_pattern) {
440 UNREACHABLE(); 508 auto& device_memory = kernel.System().DeviceMemory();
509 auto optimize_pa =
510 KPageTable::GetHeapPhysicalAddress(kernel.MemoryLayout(), m_management_region);
511 auto* optimize_map = device_memory.GetPointer<u64>(optimize_pa);
512
513 // We want to return whether any pages were newly allocated.
514 bool any_new = false;
515
516 // Get the range we're processing.
517 size_t offset = this->GetPageOffset(block);
518 const size_t last = offset + num_pages - 1;
519
520 // Process.
521 while (offset <= last) {
522 // Check if the page has been optimized-allocated before.
523 if ((optimize_map[offset / Common::BitSize<u64>()] &
524 (u64(1) << (offset % Common::BitSize<u64>()))) == 0) {
525 // If not, it's new.
526 any_new = true;
527
528 // Fill the page.
529 auto* ptr = device_memory.GetPointer<u8>(m_heap.GetAddress());
530 std::memset(ptr + offset * PageSize, fill_pattern, PageSize);
531 }
532
533 offset++;
534 }
535
536 // Return the number of pages we processed.
537 return any_new;
441} 538}
442 539
443size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) { 540size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h
index 7e4b41319..c5a487af9 100644
--- a/src/core/hle/kernel/k_memory_manager.h
+++ b/src/core/hle/kernel/k_memory_manager.h
@@ -216,14 +216,14 @@ private:
216 m_heap.SetInitialUsedSize(reserved_size); 216 m_heap.SetInitialUsedSize(reserved_size);
217 } 217 }
218 218
219 void InitializeOptimizedMemory() { 219 void InitializeOptimizedMemory(KernelCore& kernel);
220 UNIMPLEMENTED();
221 }
222 220
223 void TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages); 221 void TrackUnoptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
224 void TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages); 222 size_t num_pages);
223 void TrackOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block, size_t num_pages);
225 224
226 bool ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages, u8 fill_pattern); 225 bool ProcessOptimizedAllocation(KernelCore& kernel, KPhysicalAddress block,
226 size_t num_pages, u8 fill_pattern);
227 227
228 constexpr Pool GetPool() const { 228 constexpr Pool GetPool() const {
229 return m_pool; 229 return m_pool;
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 217ccbae3..1d47bdf6b 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -82,14 +82,14 @@ public:
82 82
83using namespace Common::Literals; 83using namespace Common::Literals;
84 84
85constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { 85constexpr size_t GetAddressSpaceWidthFromType(Svc::CreateProcessFlag as_type) {
86 switch (as_type) { 86 switch (as_type) {
87 case FileSys::ProgramAddressSpaceType::Is32Bit: 87 case Svc::CreateProcessFlag::AddressSpace32Bit:
88 case FileSys::ProgramAddressSpaceType::Is32BitNoMap: 88 case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
89 return 32; 89 return 32;
90 case FileSys::ProgramAddressSpaceType::Is36Bit: 90 case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
91 return 36; 91 return 36;
92 case FileSys::ProgramAddressSpaceType::Is39Bit: 92 case Svc::CreateProcessFlag::AddressSpace64Bit:
93 return 39; 93 return 39;
94 default: 94 default:
95 ASSERT(false); 95 ASSERT(false);
@@ -105,7 +105,7 @@ KPageTable::KPageTable(Core::System& system_)
105 105
106KPageTable::~KPageTable() = default; 106KPageTable::~KPageTable() = default;
107 107
108Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, 108Result KPageTable::InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
109 bool enable_das_merge, bool from_back, 109 bool enable_das_merge, bool from_back,
110 KMemoryManager::Pool pool, KProcessAddress code_addr, 110 KMemoryManager::Pool pool, KProcessAddress code_addr,
111 size_t code_size, KSystemResource* system_resource, 111 size_t code_size, KSystemResource* system_resource,
@@ -133,7 +133,7 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
133 ASSERT(code_addr + code_size - 1 <= end - 1); 133 ASSERT(code_addr + code_size - 1 <= end - 1);
134 134
135 // Adjust heap/alias size if we don't have an alias region 135 // Adjust heap/alias size if we don't have an alias region
136 if (as_type == FileSys::ProgramAddressSpaceType::Is32BitNoMap) { 136 if (as_type == Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias) {
137 heap_region_size += alias_region_size; 137 heap_region_size += alias_region_size;
138 alias_region_size = 0; 138 alias_region_size = 0;
139 } 139 }
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 3d64b6fb0..66f16faaf 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -63,7 +63,7 @@ public:
63 explicit KPageTable(Core::System& system_); 63 explicit KPageTable(Core::System& system_);
64 ~KPageTable(); 64 ~KPageTable();
65 65
66 Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, 66 Result InitializeForProcess(Svc::CreateProcessFlag as_type, bool enable_aslr,
67 bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, 67 bool enable_das_merge, bool from_back, KMemoryManager::Pool pool,
68 KProcessAddress code_addr, size_t code_size, 68 KProcessAddress code_addr, size_t code_size,
69 KSystemResource* system_resource, KResourceLimit* resource_limit, 69 KSystemResource* system_resource, KResourceLimit* resource_limit,
@@ -400,7 +400,7 @@ public:
400 constexpr size_t GetAliasCodeRegionSize() const { 400 constexpr size_t GetAliasCodeRegionSize() const {
401 return m_alias_code_region_end - m_alias_code_region_start; 401 return m_alias_code_region_end - m_alias_code_region_start;
402 } 402 }
403 size_t GetNormalMemorySize() { 403 size_t GetNormalMemorySize() const {
404 KScopedLightLock lk(m_general_lock); 404 KScopedLightLock lk(m_general_lock);
405 return GetHeapSize() + m_mapped_physical_memory_size; 405 return GetHeapSize() + m_mapped_physical_memory_size;
406 } 406 }
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 7fa34d693..1f4b0755d 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -1,515 +1,598 @@
1// SPDX-FileCopyrightText: 2015 Citra Emulator Project 1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include <algorithm>
5#include <bitset>
6#include <ctime>
7#include <memory>
8#include <random> 4#include <random>
9#include "common/alignment.h"
10#include "common/assert.h"
11#include "common/logging/log.h"
12#include "common/scope_exit.h" 5#include "common/scope_exit.h"
13#include "common/settings.h" 6#include "common/settings.h"
14#include "core/core.h" 7#include "core/core.h"
15#include "core/file_sys/program_metadata.h"
16#include "core/hle/kernel/code_set.h"
17#include "core/hle/kernel/k_memory_block_manager.h"
18#include "core/hle/kernel/k_page_table.h"
19#include "core/hle/kernel/k_process.h" 8#include "core/hle/kernel/k_process.h"
20#include "core/hle/kernel/k_resource_limit.h"
21#include "core/hle/kernel/k_scheduler.h"
22#include "core/hle/kernel/k_scoped_resource_reservation.h" 9#include "core/hle/kernel/k_scoped_resource_reservation.h"
23#include "core/hle/kernel/k_shared_memory.h" 10#include "core/hle/kernel/k_shared_memory.h"
24#include "core/hle/kernel/k_shared_memory_info.h" 11#include "core/hle/kernel/k_shared_memory_info.h"
25#include "core/hle/kernel/k_thread.h" 12#include "core/hle/kernel/k_thread_local_page.h"
26#include "core/hle/kernel/kernel.h" 13#include "core/hle/kernel/k_thread_queue.h"
27#include "core/hle/kernel/svc_results.h" 14#include "core/hle/kernel/k_worker_task_manager.h"
28#include "core/memory.h"
29 15
30namespace Kernel { 16namespace Kernel {
31namespace {
32/**
33 * Sets up the primary application thread
34 *
35 * @param system The system instance to create the main thread under.
36 * @param owner_process The parent process for the main thread
37 * @param priority The priority to give the main thread
38 */
39void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority,
40 KProcessAddress stack_top) {
41 const KProcessAddress entry_point = owner_process.GetEntryPoint();
42 ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::ThreadCountMax, 1));
43
44 KThread* thread = KThread::Create(system.Kernel());
45 SCOPE_EXIT({ thread->Close(); });
46
47 ASSERT(KThread::InitializeUserThread(system, thread, entry_point, 0, stack_top, priority,
48 owner_process.GetIdealCoreId(),
49 std::addressof(owner_process))
50 .IsSuccess());
51
52 // Register 1 must be a handle to the main thread
53 Handle thread_handle{};
54 owner_process.GetHandleTable().Add(std::addressof(thread_handle), thread);
55
56 thread->GetContext32().cpu_registers[0] = 0;
57 thread->GetContext64().cpu_registers[0] = 0;
58 thread->GetContext32().cpu_registers[1] = thread_handle;
59 thread->GetContext64().cpu_registers[1] = thread_handle;
60
61 if (system.DebuggerEnabled()) {
62 thread->RequestSuspend(SuspendType::Debug);
63 }
64 17
65 // Run our thread. 18namespace {
66 void(thread->Run());
67}
68} // Anonymous namespace
69 19
70Result KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name, 20Result TerminateChildren(KernelCore& kernel, KProcess* process,
71 ProcessType type, KResourceLimit* res_limit) { 21 const KThread* thread_to_not_terminate) {
72 auto& kernel = system.Kernel(); 22 // Request that all children threads terminate.
23 {
24 KScopedLightLock proc_lk(process->GetListLock());
25 KScopedSchedulerLock sl(kernel);
26
27 if (thread_to_not_terminate != nullptr &&
28 process->GetPinnedThread(GetCurrentCoreId(kernel)) == thread_to_not_terminate) {
29 // NOTE: Here Nintendo unpins the current thread instead of the thread_to_not_terminate.
30 // This is valid because the only caller which uses non-nullptr as argument uses
31 // GetCurrentThreadPointer(), but it's still notable because it seems incorrect at
32 // first glance.
33 process->UnpinCurrentThread();
34 }
73 35
74 process->name = std::move(process_name); 36 auto& thread_list = process->GetThreadList();
75 process->m_resource_limit = res_limit; 37 for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
76 process->m_system_resource_address = 0; 38 if (KThread* thread = std::addressof(*it); thread != thread_to_not_terminate) {
77 process->m_state = State::Created; 39 if (thread->GetState() != ThreadState::Terminated) {
78 process->m_program_id = 0; 40 thread->RequestTerminate();
79 process->m_process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() 41 }
80 : kernel.CreateNewUserProcessID(); 42 }
81 process->m_capabilities.InitializeForMetadatalessProcess(); 43 }
82 process->m_is_initialized = true; 44 }
83 45
84 std::mt19937 rng(Settings::values.rng_seed_enabled ? Settings::values.rng_seed.GetValue() 46 // Wait for all children threads to terminate.
85 : static_cast<u32>(std::time(nullptr))); 47 while (true) {
86 std::uniform_int_distribution<u64> distribution; 48 // Get the next child.
87 std::generate(process->m_random_entropy.begin(), process->m_random_entropy.end(), 49 KThread* cur_child = nullptr;
88 [&] { return distribution(rng); }); 50 {
51 KScopedLightLock proc_lk(process->GetListLock());
52
53 auto& thread_list = process->GetThreadList();
54 for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
55 if (KThread* thread = std::addressof(*it); thread != thread_to_not_terminate) {
56 if (thread->GetState() != ThreadState::Terminated) {
57 if (thread->Open()) {
58 cur_child = thread;
59 break;
60 }
61 }
62 }
63 }
64 }
89 65
90 kernel.AppendNewProcess(process); 66 // If we didn't find any non-terminated children, we're done.
67 if (cur_child == nullptr) {
68 break;
69 }
91 70
92 // Clear remaining fields. 71 // Terminate and close the thread.
93 process->m_num_running_threads = 0; 72 SCOPE_EXIT({ cur_child->Close(); });
94 process->m_is_signaled = false;
95 process->m_exception_thread = nullptr;
96 process->m_is_suspended = false;
97 process->m_schedule_count = 0;
98 process->m_is_handle_table_initialized = false;
99 process->m_is_hbl = false;
100 73
101 // Open a reference to the resource limit. 74 if (const Result terminate_result = cur_child->Terminate();
102 process->m_resource_limit->Open(); 75 ResultTerminationRequested == terminate_result) {
76 R_THROW(terminate_result);
77 }
78 }
103 79
104 R_SUCCEED(); 80 R_SUCCEED();
105} 81}
106 82
107void KProcess::DoWorkerTaskImpl() { 83class ThreadQueueImplForKProcessEnterUserException final : public KThreadQueue {
108 UNIMPLEMENTED(); 84private:
109} 85 KThread** m_exception_thread;
110
111KResourceLimit* KProcess::GetResourceLimit() const {
112 return m_resource_limit;
113}
114 86
115void KProcess::IncrementRunningThreadCount() { 87public:
116 ASSERT(m_num_running_threads.load() >= 0); 88 explicit ThreadQueueImplForKProcessEnterUserException(KernelCore& kernel, KThread** t)
117 ++m_num_running_threads; 89 : KThreadQueue(kernel), m_exception_thread(t) {}
118}
119 90
120void KProcess::DecrementRunningThreadCount() { 91 virtual void EndWait(KThread* waiting_thread, Result wait_result) override {
121 ASSERT(m_num_running_threads.load() > 0); 92 // Set the exception thread.
93 *m_exception_thread = waiting_thread;
122 94
123 if (const auto prev = m_num_running_threads--; prev == 1) { 95 // Invoke the base end wait handler.
124 // TODO(bunnei): Process termination to be implemented when multiprocess is supported. 96 KThreadQueue::EndWait(waiting_thread, wait_result);
125 } 97 }
126}
127 98
128u64 KProcess::GetTotalPhysicalMemoryAvailable() { 99 virtual void CancelWait(KThread* waiting_thread, Result wait_result,
129 const u64 capacity{m_resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) + 100 bool cancel_timer_task) override {
130 m_page_table.GetNormalMemorySize() + GetSystemResourceSize() + m_image_size + 101 // Remove the thread as a waiter on its mutex owner.
131 m_main_thread_stack_size}; 102 waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);
132 if (const auto pool_size = m_kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); 103
133 capacity != pool_size) { 104 // Invoke the base cancel wait handler.
134 LOG_WARNING(Kernel, "capacity {} != application pool size {}", capacity, pool_size); 105 KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
135 }
136 if (capacity < m_memory_usage_capacity) {
137 return capacity;
138 } 106 }
139 return m_memory_usage_capacity; 107};
140}
141 108
142u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() { 109void GenerateRandom(std::span<u64> out_random) {
143 return this->GetTotalPhysicalMemoryAvailable() - this->GetSystemResourceSize(); 110 std::mt19937 rng(Settings::values.rng_seed_enabled ? Settings::values.rng_seed.GetValue()
111 : static_cast<u32>(std::time(nullptr)));
112 std::uniform_int_distribution<u64> distribution;
113 std::generate(out_random.begin(), out_random.end(), [&] { return distribution(rng); });
144} 114}
145 115
146u64 KProcess::GetTotalPhysicalMemoryUsed() { 116} // namespace
147 return m_image_size + m_main_thread_stack_size + m_page_table.GetNormalMemorySize() +
148 this->GetSystemResourceSize();
149}
150 117
151u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() { 118void KProcess::Finalize() {
152 return this->GetTotalPhysicalMemoryUsed() - this->GetSystemResourceSize(); 119 // Delete the process local region.
153} 120 this->DeleteThreadLocalRegion(m_plr_address);
154 121
155bool KProcess::ReleaseUserException(KThread* thread) { 122 // Get the used memory size.
156 KScopedSchedulerLock sl{m_kernel}; 123 const size_t used_memory_size = this->GetUsedNonSystemUserPhysicalMemorySize();
157 124
158 if (m_exception_thread == thread) { 125 // Finalize the page table.
159 m_exception_thread = nullptr; 126 m_page_table.Finalize();
160 127
161 // Remove waiter thread. 128 // Finish using our system resource.
162 bool has_waiters{}; 129 if (m_system_resource) {
163 if (KThread* next = thread->RemoveKernelWaiterByKey( 130 if (m_system_resource->IsSecureResource()) {
164 std::addressof(has_waiters), 131 // Finalize optimized memory. If memory wasn't optimized, this is a no-op.
165 reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread))); 132 m_kernel.MemoryManager().FinalizeOptimizedMemory(this->GetId(), m_memory_pool);
166 next != nullptr) {
167 next->EndWait(ResultSuccess);
168 } 133 }
169 134
170 KScheduler::SetSchedulerUpdateNeeded(m_kernel); 135 m_system_resource->Close();
171 136 m_system_resource = nullptr;
172 return true;
173 } else {
174 return false;
175 } 137 }
176}
177
178void KProcess::PinCurrentThread(s32 core_id) {
179 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
180 138
181 // Get the current thread. 139 // Free all shared memory infos.
182 KThread* cur_thread = 140 {
183 m_kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread(); 141 auto it = m_shared_memory_list.begin();
142 while (it != m_shared_memory_list.end()) {
143 KSharedMemoryInfo* info = std::addressof(*it);
144 KSharedMemory* shmem = info->GetSharedMemory();
184 145
185 // If the thread isn't terminated, pin it. 146 while (!info->Close()) {
186 if (!cur_thread->IsTerminationRequested()) { 147 shmem->Close();
187 // Pin it. 148 }
188 this->PinThread(core_id, cur_thread); 149 shmem->Close();
189 cur_thread->Pin(core_id);
190 150
191 // An update is needed. 151 it = m_shared_memory_list.erase(it);
192 KScheduler::SetSchedulerUpdateNeeded(m_kernel); 152 KSharedMemoryInfo::Free(m_kernel, info);
153 }
193 } 154 }
194}
195 155
196void KProcess::UnpinCurrentThread(s32 core_id) { 156 // Our thread local page list must be empty at this point.
197 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); 157 ASSERT(m_partially_used_tlp_tree.empty());
198 158 ASSERT(m_fully_used_tlp_tree.empty());
199 // Get the current thread.
200 KThread* cur_thread =
201 m_kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
202 159
203 // Unpin it. 160 // Release memory to the resource limit.
204 cur_thread->Unpin(); 161 if (m_resource_limit != nullptr) {
205 this->UnpinThread(core_id, cur_thread); 162 ASSERT(used_memory_size >= m_memory_release_hint);
163 m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, used_memory_size,
164 used_memory_size - m_memory_release_hint);
165 m_resource_limit->Close();
166 }
206 167
207 // An update is needed. 168 // Perform inherited finalization.
208 KScheduler::SetSchedulerUpdateNeeded(m_kernel); 169 KSynchronizationObject::Finalize();
209} 170}
210 171
211void KProcess::UnpinThread(KThread* thread) { 172Result KProcess::Initialize(const Svc::CreateProcessParameter& params, KResourceLimit* res_limit,
212 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); 173 bool is_real) {
213 174 // TODO: remove this special case
214 // Get the thread's core id. 175 if (is_real) {
215 const auto core_id = thread->GetActiveCore(); 176 // Create and clear the process local region.
177 R_TRY(this->CreateThreadLocalRegion(std::addressof(m_plr_address)));
178 this->GetMemory().ZeroBlock(m_plr_address, Svc::ThreadLocalRegionSize);
179 }
216 180
217 // Unpin it. 181 // Copy in the name from parameters.
218 this->UnpinThread(core_id, thread); 182 static_assert(sizeof(params.name) < sizeof(m_name));
219 thread->Unpin(); 183 std::memcpy(m_name.data(), params.name.data(), sizeof(params.name));
184 m_name[sizeof(params.name)] = 0;
185
186 // Set misc fields.
187 m_state = State::Created;
188 m_main_thread_stack_size = 0;
189 m_used_kernel_memory_size = 0;
190 m_ideal_core_id = 0;
191 m_flags = params.flags;
192 m_version = params.version;
193 m_program_id = params.program_id;
194 m_code_address = params.code_address;
195 m_code_size = params.code_num_pages * PageSize;
196 m_is_application = True(params.flags & Svc::CreateProcessFlag::IsApplication);
197
198 // Set thread fields.
199 for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
200 m_running_threads[i] = nullptr;
201 m_pinned_threads[i] = nullptr;
202 m_running_thread_idle_counts[i] = 0;
203 m_running_thread_switch_counts[i] = 0;
204 }
220 205
221 // An update is needed. 206 // Set max memory based on address space type.
222 KScheduler::SetSchedulerUpdateNeeded(m_kernel); 207 switch ((params.flags & Svc::CreateProcessFlag::AddressSpaceMask)) {
223} 208 case Svc::CreateProcessFlag::AddressSpace32Bit:
209 case Svc::CreateProcessFlag::AddressSpace64BitDeprecated:
210 case Svc::CreateProcessFlag::AddressSpace64Bit:
211 m_max_process_memory = m_page_table.GetHeapRegionSize();
212 break;
213 case Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias:
214 m_max_process_memory = m_page_table.GetHeapRegionSize() + m_page_table.GetAliasRegionSize();
215 break;
216 default:
217 UNREACHABLE();
218 }
224 219
225Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] KProcessAddress address, 220 // Generate random entropy.
226 [[maybe_unused]] size_t size) { 221 GenerateRandom(m_entropy);
227 // Lock ourselves, to prevent concurrent access.
228 KScopedLightLock lk(m_state_lock);
229 222
230 // Try to find an existing info for the memory. 223 // Clear remaining fields.
231 KSharedMemoryInfo* shemen_info = nullptr; 224 m_num_running_threads = 0;
232 const auto iter = std::find_if( 225 m_num_process_switches = 0;
233 m_shared_memory_list.begin(), m_shared_memory_list.end(), 226 m_num_thread_switches = 0;
234 [shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; }); 227 m_num_fpu_switches = 0;
235 if (iter != m_shared_memory_list.end()) { 228 m_num_supervisor_calls = 0;
236 shemen_info = *iter; 229 m_num_ipc_messages = 0;
237 }
238 230
239 if (shemen_info == nullptr) { 231 m_is_signaled = false;
240 shemen_info = KSharedMemoryInfo::Allocate(m_kernel); 232 m_exception_thread = nullptr;
241 R_UNLESS(shemen_info != nullptr, ResultOutOfMemory); 233 m_is_suspended = false;
234 m_memory_release_hint = 0;
235 m_schedule_count = 0;
236 m_is_handle_table_initialized = false;
242 237
243 shemen_info->Initialize(shmem); 238 // Open a reference to our resource limit.
244 m_shared_memory_list.push_back(shemen_info); 239 m_resource_limit = res_limit;
245 } 240 m_resource_limit->Open();
246 241
247 // Open a reference to the shared memory and its info. 242 // We're initialized!
248 shmem->Open(); 243 m_is_initialized = true;
249 shemen_info->Open();
250 244
251 R_SUCCEED(); 245 R_SUCCEED();
252} 246}
253 247
254void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] KProcessAddress address, 248Result KProcess::Initialize(const Svc::CreateProcessParameter& params, const KPageGroup& pg,
255 [[maybe_unused]] size_t size) { 249 std::span<const u32> caps, KResourceLimit* res_limit,
256 // Lock ourselves, to prevent concurrent access. 250 KMemoryManager::Pool pool, bool immortal) {
257 KScopedLightLock lk(m_state_lock); 251 ASSERT(res_limit != nullptr);
252 ASSERT((params.code_num_pages * PageSize) / PageSize ==
253 static_cast<size_t>(params.code_num_pages));
254
255 // Set members.
256 m_memory_pool = pool;
257 m_is_default_application_system_resource = false;
258 m_is_immortal = immortal;
259
260 // Setup our system resource.
261 if (const size_t system_resource_num_pages = params.system_resource_num_pages;
262 system_resource_num_pages != 0) {
263 // Create a secure system resource.
264 KSecureSystemResource* secure_resource = KSecureSystemResource::Create(m_kernel);
265 R_UNLESS(secure_resource != nullptr, ResultOutOfResource);
266
267 ON_RESULT_FAILURE {
268 secure_resource->Close();
269 };
270
271 // Initialize the secure resource.
272 R_TRY(secure_resource->Initialize(system_resource_num_pages * PageSize, res_limit,
273 m_memory_pool));
274
275 // Set our system resource.
276 m_system_resource = secure_resource;
277 } else {
278 // Use the system-wide system resource.
279 const bool is_app = True(params.flags & Svc::CreateProcessFlag::IsApplication);
280 m_system_resource = std::addressof(is_app ? m_kernel.GetAppSystemResource()
281 : m_kernel.GetSystemSystemResource());
258 282
259 KSharedMemoryInfo* shemen_info = nullptr; 283 m_is_default_application_system_resource = is_app;
260 const auto iter = std::find_if( 284
261 m_shared_memory_list.begin(), m_shared_memory_list.end(), 285 // Open reference to the system resource.
262 [shmem](const KSharedMemoryInfo* info) { return info->GetSharedMemory() == shmem; }); 286 m_system_resource->Open();
263 if (iter != m_shared_memory_list.end()) {
264 shemen_info = *iter;
265 } 287 }
266 288
267 ASSERT(shemen_info != nullptr); 289 // Ensure we clean up our secure resource, if we fail.
290 ON_RESULT_FAILURE {
291 m_system_resource->Close();
292 m_system_resource = nullptr;
293 };
268 294
269 if (shemen_info->Close()) { 295 // Setup page table.
270 m_shared_memory_list.erase(iter); 296 {
271 KSharedMemoryInfo::Free(m_kernel, shemen_info); 297 const auto as_type = params.flags & Svc::CreateProcessFlag::AddressSpaceMask;
298 const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr);
299 const bool enable_das_merge =
300 False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
301 R_TRY(m_page_table.InitializeForProcess(
302 as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address,
303 params.code_num_pages * PageSize, m_system_resource, res_limit, this->GetMemory()));
272 } 304 }
305 ON_RESULT_FAILURE_2 {
306 m_page_table.Finalize();
307 };
273 308
274 // Close a reference to the shared memory. 309 // Ensure we can insert the code region.
275 shmem->Close(); 310 R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize,
276} 311 KMemoryState::Code),
312 ResultInvalidMemoryRegion);
277 313
278void KProcess::RegisterThread(KThread* thread) { 314 // Map the code region.
279 KScopedLightLock lk{m_list_lock}; 315 R_TRY(m_page_table.MapPageGroup(params.code_address, pg, KMemoryState::Code,
316 KMemoryPermission::KernelRead));
280 317
281 m_thread_list.push_back(thread); 318 // Initialize capabilities.
282} 319 R_TRY(m_capabilities.InitializeForKip(caps, std::addressof(m_page_table)));
283 320
284void KProcess::UnregisterThread(KThread* thread) { 321 // Initialize the process id.
285 KScopedLightLock lk{m_list_lock}; 322 m_process_id = m_kernel.CreateNewUserProcessID();
323 ASSERT(InitialProcessIdMin <= m_process_id);
324 ASSERT(m_process_id <= InitialProcessIdMax);
286 325
287 m_thread_list.remove(thread); 326 // Initialize the rest of the process.
288} 327 R_TRY(this->Initialize(params, res_limit, true));
289 328
290u64 KProcess::GetFreeThreadCount() const { 329 // We succeeded!
291 if (m_resource_limit != nullptr) { 330 R_SUCCEED();
292 const auto current_value =
293 m_resource_limit->GetCurrentValue(LimitableResource::ThreadCountMax);
294 const auto limit_value = m_resource_limit->GetLimitValue(LimitableResource::ThreadCountMax);
295 return limit_value - current_value;
296 } else {
297 return 0;
298 }
299} 331}
300 332
301Result KProcess::Reset() { 333Result KProcess::Initialize(const Svc::CreateProcessParameter& params,
302 // Lock the process and the scheduler. 334 std::span<const u32> user_caps, KResourceLimit* res_limit,
303 KScopedLightLock lk(m_state_lock); 335 KMemoryManager::Pool pool) {
304 KScopedSchedulerLock sl{m_kernel}; 336 ASSERT(res_limit != nullptr);
305 337
306 // Validate that we're in a state that we can reset. 338 // Set members.
307 R_UNLESS(m_state != State::Terminated, ResultInvalidState); 339 m_memory_pool = pool;
308 R_UNLESS(m_is_signaled, ResultInvalidState); 340 m_is_default_application_system_resource = false;
341 m_is_immortal = false;
309 342
310 // Clear signaled. 343 // Get the memory sizes.
311 m_is_signaled = false; 344 const size_t code_num_pages = params.code_num_pages;
312 R_SUCCEED(); 345 const size_t system_resource_num_pages = params.system_resource_num_pages;
313} 346 const size_t code_size = code_num_pages * PageSize;
347 const size_t system_resource_size = system_resource_num_pages * PageSize;
314 348
315Result KProcess::SetActivity(ProcessActivity activity) { 349 // Reserve memory for our code resource.
316 // Lock ourselves and the scheduler. 350 KScopedResourceReservation memory_reservation(
317 KScopedLightLock lk{m_state_lock}; 351 res_limit, Svc::LimitableResource::PhysicalMemoryMax, code_size);
318 KScopedLightLock list_lk{m_list_lock}; 352 R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
319 KScopedSchedulerLock sl{m_kernel};
320 353
321 // Validate our state. 354 // Setup our system resource.
322 R_UNLESS(m_state != State::Terminating, ResultInvalidState); 355 if (system_resource_num_pages != 0) {
323 R_UNLESS(m_state != State::Terminated, ResultInvalidState); 356 // Create a secure system resource.
357 KSecureSystemResource* secure_resource = KSecureSystemResource::Create(m_kernel);
358 R_UNLESS(secure_resource != nullptr, ResultOutOfResource);
324 359
325 // Either pause or resume. 360 ON_RESULT_FAILURE {
326 if (activity == ProcessActivity::Paused) { 361 secure_resource->Close();
327 // Verify that we're not suspended. 362 };
328 R_UNLESS(!m_is_suspended, ResultInvalidState);
329 363
330 // Suspend all threads. 364 // Initialize the secure resource.
331 for (auto* thread : this->GetThreadList()) { 365 R_TRY(secure_resource->Initialize(system_resource_size, res_limit, m_memory_pool));
332 thread->RequestSuspend(SuspendType::Process); 366
333 } 367 // Set our system resource.
368 m_system_resource = secure_resource;
334 369
335 // Set ourselves as suspended.
336 this->SetSuspended(true);
337 } else { 370 } else {
338 ASSERT(activity == ProcessActivity::Runnable); 371 // Use the system-wide system resource.
372 const bool is_app = True(params.flags & Svc::CreateProcessFlag::IsApplication);
373 m_system_resource = std::addressof(is_app ? m_kernel.GetAppSystemResource()
374 : m_kernel.GetSystemSystemResource());
339 375
340 // Verify that we're suspended. 376 m_is_default_application_system_resource = is_app;
341 R_UNLESS(m_is_suspended, ResultInvalidState);
342 377
343 // Resume all threads. 378 // Open reference to the system resource.
344 for (auto* thread : this->GetThreadList()) { 379 m_system_resource->Open();
345 thread->Resume(SuspendType::Process); 380 }
346 }
347 381
348 // Set ourselves as resumed. 382 // Ensure we clean up our secure resource, if we fail.
349 this->SetSuspended(false); 383 ON_RESULT_FAILURE {
384 m_system_resource->Close();
385 m_system_resource = nullptr;
386 };
387
388 // Setup page table.
389 {
390 const auto as_type = params.flags & Svc::CreateProcessFlag::AddressSpaceMask;
391 const bool enable_aslr = True(params.flags & Svc::CreateProcessFlag::EnableAslr);
392 const bool enable_das_merge =
393 False(params.flags & Svc::CreateProcessFlag::DisableDeviceAddressSpaceMerge);
394 R_TRY(m_page_table.InitializeForProcess(as_type, enable_aslr, enable_das_merge,
395 !enable_aslr, pool, params.code_address, code_size,
396 m_system_resource, res_limit, this->GetMemory()));
397 }
398 ON_RESULT_FAILURE_2 {
399 m_page_table.Finalize();
400 };
401
402 // Ensure we can insert the code region.
403 R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState::Code),
404 ResultInvalidMemoryRegion);
405
406 // Map the code region.
407 R_TRY(m_page_table.MapPages(params.code_address, code_num_pages, KMemoryState::Code,
408 KMemoryPermission::KernelRead | KMemoryPermission::NotMapped));
409
410 // Initialize capabilities.
411 R_TRY(m_capabilities.InitializeForUser(user_caps, std::addressof(m_page_table)));
412
413 // Initialize the process id.
414 m_process_id = m_kernel.CreateNewUserProcessID();
415 ASSERT(ProcessIdMin <= m_process_id);
416 ASSERT(m_process_id <= ProcessIdMax);
417
418 // If we should optimize memory allocations, do so.
419 if (m_system_resource->IsSecureResource() &&
420 True(params.flags & Svc::CreateProcessFlag::OptimizeMemoryAllocation)) {
421 R_TRY(m_kernel.MemoryManager().InitializeOptimizedMemory(m_process_id, pool));
350 } 422 }
351 423
424 // Initialize the rest of the process.
425 R_TRY(this->Initialize(params, res_limit, true));
426
427 // We succeeded, so commit our memory reservation.
428 memory_reservation.Commit();
352 R_SUCCEED(); 429 R_SUCCEED();
353} 430}
354 431
355Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size, 432void KProcess::DoWorkerTaskImpl() {
356 bool is_hbl) { 433 // Terminate child threads.
357 m_program_id = metadata.GetTitleID(); 434 TerminateChildren(m_kernel, this, nullptr);
358 m_ideal_core = metadata.GetMainThreadCore();
359 m_is_64bit_process = metadata.Is64BitProgram();
360 m_system_resource_size = metadata.GetSystemResourceSize();
361 m_image_size = code_size;
362 m_is_hbl = is_hbl;
363 435
364 if (metadata.GetAddressSpaceType() == FileSys::ProgramAddressSpaceType::Is39Bit) { 436 // Finalize the handle table, if we're not immortal.
365 // For 39-bit processes, the ASLR region starts at 0x800'0000 and is ~512GiB large. 437 if (!m_is_immortal && m_is_handle_table_initialized) {
366 // However, some (buggy) programs/libraries like skyline incorrectly depend on the 438 this->FinalizeHandleTable();
367 // existence of ASLR pages before the entry point, so we will adjust the load address
368 // to point to about 2GiB into the ASLR region.
369 m_code_address = 0x8000'0000;
370 } else {
371 // All other processes can be mapped at the beginning of the code region.
372 if (metadata.GetAddressSpaceType() == FileSys::ProgramAddressSpaceType::Is36Bit) {
373 m_code_address = 0x800'0000;
374 } else {
375 m_code_address = 0x20'0000;
376 }
377 } 439 }
378 440
379 KScopedResourceReservation memory_reservation( 441 // Finish termination.
380 m_resource_limit, LimitableResource::PhysicalMemoryMax, code_size + m_system_resource_size); 442 this->FinishTermination();
381 if (!memory_reservation.Succeeded()) { 443}
382 LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
383 code_size + m_system_resource_size);
384 R_RETURN(ResultLimitReached);
385 }
386 // Initialize process address space
387 if (const Result result{m_page_table.InitializeForProcess(
388 metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application,
389 this->GetEntryPoint(), code_size, std::addressof(m_kernel.GetAppSystemResource()),
390 m_resource_limit, m_kernel.System().ApplicationMemory())};
391 result.IsError()) {
392 R_RETURN(result);
393 }
394
395 // Map process code region
396 if (const Result result{m_page_table.MapProcessCode(this->GetEntryPoint(), code_size / PageSize,
397 KMemoryState::Code,
398 KMemoryPermission::None)};
399 result.IsError()) {
400 R_RETURN(result);
401 }
402
403 // Initialize process capabilities
404 const auto& caps{metadata.GetKernelCapabilities()};
405 if (const Result result{
406 m_capabilities.InitializeForUserProcess(caps.data(), caps.size(), m_page_table)};
407 result.IsError()) {
408 R_RETURN(result);
409 }
410
411 // Set memory usage capacity
412 switch (metadata.GetAddressSpaceType()) {
413 case FileSys::ProgramAddressSpaceType::Is32Bit:
414 case FileSys::ProgramAddressSpaceType::Is36Bit:
415 case FileSys::ProgramAddressSpaceType::Is39Bit:
416 m_memory_usage_capacity =
417 m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart();
418 break;
419 444
420 case FileSys::ProgramAddressSpaceType::Is32BitNoMap: 445Result KProcess::StartTermination() {
421 m_memory_usage_capacity = 446 // Finalize the handle table when we're done, if the process isn't immortal.
422 (m_page_table.GetHeapRegionEnd() - m_page_table.GetHeapRegionStart()) + 447 SCOPE_EXIT({
423 (m_page_table.GetAliasRegionEnd() - m_page_table.GetAliasRegionStart()); 448 if (!m_is_immortal) {
424 break; 449 this->FinalizeHandleTable();
450 }
451 });
425 452
426 default: 453 // Terminate child threads other than the current one.
427 ASSERT(false); 454 R_RETURN(TerminateChildren(m_kernel, this, GetCurrentThreadPointer(m_kernel)));
428 break; 455}
429 }
430 456
431 // Create TLS region 457void KProcess::FinishTermination() {
432 R_TRY(this->CreateThreadLocalRegion(std::addressof(m_plr_address))); 458 // Only allow termination to occur if the process isn't immortal.
433 memory_reservation.Commit(); 459 if (!m_is_immortal) {
460 // Release resource limit hint.
461 if (m_resource_limit != nullptr) {
462 m_memory_release_hint = this->GetUsedNonSystemUserPhysicalMemorySize();
463 m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax, 0,
464 m_memory_release_hint);
465 }
466
467 // Change state.
468 {
469 KScopedSchedulerLock sl(m_kernel);
470 this->ChangeState(State::Terminated);
471 }
434 472
435 R_RETURN(m_handle_table.Initialize(m_capabilities.GetHandleTableSize())); 473 // Close.
474 this->Close();
475 }
436} 476}
437 477
438void KProcess::Run(s32 main_thread_priority, u64 stack_size) { 478void KProcess::Exit() {
439 ASSERT(this->AllocateMainThreadStack(stack_size) == ResultSuccess); 479 // Determine whether we need to start terminating
440 m_resource_limit->Reserve(LimitableResource::ThreadCountMax, 1); 480 bool needs_terminate = false;
481 {
482 KScopedLightLock lk(m_state_lock);
483 KScopedSchedulerLock sl(m_kernel);
484
485 ASSERT(m_state != State::Created);
486 ASSERT(m_state != State::CreatedAttached);
487 ASSERT(m_state != State::Crashed);
488 ASSERT(m_state != State::Terminated);
489 if (m_state == State::Running || m_state == State::RunningAttached ||
490 m_state == State::DebugBreak) {
491 this->ChangeState(State::Terminating);
492 needs_terminate = true;
493 }
494 }
441 495
442 const std::size_t heap_capacity{m_memory_usage_capacity - 496 // If we need to start termination, do so.
443 (m_main_thread_stack_size + m_image_size)}; 497 if (needs_terminate) {
444 ASSERT(!m_page_table.SetMaxHeapSize(heap_capacity).IsError()); 498 this->StartTermination();
445 499
446 this->ChangeState(State::Running); 500 // Register the process as a work task.
501 m_kernel.WorkerTaskManager().AddTask(m_kernel, KWorkerTaskManager::WorkerType::Exit, this);
502 }
447 503
448 SetupMainThread(m_kernel.System(), *this, main_thread_priority, m_main_thread_stack_top); 504 // Exit the current thread.
505 GetCurrentThread(m_kernel).Exit();
449} 506}
450 507
451void KProcess::PrepareForTermination() { 508Result KProcess::Terminate() {
452 this->ChangeState(State::Terminating); 509 // Determine whether we need to start terminating.
510 bool needs_terminate = false;
511 {
512 KScopedLightLock lk(m_state_lock);
453 513
454 const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) { 514 // Check whether we're allowed to terminate.
455 for (auto* thread : in_thread_list) { 515 R_UNLESS(m_state != State::Created, ResultInvalidState);
456 if (thread->GetOwnerProcess() != this) 516 R_UNLESS(m_state != State::CreatedAttached, ResultInvalidState);
457 continue;
458 517
459 if (thread == GetCurrentThreadPointer(m_kernel)) 518 KScopedSchedulerLock sl(m_kernel);
460 continue;
461 519
462 // TODO(Subv): When are the other running/ready threads terminated? 520 if (m_state == State::Running || m_state == State::RunningAttached ||
463 ASSERT_MSG(thread->GetState() == ThreadState::Waiting, 521 m_state == State::Crashed || m_state == State::DebugBreak) {
464 "Exiting processes with non-waiting threads is currently unimplemented"); 522 this->ChangeState(State::Terminating);
523 needs_terminate = true;
524 }
525 }
465 526
466 thread->Exit(); 527 // If we need to terminate, do so.
528 if (needs_terminate) {
529 // Start termination.
530 if (R_SUCCEEDED(this->StartTermination())) {
531 // Finish termination.
532 this->FinishTermination();
533 } else {
534 // Register the process as a work task.
535 m_kernel.WorkerTaskManager().AddTask(m_kernel, KWorkerTaskManager::WorkerType::Exit,
536 this);
467 } 537 }
468 }; 538 }
469 539
470 stop_threads(m_kernel.System().GlobalSchedulerContext().GetThreadList()); 540 R_SUCCEED();
541}
471 542
472 this->DeleteThreadLocalRegion(m_plr_address); 543Result KProcess::AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size) {
473 m_plr_address = 0; 544 // Lock ourselves, to prevent concurrent access.
545 KScopedLightLock lk(m_state_lock);
474 546
475 if (m_resource_limit) { 547 // Try to find an existing info for the memory.
476 m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, 548 KSharedMemoryInfo* info = nullptr;
477 m_main_thread_stack_size + m_image_size); 549 for (auto it = m_shared_memory_list.begin(); it != m_shared_memory_list.end(); ++it) {
550 if (it->GetSharedMemory() == shmem) {
551 info = std::addressof(*it);
552 break;
553 }
478 } 554 }
479 555
480 this->ChangeState(State::Terminated); 556 // If we didn't find an info, create one.
481} 557 if (info == nullptr) {
558 // Allocate a new info.
559 info = KSharedMemoryInfo::Allocate(m_kernel);
560 R_UNLESS(info != nullptr, ResultOutOfResource);
482 561
483void KProcess::Finalize() { 562 // Initialize the info and add it to our list.
484 // Free all shared memory infos. 563 info->Initialize(shmem);
485 { 564 m_shared_memory_list.push_back(*info);
486 auto it = m_shared_memory_list.begin(); 565 }
487 while (it != m_shared_memory_list.end()) {
488 KSharedMemoryInfo* info = *it;
489 KSharedMemory* shmem = info->GetSharedMemory();
490 566
491 while (!info->Close()) { 567 // Open a reference to the shared memory and its info.
492 shmem->Close(); 568 shmem->Open();
493 } 569 info->Open();
494 570
495 shmem->Close(); 571 R_SUCCEED();
572}
496 573
497 it = m_shared_memory_list.erase(it); 574void KProcess::RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size) {
498 KSharedMemoryInfo::Free(m_kernel, info); 575 // Lock ourselves, to prevent concurrent access.
576 KScopedLightLock lk(m_state_lock);
577
578 // Find an existing info for the memory.
579 KSharedMemoryInfo* info = nullptr;
580 auto it = m_shared_memory_list.begin();
581 for (; it != m_shared_memory_list.end(); ++it) {
582 if (it->GetSharedMemory() == shmem) {
583 info = std::addressof(*it);
584 break;
499 } 585 }
500 } 586 }
587 ASSERT(info != nullptr);
501 588
502 // Release memory to the resource limit. 589 // Close a reference to the info and its memory.
503 if (m_resource_limit != nullptr) { 590 if (info->Close()) {
504 m_resource_limit->Close(); 591 m_shared_memory_list.erase(it);
505 m_resource_limit = nullptr; 592 KSharedMemoryInfo::Free(m_kernel, info);
506 } 593 }
507 594
508 // Finalize the page table. 595 shmem->Close();
509 m_page_table.Finalize();
510
511 // Perform inherited finalization.
512 KSynchronizationObject::Finalize();
513} 596}
514 597
515Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) { 598Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
@@ -518,7 +601,7 @@ Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
518 601
519 // See if we can get a region from a partially used TLP. 602 // See if we can get a region from a partially used TLP.
520 { 603 {
521 KScopedSchedulerLock sl{m_kernel}; 604 KScopedSchedulerLock sl(m_kernel);
522 605
523 if (auto it = m_partially_used_tlp_tree.begin(); it != m_partially_used_tlp_tree.end()) { 606 if (auto it = m_partially_used_tlp_tree.begin(); it != m_partially_used_tlp_tree.end()) {
524 tlr = it->Reserve(); 607 tlr = it->Reserve();
@@ -538,7 +621,9 @@ Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
538 // Allocate a new page. 621 // Allocate a new page.
539 tlp = KThreadLocalPage::Allocate(m_kernel); 622 tlp = KThreadLocalPage::Allocate(m_kernel);
540 R_UNLESS(tlp != nullptr, ResultOutOfMemory); 623 R_UNLESS(tlp != nullptr, ResultOutOfMemory);
541 auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(m_kernel, tlp); }); 624 ON_RESULT_FAILURE {
625 KThreadLocalPage::Free(m_kernel, tlp);
626 };
542 627
543 // Initialize the new page. 628 // Initialize the new page.
544 R_TRY(tlp->Initialize(m_kernel, this)); 629 R_TRY(tlp->Initialize(m_kernel, this));
@@ -549,7 +634,7 @@ Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
549 634
550 // Insert into our tree. 635 // Insert into our tree.
551 { 636 {
552 KScopedSchedulerLock sl{m_kernel}; 637 KScopedSchedulerLock sl(m_kernel);
553 if (tlp->IsAllUsed()) { 638 if (tlp->IsAllUsed()) {
554 m_fully_used_tlp_tree.insert(*tlp); 639 m_fully_used_tlp_tree.insert(*tlp);
555 } else { 640 } else {
@@ -558,7 +643,6 @@ Result KProcess::CreateThreadLocalRegion(KProcessAddress* out) {
558 } 643 }
559 644
560 // We succeeded! 645 // We succeeded!
561 tlp_guard.Cancel();
562 *out = tlr; 646 *out = tlr;
563 R_SUCCEED(); 647 R_SUCCEED();
564} 648}
@@ -568,7 +652,7 @@ Result KProcess::DeleteThreadLocalRegion(KProcessAddress addr) {
568 652
569 // Release the region. 653 // Release the region.
570 { 654 {
571 KScopedSchedulerLock sl{m_kernel}; 655 KScopedSchedulerLock sl(m_kernel);
572 656
573 // Try to find the page in the partially used list. 657 // Try to find the page in the partially used list.
574 auto it = m_partially_used_tlp_tree.find_key(Common::AlignDown(GetInteger(addr), PageSize)); 658 auto it = m_partially_used_tlp_tree.find_key(Common::AlignDown(GetInteger(addr), PageSize));
@@ -611,95 +695,213 @@ Result KProcess::DeleteThreadLocalRegion(KProcessAddress addr) {
611 R_SUCCEED(); 695 R_SUCCEED();
612} 696}
613 697
614bool KProcess::InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type) { 698bool KProcess::ReserveResource(Svc::LimitableResource which, s64 value) {
615 const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) { 699 if (KResourceLimit* rl = this->GetResourceLimit(); rl != nullptr) {
616 return wp.type == DebugWatchpointType::None; 700 return rl->Reserve(which, value);
617 })}; 701 } else {
702 return true;
703 }
704}
618 705
619 if (watch == m_watchpoints.end()) { 706bool KProcess::ReserveResource(Svc::LimitableResource which, s64 value, s64 timeout) {
620 return false; 707 if (KResourceLimit* rl = this->GetResourceLimit(); rl != nullptr) {
708 return rl->Reserve(which, value, timeout);
709 } else {
710 return true;
621 } 711 }
712}
622 713
623 watch->start_address = addr; 714void KProcess::ReleaseResource(Svc::LimitableResource which, s64 value) {
624 watch->end_address = addr + size; 715 if (KResourceLimit* rl = this->GetResourceLimit(); rl != nullptr) {
625 watch->type = type; 716 rl->Release(which, value);
717 }
718}
626 719
627 for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size; 720void KProcess::ReleaseResource(Svc::LimitableResource which, s64 value, s64 hint) {
628 page += PageSize) { 721 if (KResourceLimit* rl = this->GetResourceLimit(); rl != nullptr) {
629 m_debug_page_refcounts[page]++; 722 rl->Release(which, value, hint);
630 this->GetMemory().MarkRegionDebug(page, PageSize, true);
631 } 723 }
724}
632 725
633 return true; 726void KProcess::IncrementRunningThreadCount() {
727 ASSERT(m_num_running_threads.load() >= 0);
728
729 ++m_num_running_threads;
634} 730}
635 731
636bool KProcess::RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type) { 732void KProcess::DecrementRunningThreadCount() {
637 const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) { 733 ASSERT(m_num_running_threads.load() > 0);
638 return wp.start_address == addr && wp.end_address == addr + size && wp.type == type;
639 })};
640 734
641 if (watch == m_watchpoints.end()) { 735 if (const auto prev = m_num_running_threads--; prev == 1) {
736 this->Terminate();
737 }
738}
739
740bool KProcess::EnterUserException() {
741 // Get the current thread.
742 KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
743 ASSERT(this == cur_thread->GetOwnerProcess());
744
745 // Check that we haven't already claimed the exception thread.
746 if (m_exception_thread == cur_thread) {
642 return false; 747 return false;
643 } 748 }
644 749
645 watch->start_address = 0; 750 // Create the wait queue we'll be using.
646 watch->end_address = 0; 751 ThreadQueueImplForKProcessEnterUserException wait_queue(m_kernel,
647 watch->type = DebugWatchpointType::None; 752 std::addressof(m_exception_thread));
648 753
649 for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size; 754 // Claim the exception thread.
650 page += PageSize) { 755 {
651 m_debug_page_refcounts[page]--; 756 // Lock the scheduler.
652 if (!m_debug_page_refcounts[page]) { 757 KScopedSchedulerLock sl(m_kernel);
653 this->GetMemory().MarkRegionDebug(page, PageSize, false); 758
759 // Check that we're not terminating.
760 if (cur_thread->IsTerminationRequested()) {
761 return false;
762 }
763
764 // If we don't have an exception thread, we can just claim it directly.
765 if (m_exception_thread == nullptr) {
766 m_exception_thread = cur_thread;
767 KScheduler::SetSchedulerUpdateNeeded(m_kernel);
768 return true;
654 } 769 }
770
771 // Otherwise, we need to wait until we don't have an exception thread.
772
773 // Add the current thread as a waiter on the current exception thread.
774 cur_thread->SetKernelAddressKey(
775 reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)) | 1);
776 m_exception_thread->AddWaiter(cur_thread);
777
778 // Wait to claim the exception thread.
779 cur_thread->BeginWait(std::addressof(wait_queue));
655 } 780 }
656 781
657 return true; 782 // If our wait didn't end due to thread termination, we succeeded.
783 return ResultTerminationRequested != cur_thread->GetWaitResult();
658} 784}
659 785
660void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) { 786bool KProcess::LeaveUserException() {
661 const auto ReprotectSegment = [&](const CodeSet::Segment& segment, 787 return this->ReleaseUserException(GetCurrentThreadPointer(m_kernel));
662 Svc::MemoryPermission permission) { 788}
663 m_page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
664 };
665 789
666 this->GetMemory().WriteBlock(base_addr, code_set.memory.data(), code_set.memory.size()); 790bool KProcess::ReleaseUserException(KThread* thread) {
791 KScopedSchedulerLock sl(m_kernel);
667 792
668 ReprotectSegment(code_set.CodeSegment(), Svc::MemoryPermission::ReadExecute); 793 if (m_exception_thread == thread) {
669 ReprotectSegment(code_set.RODataSegment(), Svc::MemoryPermission::Read); 794 m_exception_thread = nullptr;
670 ReprotectSegment(code_set.DataSegment(), Svc::MemoryPermission::ReadWrite); 795
796 // Remove waiter thread.
797 bool has_waiters;
798 if (KThread* next = thread->RemoveKernelWaiterByKey(
799 std::addressof(has_waiters),
800 reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)) | 1);
801 next != nullptr) {
802 next->EndWait(ResultSuccess);
803 }
804
805 KScheduler::SetSchedulerUpdateNeeded(m_kernel);
806
807 return true;
808 } else {
809 return false;
810 }
671} 811}
672 812
673bool KProcess::IsSignaled() const { 813void KProcess::RegisterThread(KThread* thread) {
674 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); 814 KScopedLightLock lk(m_list_lock);
675 return m_is_signaled; 815
816 m_thread_list.push_back(*thread);
676} 817}
677 818
678KProcess::KProcess(KernelCore& kernel) 819void KProcess::UnregisterThread(KThread* thread) {
679 : KAutoObjectWithSlabHeapAndContainer{kernel}, m_page_table{m_kernel.System()}, 820 KScopedLightLock lk(m_list_lock);
680 m_handle_table{m_kernel}, m_address_arbiter{m_kernel.System()},
681 m_condition_var{m_kernel.System()}, m_state_lock{m_kernel}, m_list_lock{m_kernel} {}
682 821
683KProcess::~KProcess() = default; 822 m_thread_list.erase(m_thread_list.iterator_to(*thread));
823}
824
825size_t KProcess::GetUsedUserPhysicalMemorySize() const {
826 const size_t norm_size = m_page_table.GetNormalMemorySize();
827 const size_t other_size = m_code_size + m_main_thread_stack_size;
828 const size_t sec_size = this->GetRequiredSecureMemorySizeNonDefault();
684 829
685void KProcess::ChangeState(State new_state) { 830 return norm_size + other_size + sec_size;
686 if (m_state == new_state) { 831}
687 return; 832
833size_t KProcess::GetTotalUserPhysicalMemorySize() const {
834 // Get the amount of free and used size.
835 const size_t free_size =
836 m_resource_limit->GetFreeValue(Svc::LimitableResource::PhysicalMemoryMax);
837 const size_t max_size = m_max_process_memory;
838
839 // Determine used size.
840 // NOTE: This does *not* check this->IsDefaultApplicationSystemResource(), unlike
841 // GetUsedUserPhysicalMemorySize().
842 const size_t norm_size = m_page_table.GetNormalMemorySize();
843 const size_t other_size = m_code_size + m_main_thread_stack_size;
844 const size_t sec_size = this->GetRequiredSecureMemorySize();
845 const size_t used_size = norm_size + other_size + sec_size;
846
847 // NOTE: These function calls will recalculate, introducing a race...it is unclear why Nintendo
848 // does it this way.
849 if (used_size + free_size > max_size) {
850 return max_size;
851 } else {
852 return free_size + this->GetUsedUserPhysicalMemorySize();
688 } 853 }
854}
689 855
690 m_state = new_state; 856size_t KProcess::GetUsedNonSystemUserPhysicalMemorySize() const {
691 m_is_signaled = true; 857 const size_t norm_size = m_page_table.GetNormalMemorySize();
692 this->NotifyAvailable(); 858 const size_t other_size = m_code_size + m_main_thread_stack_size;
859
860 return norm_size + other_size;
861}
862
863size_t KProcess::GetTotalNonSystemUserPhysicalMemorySize() const {
864 // Get the amount of free and used size.
865 const size_t free_size =
866 m_resource_limit->GetFreeValue(Svc::LimitableResource::PhysicalMemoryMax);
867 const size_t max_size = m_max_process_memory;
868
869 // Determine used size.
870 // NOTE: This does *not* check this->IsDefaultApplicationSystemResource(), unlike
871 // GetUsedUserPhysicalMemorySize().
872 const size_t norm_size = m_page_table.GetNormalMemorySize();
873 const size_t other_size = m_code_size + m_main_thread_stack_size;
874 const size_t sec_size = this->GetRequiredSecureMemorySize();
875 const size_t used_size = norm_size + other_size + sec_size;
876
877 // NOTE: These function calls will recalculate, introducing a race...it is unclear why Nintendo
878 // does it this way.
879 if (used_size + free_size > max_size) {
880 return max_size - this->GetRequiredSecureMemorySizeNonDefault();
881 } else {
882 return free_size + this->GetUsedNonSystemUserPhysicalMemorySize();
883 }
693} 884}
694 885
695Result KProcess::AllocateMainThreadStack(std::size_t stack_size) { 886Result KProcess::Run(s32 priority, size_t stack_size) {
887 // Lock ourselves, to prevent concurrent access.
888 KScopedLightLock lk(m_state_lock);
889
890 // Validate that we're in a state where we can initialize.
891 const auto state = m_state;
892 R_UNLESS(state == State::Created || state == State::CreatedAttached, ResultInvalidState);
893
894 // Place a tentative reservation of a thread for this process.
895 KScopedResourceReservation thread_reservation(this, Svc::LimitableResource::ThreadCountMax);
896 R_UNLESS(thread_reservation.Succeeded(), ResultLimitReached);
897
696 // Ensure that we haven't already allocated stack. 898 // Ensure that we haven't already allocated stack.
697 ASSERT(m_main_thread_stack_size == 0); 899 ASSERT(m_main_thread_stack_size == 0);
698 900
699 // Ensure that we're allocating a valid stack. 901 // Ensure that we're allocating a valid stack.
700 stack_size = Common::AlignUp(stack_size, PageSize); 902 stack_size = Common::AlignUp(stack_size, PageSize);
701 // R_UNLESS(stack_size + image_size <= m_max_process_memory, ResultOutOfMemory); 903 R_UNLESS(stack_size + m_code_size <= m_max_process_memory, ResultOutOfMemory);
702 R_UNLESS(stack_size + m_image_size >= m_image_size, ResultOutOfMemory); 904 R_UNLESS(stack_size + m_code_size >= m_code_size, ResultOutOfMemory);
703 905
704 // Place a tentative reservation of memory for our new stack. 906 // Place a tentative reservation of memory for our new stack.
705 KScopedResourceReservation mem_reservation(this, Svc::LimitableResource::PhysicalMemoryMax, 907 KScopedResourceReservation mem_reservation(this, Svc::LimitableResource::PhysicalMemoryMax,
@@ -707,21 +909,359 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
707 R_UNLESS(mem_reservation.Succeeded(), ResultLimitReached); 909 R_UNLESS(mem_reservation.Succeeded(), ResultLimitReached);
708 910
709 // Allocate and map our stack. 911 // Allocate and map our stack.
912 KProcessAddress stack_top = 0;
710 if (stack_size) { 913 if (stack_size) {
711 KProcessAddress stack_bottom; 914 KProcessAddress stack_bottom;
712 R_TRY(m_page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize, 915 R_TRY(m_page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize,
713 KMemoryState::Stack, KMemoryPermission::UserReadWrite)); 916 KMemoryState::Stack, KMemoryPermission::UserReadWrite));
714 917
715 m_main_thread_stack_top = stack_bottom + stack_size; 918 stack_top = stack_bottom + stack_size;
716 m_main_thread_stack_size = stack_size; 919 m_main_thread_stack_size = stack_size;
717 } 920 }
718 921
922 // Ensure our stack is safe to clean up on exit.
923 ON_RESULT_FAILURE {
924 if (m_main_thread_stack_size) {
925 ASSERT(R_SUCCEEDED(m_page_table.UnmapPages(stack_top - m_main_thread_stack_size,
926 m_main_thread_stack_size / PageSize,
927 KMemoryState::Stack)));
928 m_main_thread_stack_size = 0;
929 }
930 };
931
932 // Set our maximum heap size.
933 R_TRY(m_page_table.SetMaxHeapSize(m_max_process_memory -
934 (m_main_thread_stack_size + m_code_size)));
935
936 // Initialize our handle table.
937 R_TRY(this->InitializeHandleTable(m_capabilities.GetHandleTableSize()));
938 ON_RESULT_FAILURE_2 {
939 this->FinalizeHandleTable();
940 };
941
942 // Create a new thread for the process.
943 KThread* main_thread = KThread::Create(m_kernel);
944 R_UNLESS(main_thread != nullptr, ResultOutOfResource);
945 SCOPE_EXIT({ main_thread->Close(); });
946
947 // Initialize the thread.
948 R_TRY(KThread::InitializeUserThread(m_kernel.System(), main_thread, this->GetEntryPoint(), 0,
949 stack_top, priority, m_ideal_core_id, this));
950
951 // Register the thread, and commit our reservation.
952 KThread::Register(m_kernel, main_thread);
953 thread_reservation.Commit();
954
955 // Add the thread to our handle table.
956 Handle thread_handle;
957 R_TRY(m_handle_table.Add(std::addressof(thread_handle), main_thread));
958
959 // Set the thread arguments.
960 main_thread->GetContext32().cpu_registers[0] = 0;
961 main_thread->GetContext64().cpu_registers[0] = 0;
962 main_thread->GetContext32().cpu_registers[1] = thread_handle;
963 main_thread->GetContext64().cpu_registers[1] = thread_handle;
964
965 // Update our state.
966 this->ChangeState((state == State::Created) ? State::Running : State::RunningAttached);
967 ON_RESULT_FAILURE_2 {
968 this->ChangeState(state);
969 };
970
971 // Suspend for debug, if we should.
972 if (m_kernel.System().DebuggerEnabled()) {
973 main_thread->RequestSuspend(SuspendType::Debug);
974 }
975
976 // Run our thread.
977 R_TRY(main_thread->Run());
978
979 // Open a reference to represent that we're running.
980 this->Open();
981
719 // We succeeded! Commit our memory reservation. 982 // We succeeded! Commit our memory reservation.
720 mem_reservation.Commit(); 983 mem_reservation.Commit();
721 984
722 R_SUCCEED(); 985 R_SUCCEED();
723} 986}
724 987
988Result KProcess::Reset() {
989 // Lock the process and the scheduler.
990 KScopedLightLock lk(m_state_lock);
991 KScopedSchedulerLock sl(m_kernel);
992
993 // Validate that we're in a state that we can reset.
994 R_UNLESS(m_state != State::Terminated, ResultInvalidState);
995 R_UNLESS(m_is_signaled, ResultInvalidState);
996
997 // Clear signaled.
998 m_is_signaled = false;
999 R_SUCCEED();
1000}
1001
1002Result KProcess::SetActivity(Svc::ProcessActivity activity) {
1003 // Lock ourselves and the scheduler.
1004 KScopedLightLock lk(m_state_lock);
1005 KScopedLightLock list_lk(m_list_lock);
1006 KScopedSchedulerLock sl(m_kernel);
1007
1008 // Validate our state.
1009 R_UNLESS(m_state != State::Terminating, ResultInvalidState);
1010 R_UNLESS(m_state != State::Terminated, ResultInvalidState);
1011
1012 // Either pause or resume.
1013 if (activity == Svc::ProcessActivity::Paused) {
1014 // Verify that we're not suspended.
1015 R_UNLESS(!m_is_suspended, ResultInvalidState);
1016
1017 // Suspend all threads.
1018 auto end = this->GetThreadList().end();
1019 for (auto it = this->GetThreadList().begin(); it != end; ++it) {
1020 it->RequestSuspend(SuspendType::Process);
1021 }
1022
1023 // Set ourselves as suspended.
1024 this->SetSuspended(true);
1025 } else {
1026 ASSERT(activity == Svc::ProcessActivity::Runnable);
1027
1028 // Verify that we're suspended.
1029 R_UNLESS(m_is_suspended, ResultInvalidState);
1030
1031 // Resume all threads.
1032 auto end = this->GetThreadList().end();
1033 for (auto it = this->GetThreadList().begin(); it != end; ++it) {
1034 it->Resume(SuspendType::Process);
1035 }
1036
1037 // Set ourselves as resumed.
1038 this->SetSuspended(false);
1039 }
1040
1041 R_SUCCEED();
1042}
1043
1044void KProcess::PinCurrentThread() {
1045 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
1046
1047 // Get the current thread.
1048 const s32 core_id = GetCurrentCoreId(m_kernel);
1049 KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
1050
1051 // If the thread isn't terminated, pin it.
1052 if (!cur_thread->IsTerminationRequested()) {
1053 // Pin it.
1054 this->PinThread(core_id, cur_thread);
1055 cur_thread->Pin(core_id);
1056
1057 // An update is needed.
1058 KScheduler::SetSchedulerUpdateNeeded(m_kernel);
1059 }
1060}
1061
1062void KProcess::UnpinCurrentThread() {
1063 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
1064
1065 // Get the current thread.
1066 const s32 core_id = GetCurrentCoreId(m_kernel);
1067 KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
1068
1069 // Unpin it.
1070 cur_thread->Unpin();
1071 this->UnpinThread(core_id, cur_thread);
1072
1073 // An update is needed.
1074 KScheduler::SetSchedulerUpdateNeeded(m_kernel);
1075}
1076
1077void KProcess::UnpinThread(KThread* thread) {
1078 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
1079
1080 // Get the thread's core id.
1081 const auto core_id = thread->GetActiveCore();
1082
1083 // Unpin it.
1084 this->UnpinThread(core_id, thread);
1085 thread->Unpin();
1086
1087 // An update is needed.
1088 KScheduler::SetSchedulerUpdateNeeded(m_kernel);
1089}
1090
1091Result KProcess::GetThreadList(s32* out_num_threads, KProcessAddress out_thread_ids,
1092 s32 max_out_count) {
1093 // TODO: use current memory reference
1094 auto& memory = m_kernel.System().ApplicationMemory();
1095
1096 // Lock the list.
1097 KScopedLightLock lk(m_list_lock);
1098
1099 // Iterate over the list.
1100 s32 count = 0;
1101 auto end = this->GetThreadList().end();
1102 for (auto it = this->GetThreadList().begin(); it != end; ++it) {
1103 // If we're within array bounds, write the id.
1104 if (count < max_out_count) {
1105 // Get the thread id.
1106 KThread* thread = std::addressof(*it);
1107 const u64 id = thread->GetId();
1108
1109 // Copy the id to userland.
1110 memory.Write64(out_thread_ids + count * sizeof(u64), id);
1111 }
1112
1113 // Increment the count.
1114 ++count;
1115 }
1116
1117 // We successfully iterated the list.
1118 *out_num_threads = count;
1119 R_SUCCEED();
1120}
1121
// NOTE(review): intentionally empty in this implementation — no per-process work is
// performed on a process switch here; confirm any future bookkeeping belongs elsewhere.
1122void KProcess::Switch(KProcess* cur_process, KProcess* next_process) {}
1123
// Construct the subobjects that require kernel/system references; remaining members
// are value-initialized by their in-class initializers.
1124KProcess::KProcess(KernelCore& kernel)
1125    : KAutoObjectWithSlabHeapAndContainer(kernel), m_page_table{kernel.System()},
1126      m_state_lock{kernel}, m_list_lock{kernel}, m_cond_var{kernel.System()},
1127      m_address_arbiter{kernel.System()}, m_handle_table{kernel} {}
// No owned raw resources beyond the subobjects above; default destruction suffices.
1128KProcess::~KProcess() = default;
1129
1130Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
1131 bool is_hbl) {
1132 // Create a resource limit for the process.
1133 const auto physical_memory_size =
1134 m_kernel.MemoryManager().GetSize(Kernel::KMemoryManager::Pool::Application);
1135 auto* res_limit =
1136 Kernel::CreateResourceLimitForProcess(m_kernel.System(), physical_memory_size);
1137
1138 // Ensure we maintain a clean state on exit.
1139 SCOPE_EXIT({ res_limit->Close(); });
1140
1141 // Declare flags and code address.
1142 Svc::CreateProcessFlag flag{};
1143 u64 code_address{};
1144
1145 // We are an application.
1146 flag |= Svc::CreateProcessFlag::IsApplication;
1147
1148 // If we are 64-bit, create as such.
1149 if (metadata.Is64BitProgram()) {
1150 flag |= Svc::CreateProcessFlag::Is64Bit;
1151 }
1152
1153 // Set the address space type and code address.
1154 switch (metadata.GetAddressSpaceType()) {
1155 case FileSys::ProgramAddressSpaceType::Is39Bit:
1156 flag |= Svc::CreateProcessFlag::AddressSpace64Bit;
1157
1158 // For 39-bit processes, the ASLR region starts at 0x800'0000 and is ~512GiB large.
1159 // However, some (buggy) programs/libraries like skyline incorrectly depend on the
1160 // existence of ASLR pages before the entry point, so we will adjust the load address
1161 // to point to about 2GiB into the ASLR region.
1162 code_address = 0x8000'0000;
1163 break;
1164 case FileSys::ProgramAddressSpaceType::Is36Bit:
1165 flag |= Svc::CreateProcessFlag::AddressSpace64BitDeprecated;
1166 code_address = 0x800'0000;
1167 break;
1168 case FileSys::ProgramAddressSpaceType::Is32Bit:
1169 flag |= Svc::CreateProcessFlag::AddressSpace32Bit;
1170 code_address = 0x20'0000;
1171 break;
1172 case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
1173 flag |= Svc::CreateProcessFlag::AddressSpace32BitWithoutAlias;
1174 code_address = 0x20'0000;
1175 break;
1176 }
1177
1178 Svc::CreateProcessParameter params{
1179 .name = {},
1180 .version = {},
1181 .program_id = metadata.GetTitleID(),
1182 .code_address = code_address,
1183 .code_num_pages = static_cast<s32>(code_size / PageSize),
1184 .flags = flag,
1185 .reslimit = Svc::InvalidHandle,
1186 .system_resource_num_pages = static_cast<s32>(metadata.GetSystemResourceSize() / PageSize),
1187 };
1188
1189 // Set the process name.
1190 const auto& name = metadata.GetName();
1191 static_assert(sizeof(params.name) <= sizeof(name));
1192 std::memcpy(params.name.data(), name.data(), sizeof(params.name));
1193
1194 // Initialize for application process.
1195 R_TRY(this->Initialize(params, metadata.GetKernelCapabilities(), res_limit,
1196 KMemoryManager::Pool::Application));
1197
1198 // Assign remaining properties.
1199 m_is_hbl = is_hbl;
1200 m_ideal_core_id = metadata.GetMainThreadCore();
1201
1202 // We succeeded.
1203 R_SUCCEED();
1204}
1205
1206void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
1207 const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
1208 Svc::MemoryPermission permission) {
1209 m_page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
1210 };
1211
1212 this->GetMemory().WriteBlock(base_addr, code_set.memory.data(), code_set.memory.size());
1213
1214 ReprotectSegment(code_set.CodeSegment(), Svc::MemoryPermission::ReadExecute);
1215 ReprotectSegment(code_set.RODataSegment(), Svc::MemoryPermission::Read);
1216 ReprotectSegment(code_set.DataSegment(), Svc::MemoryPermission::ReadWrite);
1217}
1218
1219bool KProcess::InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type) {
1220 const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
1221 return wp.type == DebugWatchpointType::None;
1222 })};
1223
1224 if (watch == m_watchpoints.end()) {
1225 return false;
1226 }
1227
1228 watch->start_address = addr;
1229 watch->end_address = addr + size;
1230 watch->type = type;
1231
1232 for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
1233 page += PageSize) {
1234 m_debug_page_refcounts[page]++;
1235 this->GetMemory().MarkRegionDebug(page, PageSize, true);
1236 }
1237
1238 return true;
1239}
1240
1241bool KProcess::RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type) {
1242 const auto watch{std::find_if(m_watchpoints.begin(), m_watchpoints.end(), [&](const auto& wp) {
1243 return wp.start_address == addr && wp.end_address == addr + size && wp.type == type;
1244 })};
1245
1246 if (watch == m_watchpoints.end()) {
1247 return false;
1248 }
1249
1250 watch->start_address = 0;
1251 watch->end_address = 0;
1252 watch->type = DebugWatchpointType::None;
1253
1254 for (KProcessAddress page = Common::AlignDown(GetInteger(addr), PageSize); page < addr + size;
1255 page += PageSize) {
1256 m_debug_page_refcounts[page]--;
1257 if (!m_debug_page_refcounts[page]) {
1258 this->GetMemory().MarkRegionDebug(page, PageSize, false);
1259 }
1260 }
1261
1262 return true;
1263}
1264
725Core::Memory::Memory& KProcess::GetMemory() const { 1265Core::Memory::Memory& KProcess::GetMemory() const {
726 // TODO: per-process memory 1266 // TODO: per-process memory
727 return m_kernel.System().ApplicationMemory(); 1267 return m_kernel.System().ApplicationMemory();
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 146e07a57..f9f755afa 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -1,59 +1,23 @@
1// SPDX-FileCopyrightText: 2015 Citra Emulator Project 1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#pragma once 4#pragma once
5 5
6#include <array>
7#include <cstddef>
8#include <list>
9#include <map> 6#include <map>
10#include <string> 7
8#include "core/hle/kernel/code_set.h"
11#include "core/hle/kernel/k_address_arbiter.h" 9#include "core/hle/kernel/k_address_arbiter.h"
12#include "core/hle/kernel/k_auto_object.h" 10#include "core/hle/kernel/k_capabilities.h"
13#include "core/hle/kernel/k_condition_variable.h" 11#include "core/hle/kernel/k_condition_variable.h"
14#include "core/hle/kernel/k_handle_table.h" 12#include "core/hle/kernel/k_handle_table.h"
15#include "core/hle/kernel/k_page_table.h" 13#include "core/hle/kernel/k_page_table.h"
16#include "core/hle/kernel/k_synchronization_object.h" 14#include "core/hle/kernel/k_page_table_manager.h"
15#include "core/hle/kernel/k_system_resource.h"
16#include "core/hle/kernel/k_thread.h"
17#include "core/hle/kernel/k_thread_local_page.h" 17#include "core/hle/kernel/k_thread_local_page.h"
18#include "core/hle/kernel/k_typed_address.h"
19#include "core/hle/kernel/k_worker_task.h"
20#include "core/hle/kernel/process_capability.h"
21#include "core/hle/kernel/slab_helpers.h"
22#include "core/hle/result.h"
23
24namespace Core {
25namespace Memory {
26class Memory;
27};
28
29class System;
30} // namespace Core
31
32namespace FileSys {
33class ProgramMetadata;
34}
35 18
36namespace Kernel { 19namespace Kernel {
37 20
38class KernelCore;
39class KResourceLimit;
40class KThread;
41class KSharedMemoryInfo;
42class TLSPage;
43
44struct CodeSet;
45
46enum class MemoryRegion : u16 {
47 APPLICATION = 1,
48 SYSTEM = 2,
49 BASE = 3,
50};
51
52enum class ProcessActivity : u32 {
53 Runnable,
54 Paused,
55};
56
57enum class DebugWatchpointType : u8 { 21enum class DebugWatchpointType : u8 {
58 None = 0, 22 None = 0,
59 Read = 1 << 0, 23 Read = 1 << 0,
@@ -72,9 +36,6 @@ class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWor
72 KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject); 36 KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);
73 37
74public: 38public:
75 explicit KProcess(KernelCore& kernel);
76 ~KProcess() override;
77
78 enum class State { 39 enum class State {
79 Created = static_cast<u32>(Svc::ProcessState::Created), 40 Created = static_cast<u32>(Svc::ProcessState::Created),
80 CreatedAttached = static_cast<u32>(Svc::ProcessState::CreatedAttached), 41 CreatedAttached = static_cast<u32>(Svc::ProcessState::CreatedAttached),
@@ -86,470 +47,493 @@ public:
86 DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak), 47 DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak),
87 }; 48 };
88 49
89 enum : u64 { 50 using ThreadList = Common::IntrusiveListMemberTraits<&KThread::m_process_list_node>::ListType;
90 /// Lowest allowed process ID for a kernel initial process.
91 InitialKIPIDMin = 1,
92 /// Highest allowed process ID for a kernel initial process.
93 InitialKIPIDMax = 80,
94
95 /// Lowest allowed process ID for a userland process.
96 ProcessIDMin = 81,
97 /// Highest allowed process ID for a userland process.
98 ProcessIDMax = 0xFFFFFFFFFFFFFFFF,
99 };
100 51
101 // Used to determine how process IDs are assigned. 52 static constexpr size_t AslrAlignment = 2_MiB;
102 enum class ProcessType {
103 KernelInternal,
104 Userland,
105 };
106 53
107 static constexpr std::size_t RANDOM_ENTROPY_SIZE = 4; 54public:
55 static constexpr u64 InitialProcessIdMin = 1;
56 static constexpr u64 InitialProcessIdMax = 0x50;
108 57
109 static Result Initialize(KProcess* process, Core::System& system, std::string process_name, 58 static constexpr u64 ProcessIdMin = InitialProcessIdMax + 1;
110 ProcessType type, KResourceLimit* res_limit); 59 static constexpr u64 ProcessIdMax = std::numeric_limits<u64>::max();
111 60
112 /// Gets a reference to the process' page table. 61private:
113 KPageTable& GetPageTable() { 62 using SharedMemoryInfoList = Common::IntrusiveListBaseTraits<KSharedMemoryInfo>::ListType;
114 return m_page_table; 63 using TLPTree =
115 } 64 Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
65 using TLPIterator = TLPTree::iterator;
116 66
117 /// Gets const a reference to the process' page table. 67private:
118 const KPageTable& GetPageTable() const { 68 KPageTable m_page_table;
119 return m_page_table; 69 std::atomic<size_t> m_used_kernel_memory_size{};
120 } 70 TLPTree m_fully_used_tlp_tree{};
71 TLPTree m_partially_used_tlp_tree{};
72 s32 m_ideal_core_id{};
73 KResourceLimit* m_resource_limit{};
74 KSystemResource* m_system_resource{};
75 size_t m_memory_release_hint{};
76 State m_state{};
77 KLightLock m_state_lock;
78 KLightLock m_list_lock;
79 KConditionVariable m_cond_var;
80 KAddressArbiter m_address_arbiter;
81 std::array<u64, 4> m_entropy{};
82 bool m_is_signaled{};
83 bool m_is_initialized{};
84 bool m_is_application{};
85 bool m_is_default_application_system_resource{};
86 bool m_is_hbl{};
87 std::array<char, 13> m_name{};
88 std::atomic<u16> m_num_running_threads{};
89 Svc::CreateProcessFlag m_flags{};
90 KMemoryManager::Pool m_memory_pool{};
91 s64 m_schedule_count{};
92 KCapabilities m_capabilities{};
93 u64 m_program_id{};
94 u64 m_process_id{};
95 KProcessAddress m_code_address{};
96 size_t m_code_size{};
97 size_t m_main_thread_stack_size{};
98 size_t m_max_process_memory{};
99 u32 m_version{};
100 KHandleTable m_handle_table;
101 KProcessAddress m_plr_address{};
102 KThread* m_exception_thread{};
103 ThreadList m_thread_list{};
104 SharedMemoryInfoList m_shared_memory_list{};
105 bool m_is_suspended{};
106 bool m_is_immortal{};
107 bool m_is_handle_table_initialized{};
108 std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_running_threads{};
109 std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{};
110 std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_switch_counts{};
111 std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{};
112 std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{};
113 std::map<KProcessAddress, u64> m_debug_page_refcounts{};
114 std::atomic<s64> m_cpu_time{};
115 std::atomic<s64> m_num_process_switches{};
116 std::atomic<s64> m_num_thread_switches{};
117 std::atomic<s64> m_num_fpu_switches{};
118 std::atomic<s64> m_num_supervisor_calls{};
119 std::atomic<s64> m_num_ipc_messages{};
120 std::atomic<s64> m_num_ipc_replies{};
121 std::atomic<s64> m_num_ipc_receives{};
121 122
122 /// Gets a reference to the process' handle table. 123private:
123 KHandleTable& GetHandleTable() { 124 Result StartTermination();
124 return m_handle_table; 125 void FinishTermination();
125 }
126 126
127 /// Gets a const reference to the process' handle table. 127 void PinThread(s32 core_id, KThread* thread) {
128 const KHandleTable& GetHandleTable() const { 128 ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
129 return m_handle_table; 129 ASSERT(thread != nullptr);
130 ASSERT(m_pinned_threads[core_id] == nullptr);
131 m_pinned_threads[core_id] = thread;
130 } 132 }
131 133
132 /// Gets a reference to process's memory. 134 void UnpinThread(s32 core_id, KThread* thread) {
133 Core::Memory::Memory& GetMemory() const; 135 ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
134 136 ASSERT(thread != nullptr);
135 Result SignalToAddress(KProcessAddress address) { 137 ASSERT(m_pinned_threads[core_id] == thread);
136 return m_condition_var.SignalToAddress(address); 138 m_pinned_threads[core_id] = nullptr;
137 } 139 }
138 140
139 Result WaitForAddress(Handle handle, KProcessAddress address, u32 tag) { 141public:
140 return m_condition_var.WaitForAddress(handle, address, tag); 142 explicit KProcess(KernelCore& kernel);
141 } 143 ~KProcess() override;
142 144
143 void SignalConditionVariable(u64 cv_key, int32_t count) { 145 Result Initialize(const Svc::CreateProcessParameter& params, KResourceLimit* res_limit,
144 return m_condition_var.Signal(cv_key, count); 146 bool is_real);
145 }
146 147
147 Result WaitConditionVariable(KProcessAddress address, u64 cv_key, u32 tag, s64 ns) { 148 Result Initialize(const Svc::CreateProcessParameter& params, const KPageGroup& pg,
148 R_RETURN(m_condition_var.Wait(address, cv_key, tag, ns)); 149 std::span<const u32> caps, KResourceLimit* res_limit,
149 } 150 KMemoryManager::Pool pool, bool immortal);
151 Result Initialize(const Svc::CreateProcessParameter& params, std::span<const u32> user_caps,
152 KResourceLimit* res_limit, KMemoryManager::Pool pool);
153 void Exit();
150 154
151 Result SignalAddressArbiter(uint64_t address, Svc::SignalType signal_type, s32 value, 155 const char* GetName() const {
152 s32 count) { 156 return m_name.data();
153 R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
154 } 157 }
155 158
156 Result WaitAddressArbiter(uint64_t address, Svc::ArbitrationType arb_type, s32 value, 159 u64 GetProgramId() const {
157 s64 timeout) { 160 return m_program_id;
158 R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
159 } 161 }
160 162
161 KProcessAddress GetProcessLocalRegionAddress() const { 163 u64 GetProcessId() const {
162 return m_plr_address; 164 return m_process_id;
163 } 165 }
164 166
165 /// Gets the current status of the process
166 State GetState() const { 167 State GetState() const {
167 return m_state; 168 return m_state;
168 } 169 }
169 170
170 /// Gets the unique ID that identifies this particular process. 171 u64 GetCoreMask() const {
171 u64 GetProcessId() const { 172 return m_capabilities.GetCoreMask();
172 return m_process_id; 173 }
174 u64 GetPhysicalCoreMask() const {
175 return m_capabilities.GetPhysicalCoreMask();
176 }
177 u64 GetPriorityMask() const {
178 return m_capabilities.GetPriorityMask();
173 } 179 }
174 180
175 /// Gets the program ID corresponding to this process. 181 s32 GetIdealCoreId() const {
176 u64 GetProgramId() const { 182 return m_ideal_core_id;
177 return m_program_id; 183 }
184 void SetIdealCoreId(s32 core_id) {
185 m_ideal_core_id = core_id;
178 } 186 }
179 187
180 KProcessAddress GetEntryPoint() const { 188 bool CheckThreadPriority(s32 prio) const {
181 return m_code_address; 189 return ((1ULL << prio) & this->GetPriorityMask()) != 0;
182 } 190 }
183 191
184 /// Gets the resource limit descriptor for this process 192 u32 GetCreateProcessFlags() const {
185 KResourceLimit* GetResourceLimit() const; 193 return static_cast<u32>(m_flags);
194 }
186 195
187 /// Gets the ideal CPU core ID for this process 196 bool Is64Bit() const {
188 u8 GetIdealCoreId() const { 197 return True(m_flags & Svc::CreateProcessFlag::Is64Bit);
189 return m_ideal_core;
190 } 198 }
191 199
192 /// Checks if the specified thread priority is valid. 200 KProcessAddress GetEntryPoint() const {
193 bool CheckThreadPriority(s32 prio) const { 201 return m_code_address;
194 return ((1ULL << prio) & GetPriorityMask()) != 0;
195 } 202 }
196 203
197 /// Gets the bitmask of allowed cores that this process' threads can run on. 204 size_t GetMainStackSize() const {
198 u64 GetCoreMask() const { 205 return m_main_thread_stack_size;
199 return m_capabilities.GetCoreMask();
200 } 206 }
201 207
202 /// Gets the bitmask of allowed thread priorities. 208 KMemoryManager::Pool GetMemoryPool() const {
203 u64 GetPriorityMask() const { 209 return m_memory_pool;
204 return m_capabilities.GetPriorityMask();
205 } 210 }
206 211
207 /// Gets the amount of secure memory to allocate for memory management. 212 u64 GetRandomEntropy(size_t i) const {
208 u32 GetSystemResourceSize() const { 213 return m_entropy[i];
209 return m_system_resource_size;
210 } 214 }
211 215
212 /// Gets the amount of secure memory currently in use for memory management. 216 bool IsApplication() const {
213 u32 GetSystemResourceUsage() const { 217 return m_is_application;
214 // On hardware, this returns the amount of system resource memory that has
215 // been used by the kernel. This is problematic for Yuzu to emulate, because
216 // system resource memory is used for page tables -- and yuzu doesn't really
217 // have a way to calculate how much memory is required for page tables for
218 // the current process at any given time.
219 // TODO: Is this even worth implementing? Games may retrieve this value via
220 // an SDK function that gets used + available system resource size for debug
221 // or diagnostic purposes. However, it seems unlikely that a game would make
222 // decisions based on how much system memory is dedicated to its page tables.
223 // Is returning a value other than zero wise?
224 return 0;
225 } 218 }
226 219
227 /// Whether this process is an AArch64 or AArch32 process. 220 bool IsDefaultApplicationSystemResource() const {
228 bool Is64BitProcess() const { 221 return m_is_default_application_system_resource;
229 return m_is_64bit_process;
230 } 222 }
231 223
232 bool IsSuspended() const { 224 bool IsSuspended() const {
233 return m_is_suspended; 225 return m_is_suspended;
234 } 226 }
235
236 void SetSuspended(bool suspended) { 227 void SetSuspended(bool suspended) {
237 m_is_suspended = suspended; 228 m_is_suspended = suspended;
238 } 229 }
239 230
240 /// Gets the total running time of the process instance in ticks. 231 Result Terminate();
241 u64 GetCPUTimeTicks() const { 232
242 return m_total_process_running_time_ticks; 233 bool IsTerminated() const {
234 return m_state == State::Terminated;
243 } 235 }
244 236
245 /// Updates the total running time, adding the given ticks to it. 237 bool IsPermittedSvc(u32 svc_id) const {
246 void UpdateCPUTimeTicks(u64 ticks) { 238 return m_capabilities.IsPermittedSvc(svc_id);
247 m_total_process_running_time_ticks += ticks;
248 } 239 }
249 240
250 /// Gets the process schedule count, used for thread yielding 241 bool IsPermittedInterrupt(s32 interrupt_id) const {
251 s64 GetScheduledCount() const { 242 return m_capabilities.IsPermittedInterrupt(interrupt_id);
252 return m_schedule_count;
253 } 243 }
254 244
255 /// Increments the process schedule count, used for thread yielding. 245 bool IsPermittedDebug() const {
256 void IncrementScheduledCount() { 246 return m_capabilities.IsPermittedDebug();
257 ++m_schedule_count;
258 } 247 }
259 248
260 void IncrementRunningThreadCount(); 249 bool CanForceDebug() const {
261 void DecrementRunningThreadCount(); 250 return m_capabilities.CanForceDebug();
251 }
262 252
263 void SetRunningThread(s32 core, KThread* thread, u64 idle_count) { 253 bool IsHbl() const {
264 m_running_threads[core] = thread; 254 return m_is_hbl;
265 m_running_thread_idle_counts[core] = idle_count;
266 } 255 }
267 256
268 void ClearRunningThread(KThread* thread) { 257 Kernel::KMemoryManager::Direction GetAllocateOption() const {
269 for (size_t i = 0; i < m_running_threads.size(); ++i) { 258 // TODO: property of the KPageTableBase
270 if (m_running_threads[i] == thread) { 259 return KMemoryManager::Direction::FromFront;
271 m_running_threads[i] = nullptr;
272 }
273 }
274 } 260 }
275 261
276 [[nodiscard]] KThread* GetRunningThread(s32 core) const { 262 ThreadList& GetThreadList() {
277 return m_running_threads[core]; 263 return m_thread_list;
264 }
265 const ThreadList& GetThreadList() const {
266 return m_thread_list;
278 } 267 }
279 268
269 bool EnterUserException();
270 bool LeaveUserException();
280 bool ReleaseUserException(KThread* thread); 271 bool ReleaseUserException(KThread* thread);
281 272
282 [[nodiscard]] KThread* GetPinnedThread(s32 core_id) const { 273 KThread* GetPinnedThread(s32 core_id) const {
283 ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); 274 ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
284 return m_pinned_threads[core_id]; 275 return m_pinned_threads[core_id];
285 } 276 }
286 277
287 /// Gets 8 bytes of random data for svcGetInfo RandomEntropy 278 const Svc::SvcAccessFlagSet& GetSvcPermissions() const {
288 u64 GetRandomEntropy(std::size_t index) const { 279 return m_capabilities.GetSvcPermissions();
289 return m_random_entropy.at(index);
290 } 280 }
291 281
292 /// Retrieves the total physical memory available to this process in bytes. 282 KResourceLimit* GetResourceLimit() const {
293 u64 GetTotalPhysicalMemoryAvailable(); 283 return m_resource_limit;
294
295 /// Retrieves the total physical memory available to this process in bytes,
296 /// without the size of the personal system resource heap added to it.
297 u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource();
298
299 /// Retrieves the total physical memory used by this process in bytes.
300 u64 GetTotalPhysicalMemoryUsed();
301
302 /// Retrieves the total physical memory used by this process in bytes,
303 /// without the size of the personal system resource heap added to it.
304 u64 GetTotalPhysicalMemoryUsedWithoutSystemResource();
305
306 /// Gets the list of all threads created with this process as their owner.
307 std::list<KThread*>& GetThreadList() {
308 return m_thread_list;
309 } 284 }
310 285
311 /// Registers a thread as being created under this process, 286 bool ReserveResource(Svc::LimitableResource which, s64 value);
312 /// adding it to this process' thread list. 287 bool ReserveResource(Svc::LimitableResource which, s64 value, s64 timeout);
313 void RegisterThread(KThread* thread); 288 void ReleaseResource(Svc::LimitableResource which, s64 value);
289 void ReleaseResource(Svc::LimitableResource which, s64 value, s64 hint);
314 290
315 /// Unregisters a thread from this process, removing it 291 KLightLock& GetStateLock() {
316 /// from this process' thread list. 292 return m_state_lock;
317 void UnregisterThread(KThread* thread); 293 }
294 KLightLock& GetListLock() {
295 return m_list_lock;
296 }
318 297
319 /// Retrieves the number of available threads for this process. 298 KPageTable& GetPageTable() {
320 u64 GetFreeThreadCount() const; 299 return m_page_table;
321 300 }
322 /// Clears the signaled state of the process if and only if it's signaled. 301 const KPageTable& GetPageTable() const {
323 /// 302 return m_page_table;
324 /// @pre The process must not be already terminated. If this is called on a 303 }
325 /// terminated process, then ResultInvalidState will be returned.
326 ///
327 /// @pre The process must be in a signaled state. If this is called on a
328 /// process instance that is not signaled, ResultInvalidState will be
329 /// returned.
330 Result Reset();
331 304
332 /** 305 KHandleTable& GetHandleTable() {
333 * Loads process-specifics configuration info with metadata provided 306 return m_handle_table;
334 * by an executable. 307 }
335 * 308 const KHandleTable& GetHandleTable() const {
336 * @param metadata The provided metadata to load process specific info from. 309 return m_handle_table;
337 * 310 }
338 * @returns ResultSuccess if all relevant metadata was able to be
339 * loaded and parsed. Otherwise, an error code is returned.
340 */
341 Result LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
342 bool is_hbl);
343 311
344 /** 312 size_t GetUsedUserPhysicalMemorySize() const;
345 * Starts the main application thread for this process. 313 size_t GetTotalUserPhysicalMemorySize() const;
346 * 314 size_t GetUsedNonSystemUserPhysicalMemorySize() const;
347 * @param main_thread_priority The priority for the main thread. 315 size_t GetTotalNonSystemUserPhysicalMemorySize() const;
348 * @param stack_size The stack size for the main thread in bytes.
349 */
350 void Run(s32 main_thread_priority, u64 stack_size);
351 316
352 /** 317 Result AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
353 * Prepares a process for termination by stopping all of its threads 318 void RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size);
354 * and clearing any other resources.
355 */
356 void PrepareForTermination();
357 319
358 void LoadModule(CodeSet code_set, KProcessAddress base_addr); 320 Result CreateThreadLocalRegion(KProcessAddress* out);
321 Result DeleteThreadLocalRegion(KProcessAddress addr);
359 322
360 bool IsInitialized() const override { 323 KProcessAddress GetProcessLocalRegionAddress() const {
361 return m_is_initialized; 324 return m_plr_address;
362 } 325 }
363 326
364 static void PostDestroy(uintptr_t arg) {} 327 KThread* GetExceptionThread() const {
365 328 return m_exception_thread;
366 void Finalize() override;
367
368 u64 GetId() const override {
369 return GetProcessId();
370 } 329 }
371 330
372 bool IsHbl() const { 331 void AddCpuTime(s64 diff) {
373 return m_is_hbl; 332 m_cpu_time += diff;
333 }
334 s64 GetCpuTime() {
335 return m_cpu_time.load();
374 } 336 }
375 337
376 bool IsSignaled() const override; 338 s64 GetScheduledCount() const {
377 339 return m_schedule_count;
378 void DoWorkerTaskImpl(); 340 }
341 void IncrementScheduledCount() {
342 ++m_schedule_count;
343 }
379 344
380 Result SetActivity(ProcessActivity activity); 345 void IncrementRunningThreadCount();
346 void DecrementRunningThreadCount();
381 347
382 void PinCurrentThread(s32 core_id); 348 size_t GetRequiredSecureMemorySizeNonDefault() const {
383 void UnpinCurrentThread(s32 core_id); 349 if (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) {
384 void UnpinThread(KThread* thread); 350 auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
351 return secure_system_resource->CalculateRequiredSecureMemorySize();
352 }
385 353
386 KLightLock& GetStateLock() { 354 return 0;
387 return m_state_lock;
388 } 355 }
389 356
390 Result AddSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size); 357 size_t GetRequiredSecureMemorySize() const {
391 void RemoveSharedMemory(KSharedMemory* shmem, KProcessAddress address, size_t size); 358 if (m_system_resource->IsSecureResource()) {
392 359 auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
393 /////////////////////////////////////////////////////////////////////////////////////////////// 360 return secure_system_resource->CalculateRequiredSecureMemorySize();
394 // Thread-local storage management 361 }
395
396 // Marks the next available region as used and returns the address of the slot.
397 [[nodiscard]] Result CreateThreadLocalRegion(KProcessAddress* out);
398 362
399 // Frees a used TLS slot identified by the given address 363 return 0;
400 Result DeleteThreadLocalRegion(KProcessAddress addr); 364 }
401 365
402 /////////////////////////////////////////////////////////////////////////////////////////////// 366 size_t GetTotalSystemResourceSize() const {
403 // Debug watchpoint management 367 if (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) {
368 auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
369 return secure_system_resource->GetSize();
370 }
404 371
405 // Attempts to insert a watchpoint into a free slot. Returns false if none are available. 372 return 0;
406 bool InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type); 373 }
407 374
408 // Attempts to remove the watchpoint specified by the given parameters. 375 size_t GetUsedSystemResourceSize() const {
409 bool RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type); 376 if (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) {
377 auto* secure_system_resource = static_cast<KSecureSystemResource*>(m_system_resource);
378 return secure_system_resource->GetUsedSize();
379 }
410 380
411 const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const { 381 return 0;
412 return m_watchpoints;
413 } 382 }
414 383
415 const std::string& GetName() { 384 void SetRunningThread(s32 core, KThread* thread, u64 idle_count, u64 switch_count) {
416 return name; 385 m_running_threads[core] = thread;
386 m_running_thread_idle_counts[core] = idle_count;
387 m_running_thread_switch_counts[core] = switch_count;
417 } 388 }
418 389
419private: 390 void ClearRunningThread(KThread* thread) {
420 void PinThread(s32 core_id, KThread* thread) { 391 for (size_t i = 0; i < m_running_threads.size(); ++i) {
421 ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); 392 if (m_running_threads[i] == thread) {
422 ASSERT(thread != nullptr); 393 m_running_threads[i] = nullptr;
423 ASSERT(m_pinned_threads[core_id] == nullptr); 394 }
424 m_pinned_threads[core_id] = thread; 395 }
425 } 396 }
426 397
427 void UnpinThread(s32 core_id, KThread* thread) { 398 const KSystemResource& GetSystemResource() const {
428 ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); 399 return *m_system_resource;
429 ASSERT(thread != nullptr);
430 ASSERT(m_pinned_threads[core_id] == thread);
431 m_pinned_threads[core_id] = nullptr;
432 } 400 }
433 401
434 void FinalizeHandleTable() { 402 const KMemoryBlockSlabManager& GetMemoryBlockSlabManager() const {
435 // Finalize the table. 403 return m_system_resource->GetMemoryBlockSlabManager();
436 m_handle_table.Finalize(); 404 }
437 405 const KBlockInfoManager& GetBlockInfoManager() const {
438 // Note that the table is finalized. 406 return m_system_resource->GetBlockInfoManager();
439 m_is_handle_table_initialized = false; 407 }
408 const KPageTableManager& GetPageTableManager() const {
409 return m_system_resource->GetPageTableManager();
440 } 410 }
441 411
442 void ChangeState(State new_state); 412 KThread* GetRunningThread(s32 core) const {
443 413 return m_running_threads[core];
444 /// Allocates the main thread stack for the process, given the stack size in bytes. 414 }
445 Result AllocateMainThreadStack(std::size_t stack_size); 415 u64 GetRunningThreadIdleCount(s32 core) const {
446 416 return m_running_thread_idle_counts[core];
447 /// Memory manager for this process 417 }
448 KPageTable m_page_table; 418 u64 GetRunningThreadSwitchCount(s32 core) const {
449 419 return m_running_thread_switch_counts[core];
450 /// Current status of the process 420 }
451 State m_state{};
452 421
453 /// The ID of this process 422 void RegisterThread(KThread* thread);
454 u64 m_process_id = 0; 423 void UnregisterThread(KThread* thread);
455 424
456 /// Title ID corresponding to the process 425 Result Run(s32 priority, size_t stack_size);
457 u64 m_program_id = 0;
458 426
459 /// Specifies additional memory to be reserved for the process's memory management by the 427 Result Reset();
460 /// system. When this is non-zero, secure memory is allocated and used for page table allocation
461 /// instead of using the normal global page tables/memory block management.
462 u32 m_system_resource_size = 0;
463 428
464 /// Resource limit descriptor for this process 429 void SetDebugBreak() {
465 KResourceLimit* m_resource_limit{}; 430 if (m_state == State::RunningAttached) {
431 this->ChangeState(State::DebugBreak);
432 }
433 }
466 434
467 KVirtualAddress m_system_resource_address{}; 435 void SetAttached() {
436 if (m_state == State::DebugBreak) {
437 this->ChangeState(State::RunningAttached);
438 }
439 }
468 440
469 /// The ideal CPU core for this process, threads are scheduled on this core by default. 441 Result SetActivity(Svc::ProcessActivity activity);
470 u8 m_ideal_core = 0;
471 442
472 /// Contains the parsed process capability descriptors. 443 void PinCurrentThread();
473 ProcessCapabilities m_capabilities; 444 void UnpinCurrentThread();
445 void UnpinThread(KThread* thread);
474 446
475 /// Whether or not this process is AArch64, or AArch32. 447 void SignalConditionVariable(uintptr_t cv_key, int32_t count) {
476 /// By default, we currently assume this is true, unless otherwise 448 return m_cond_var.Signal(cv_key, count);
477 /// specified by metadata provided to the process during loading. 449 }
478 bool m_is_64bit_process = true;
479 450
480 /// Total running time for the process in ticks. 451 Result WaitConditionVariable(KProcessAddress address, uintptr_t cv_key, u32 tag, s64 ns) {
481 std::atomic<u64> m_total_process_running_time_ticks = 0; 452 R_RETURN(m_cond_var.Wait(address, cv_key, tag, ns));
453 }
482 454
483 /// Per-process handle table for storing created object handles in. 455 Result SignalAddressArbiter(uintptr_t address, Svc::SignalType signal_type, s32 value,
484 KHandleTable m_handle_table; 456 s32 count) {
457 R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
458 }
485 459
486 /// Per-process address arbiter. 460 Result WaitAddressArbiter(uintptr_t address, Svc::ArbitrationType arb_type, s32 value,
487 KAddressArbiter m_address_arbiter; 461 s64 timeout) {
462 R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
463 }
488 464
489 /// The per-process mutex lock instance used for handling various 465 Result GetThreadList(s32* out_num_threads, KProcessAddress out_thread_ids, s32 max_out_count);
490 /// forms of services, such as lock arbitration, and condition
491 /// variable related facilities.
492 KConditionVariable m_condition_var;
493 466
494 /// Address indicating the location of the process' dedicated TLS region. 467 static void Switch(KProcess* cur_process, KProcess* next_process);
495 KProcessAddress m_plr_address = 0;
496 468
497 /// Address indicating the location of the process's entry point. 469public:
498 KProcessAddress m_code_address = 0; 470 // Attempts to insert a watchpoint into a free slot. Returns false if none are available.
471 bool InsertWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
499 472
500 /// Random values for svcGetInfo RandomEntropy 473 // Attempts to remove the watchpoint specified by the given parameters.
501 std::array<u64, RANDOM_ENTROPY_SIZE> m_random_entropy{}; 474 bool RemoveWatchpoint(KProcessAddress addr, u64 size, DebugWatchpointType type);
502 475
503 /// List of threads that are running with this process as their owner. 476 const std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS>& GetWatchpoints() const {
504 std::list<KThread*> m_thread_list; 477 return m_watchpoints;
478 }
505 479
506 /// List of shared memory that are running with this process as their owner. 480public:
507 std::list<KSharedMemoryInfo*> m_shared_memory_list; 481 Result LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size,
482 bool is_hbl);
508 483
509 /// Address of the top of the main thread's stack 484 void LoadModule(CodeSet code_set, KProcessAddress base_addr);
510 KProcessAddress m_main_thread_stack_top{};
511 485
512 /// Size of the main thread's stack 486 Core::Memory::Memory& GetMemory() const;
513 std::size_t m_main_thread_stack_size{};
514 487
515 /// Memory usage capacity for the process 488public:
516 std::size_t m_memory_usage_capacity{}; 489 // Overridden parent functions.
490 bool IsInitialized() const override {
491 return m_is_initialized;
492 }
517 493
518 /// Process total image size 494 static void PostDestroy(uintptr_t arg) {}
519 std::size_t m_image_size{};
520 495
521 /// Schedule count of this process 496 void Finalize() override;
522 s64 m_schedule_count{};
523 497
524 size_t m_memory_release_hint{}; 498 u64 GetIdImpl() const {
499 return this->GetProcessId();
500 }
501 u64 GetId() const override {
502 return this->GetIdImpl();
503 }
525 504
526 std::string name{}; 505 virtual bool IsSignaled() const override {
506 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
507 return m_is_signaled;
508 }
527 509
528 bool m_is_signaled{}; 510 void DoWorkerTaskImpl();
529 bool m_is_suspended{};
530 bool m_is_immortal{};
531 bool m_is_handle_table_initialized{};
532 bool m_is_initialized{};
533 bool m_is_hbl{};
534 511
535 std::atomic<u16> m_num_running_threads{}; 512private:
513 void ChangeState(State new_state) {
514 if (m_state != new_state) {
515 m_state = new_state;
516 m_is_signaled = true;
517 this->NotifyAvailable();
518 }
519 }
536 520
537 std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_running_threads{}; 521 Result InitializeHandleTable(s32 size) {
538 std::array<u64, Core::Hardware::NUM_CPU_CORES> m_running_thread_idle_counts{}; 522 // Try to initialize the handle table.
539 std::array<KThread*, Core::Hardware::NUM_CPU_CORES> m_pinned_threads{}; 523 R_TRY(m_handle_table.Initialize(size));
540 std::array<DebugWatchpoint, Core::Hardware::NUM_WATCHPOINTS> m_watchpoints{};
541 std::map<KProcessAddress, u64> m_debug_page_refcounts;
542 524
543 KThread* m_exception_thread{}; 525 // We succeeded, so note that we did.
526 m_is_handle_table_initialized = true;
527 R_SUCCEED();
528 }
544 529
545 KLightLock m_state_lock; 530 void FinalizeHandleTable() {
546 KLightLock m_list_lock; 531 // Finalize the table.
532 m_handle_table.Finalize();
547 533
548 using TLPTree = 534 // Note that the table is finalized.
549 Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>; 535 m_is_handle_table_initialized = false;
550 using TLPIterator = TLPTree::iterator; 536 }
551 TLPTree m_fully_used_tlp_tree;
552 TLPTree m_partially_used_tlp_tree;
553}; 537};
554 538
555} // namespace Kernel 539} // namespace Kernel
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index d8143c650..1bce63a56 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -190,7 +190,7 @@ u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
190 if (m_state.should_count_idle) { 190 if (m_state.should_count_idle) {
191 if (highest_thread != nullptr) [[likely]] { 191 if (highest_thread != nullptr) [[likely]] {
192 if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) { 192 if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) {
193 process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count); 193 process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count, 0);
194 } 194 }
195 } else { 195 } else {
196 m_state.idle_count++; 196 m_state.idle_count++;
@@ -356,7 +356,7 @@ void KScheduler::SwitchThread(KThread* next_thread) {
356 const s64 tick_diff = cur_tick - prev_tick; 356 const s64 tick_diff = cur_tick - prev_tick;
357 cur_thread->AddCpuTime(m_core_id, tick_diff); 357 cur_thread->AddCpuTime(m_core_id, tick_diff);
358 if (cur_process != nullptr) { 358 if (cur_process != nullptr) {
359 cur_process->UpdateCPUTimeTicks(tick_diff); 359 cur_process->AddCpuTime(tick_diff);
360 } 360 }
361 m_last_context_switch_time = cur_tick; 361 m_last_context_switch_time = cur_tick;
362 362
diff --git a/src/core/hle/kernel/k_system_resource.cpp b/src/core/hle/kernel/k_system_resource.cpp
index e6c8d589a..07e92aa80 100644
--- a/src/core/hle/kernel/k_system_resource.cpp
+++ b/src/core/hle/kernel/k_system_resource.cpp
@@ -1,25 +1,100 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project 1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include "core/core.h"
5#include "core/hle/kernel/k_scoped_resource_reservation.h"
4#include "core/hle/kernel/k_system_resource.h" 6#include "core/hle/kernel/k_system_resource.h"
5 7
6namespace Kernel { 8namespace Kernel {
7 9
8Result KSecureSystemResource::Initialize(size_t size, KResourceLimit* resource_limit, 10Result KSecureSystemResource::Initialize(size_t size, KResourceLimit* resource_limit,
9 KMemoryManager::Pool pool) { 11 KMemoryManager::Pool pool) {
10 // Unimplemented 12 // Set members.
11 UNREACHABLE(); 13 m_resource_limit = resource_limit;
14 m_resource_size = size;
15 m_resource_pool = pool;
16
17 // Determine required size for our secure resource.
18 const size_t secure_size = this->CalculateRequiredSecureMemorySize();
19
20 // Reserve memory for our secure resource.
21 KScopedResourceReservation memory_reservation(
22 m_resource_limit, Svc::LimitableResource::PhysicalMemoryMax, secure_size);
23 R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
24
25 // Allocate secure memory.
26 R_TRY(KSystemControl::AllocateSecureMemory(m_kernel, std::addressof(m_resource_address),
27 m_resource_size, static_cast<u32>(m_resource_pool)));
28 ASSERT(m_resource_address != 0);
29
30 // Ensure we clean up the secure memory, if we fail past this point.
31 ON_RESULT_FAILURE {
32 KSystemControl::FreeSecureMemory(m_kernel, m_resource_address, m_resource_size,
33 static_cast<u32>(m_resource_pool));
34 };
35
36 // Check that our allocation is bigger than the reference counts needed for it.
37 const size_t rc_size =
38 Common::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(m_resource_size), PageSize);
39 R_UNLESS(m_resource_size > rc_size, ResultOutOfMemory);
40
41 // Get resource pointer.
42 KPhysicalAddress resource_paddr =
43 KPageTable::GetHeapPhysicalAddress(m_kernel.MemoryLayout(), m_resource_address);
44 auto* resource =
45 m_kernel.System().DeviceMemory().GetPointer<KPageTableManager::RefCount>(resource_paddr);
46
47 // Initialize slab heaps.
48 m_dynamic_page_manager.Initialize(m_resource_address + rc_size, m_resource_size - rc_size,
49 PageSize);
50 m_page_table_heap.Initialize(std::addressof(m_dynamic_page_manager), 0, resource);
51 m_memory_block_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
52 m_block_info_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
53
54 // Initialize managers.
55 m_page_table_manager.Initialize(std::addressof(m_dynamic_page_manager),
56 std::addressof(m_page_table_heap));
57 m_memory_block_slab_manager.Initialize(std::addressof(m_dynamic_page_manager),
58 std::addressof(m_memory_block_heap));
59 m_block_info_manager.Initialize(std::addressof(m_dynamic_page_manager),
60 std::addressof(m_block_info_heap));
61
62 // Set our managers.
63 this->SetManagers(m_memory_block_slab_manager, m_block_info_manager, m_page_table_manager);
64
65 // Commit the memory reservation.
66 memory_reservation.Commit();
67
68 // Open reference to our resource limit.
69 m_resource_limit->Open();
70
71 // Set ourselves as initialized.
72 m_is_initialized = true;
73
74 R_SUCCEED();
12} 75}
13 76
14void KSecureSystemResource::Finalize() { 77void KSecureSystemResource::Finalize() {
15 // Unimplemented 78 // Check that we have no outstanding allocations.
16 UNREACHABLE(); 79 ASSERT(m_memory_block_slab_manager.GetUsed() == 0);
80 ASSERT(m_block_info_manager.GetUsed() == 0);
81 ASSERT(m_page_table_manager.GetUsed() == 0);
82
83 // Free our secure memory.
84 KSystemControl::FreeSecureMemory(m_kernel, m_resource_address, m_resource_size,
85 static_cast<u32>(m_resource_pool));
86
87 // Release the memory reservation.
88 m_resource_limit->Release(Svc::LimitableResource::PhysicalMemoryMax,
89 this->CalculateRequiredSecureMemorySize());
90
91 // Close reference to our resource limit.
92 m_resource_limit->Close();
17} 93}
18 94
19size_t KSecureSystemResource::CalculateRequiredSecureMemorySize(size_t size, 95size_t KSecureSystemResource::CalculateRequiredSecureMemorySize(size_t size,
20 KMemoryManager::Pool pool) { 96 KMemoryManager::Pool pool) {
21 // Unimplemented 97 return KSystemControl::CalculateRequiredSecureMemorySize(size, static_cast<u32>(pool));
22 UNREACHABLE();
23} 98}
24 99
25} // namespace Kernel 100} // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 7df8fd7f7..a882be403 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -122,16 +122,15 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress
122 case ThreadType::Main: 122 case ThreadType::Main:
123 ASSERT(arg == 0); 123 ASSERT(arg == 0);
124 [[fallthrough]]; 124 [[fallthrough]];
125 case ThreadType::HighPriority:
126 [[fallthrough]];
127 case ThreadType::Dummy:
128 [[fallthrough]];
129 case ThreadType::User: 125 case ThreadType::User:
130 ASSERT(((owner == nullptr) || 126 ASSERT(((owner == nullptr) ||
131 (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask())); 127 (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
132 ASSERT(((owner == nullptr) || (prio > Svc::LowestThreadPriority) || 128 ASSERT(((owner == nullptr) || (prio > Svc::LowestThreadPriority) ||
133 (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask())); 129 (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask()));
134 break; 130 break;
131 case ThreadType::HighPriority:
132 case ThreadType::Dummy:
133 break;
135 case ThreadType::Kernel: 134 case ThreadType::Kernel:
136 UNIMPLEMENTED(); 135 UNIMPLEMENTED();
137 break; 136 break;
@@ -403,7 +402,7 @@ void KThread::StartTermination() {
403 if (m_parent != nullptr) { 402 if (m_parent != nullptr) {
404 m_parent->ReleaseUserException(this); 403 m_parent->ReleaseUserException(this);
405 if (m_parent->GetPinnedThread(GetCurrentCoreId(m_kernel)) == this) { 404 if (m_parent->GetPinnedThread(GetCurrentCoreId(m_kernel)) == this) {
406 m_parent->UnpinCurrentThread(m_core_id); 405 m_parent->UnpinCurrentThread();
407 } 406 }
408 } 407 }
409 408
@@ -820,7 +819,7 @@ void KThread::CloneFpuStatus() {
820 ASSERT(this->GetOwnerProcess() != nullptr); 819 ASSERT(this->GetOwnerProcess() != nullptr);
821 ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(m_kernel)); 820 ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(m_kernel));
822 821
823 if (this->GetOwnerProcess()->Is64BitProcess()) { 822 if (this->GetOwnerProcess()->Is64Bit()) {
824 // Clone FPSR and FPCR. 823 // Clone FPSR and FPCR.
825 ThreadContext64 cur_ctx{}; 824 ThreadContext64 cur_ctx{};
826 m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx); 825 m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx);
@@ -923,7 +922,7 @@ Result KThread::GetThreadContext3(Common::ScratchBuffer<u8>& out) {
923 922
924 // If we're not terminating, get the thread's user context. 923 // If we're not terminating, get the thread's user context.
925 if (!this->IsTerminationRequested()) { 924 if (!this->IsTerminationRequested()) {
926 if (m_parent->Is64BitProcess()) { 925 if (m_parent->Is64Bit()) {
927 // Mask away mode bits, interrupt bits, IL bit, and other reserved bits. 926 // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
928 auto context = GetContext64(); 927 auto context = GetContext64();
929 context.pstate &= 0xFF0FFE20; 928 context.pstate &= 0xFF0FFE20;
@@ -1174,6 +1173,9 @@ Result KThread::Run() {
1174 owner->IncrementRunningThreadCount(); 1173 owner->IncrementRunningThreadCount();
1175 } 1174 }
1176 1175
1176 // Open a reference, now that we're running.
1177 this->Open();
1178
1177 // Set our state and finish. 1179 // Set our state and finish.
1178 this->SetState(ThreadState::Runnable); 1180 this->SetState(ThreadState::Runnable);
1179 1181
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index d178c2453..e1f80b04f 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -721,6 +721,7 @@ private:
721 // For core KThread implementation 721 // For core KThread implementation
722 ThreadContext32 m_thread_context_32{}; 722 ThreadContext32 m_thread_context_32{};
723 ThreadContext64 m_thread_context_64{}; 723 ThreadContext64 m_thread_context_64{};
724 Common::IntrusiveListNode m_process_list_node;
724 Common::IntrusiveRedBlackTreeNode m_condvar_arbiter_tree_node{}; 725 Common::IntrusiveRedBlackTreeNode m_condvar_arbiter_tree_node{};
725 s32 m_priority{}; 726 s32 m_priority{};
726 using ConditionVariableThreadTreeTraits = 727 using ConditionVariableThreadTreeTraits =
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 24433d32b..ac76c71a8 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -101,35 +101,31 @@ struct KernelCore::Impl {
101 101
102 void InitializeCores() { 102 void InitializeCores() {
103 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { 103 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
104 cores[core_id]->Initialize((*application_process).Is64BitProcess()); 104 cores[core_id]->Initialize((*application_process).Is64Bit());
105 system.ApplicationMemory().SetCurrentPageTable(*application_process, core_id); 105 system.ApplicationMemory().SetCurrentPageTable(*application_process, core_id);
106 } 106 }
107 } 107 }
108 108
109 void CloseApplicationProcess() { 109 void TerminateApplicationProcess() {
110 KProcess* old_process = application_process.exchange(nullptr); 110 application_process.load()->Terminate();
111 if (old_process == nullptr) {
112 return;
113 }
114
115 // old_process->Close();
116 // TODO: The process should be destroyed based on accurate ref counting after
117 // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
118 old_process->Finalize();
119 old_process->Destroy();
120 } 111 }
121 112
122 void Shutdown() { 113 void Shutdown() {
123 is_shutting_down.store(true, std::memory_order_relaxed); 114 is_shutting_down.store(true, std::memory_order_relaxed);
124 SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); }); 115 SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
125 116
126 process_list.clear();
127
128 CloseServices(); 117 CloseServices();
129 118
119 auto* old_process = application_process.exchange(nullptr);
120 if (old_process) {
121 old_process->Close();
122 }
123
124 process_list.clear();
125
130 next_object_id = 0; 126 next_object_id = 0;
131 next_kernel_process_id = KProcess::InitialKIPIDMin; 127 next_kernel_process_id = KProcess::InitialProcessIdMin;
132 next_user_process_id = KProcess::ProcessIDMin; 128 next_user_process_id = KProcess::ProcessIdMin;
133 next_thread_id = 1; 129 next_thread_id = 1;
134 130
135 global_handle_table->Finalize(); 131 global_handle_table->Finalize();
@@ -176,8 +172,6 @@ struct KernelCore::Impl {
176 } 172 }
177 } 173 }
178 174
179 CloseApplicationProcess();
180
181 // Track kernel objects that were not freed on shutdown 175 // Track kernel objects that were not freed on shutdown
182 { 176 {
183 std::scoped_lock lk{registered_objects_lock}; 177 std::scoped_lock lk{registered_objects_lock};
@@ -344,6 +338,8 @@ struct KernelCore::Impl {
344 // Create the system page table managers. 338 // Create the system page table managers.
345 app_system_resource = std::make_unique<KSystemResource>(kernel); 339 app_system_resource = std::make_unique<KSystemResource>(kernel);
346 sys_system_resource = std::make_unique<KSystemResource>(kernel); 340 sys_system_resource = std::make_unique<KSystemResource>(kernel);
341 KAutoObject::Create(std::addressof(*app_system_resource));
342 KAutoObject::Create(std::addressof(*sys_system_resource));
347 343
348 // Set the managers for the system resources. 344 // Set the managers for the system resources.
349 app_system_resource->SetManagers(*app_memory_block_manager, *app_block_info_manager, 345 app_system_resource->SetManagers(*app_memory_block_manager, *app_block_info_manager,
@@ -368,6 +364,7 @@ struct KernelCore::Impl {
368 364
369 void MakeApplicationProcess(KProcess* process) { 365 void MakeApplicationProcess(KProcess* process) {
370 application_process = process; 366 application_process = process;
367 application_process.load()->Open();
371 } 368 }
372 369
373 static inline thread_local u8 host_thread_id = UINT8_MAX; 370 static inline thread_local u8 host_thread_id = UINT8_MAX;
@@ -792,8 +789,8 @@ struct KernelCore::Impl {
792 std::mutex registered_in_use_objects_lock; 789 std::mutex registered_in_use_objects_lock;
793 790
794 std::atomic<u32> next_object_id{0}; 791 std::atomic<u32> next_object_id{0};
795 std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin}; 792 std::atomic<u64> next_kernel_process_id{KProcess::InitialProcessIdMin};
796 std::atomic<u64> next_user_process_id{KProcess::ProcessIDMin}; 793 std::atomic<u64> next_user_process_id{KProcess::ProcessIdMin};
797 std::atomic<u64> next_thread_id{1}; 794 std::atomic<u64> next_thread_id{1};
798 795
799 // Lists all processes that exist in the current session. 796 // Lists all processes that exist in the current session.
@@ -924,10 +921,6 @@ const KProcess* KernelCore::ApplicationProcess() const {
924 return impl->application_process; 921 return impl->application_process;
925} 922}
926 923
927void KernelCore::CloseApplicationProcess() {
928 impl->CloseApplicationProcess();
929}
930
931const std::vector<KProcess*>& KernelCore::GetProcessList() const { 924const std::vector<KProcess*>& KernelCore::GetProcessList() const {
932 return impl->process_list; 925 return impl->process_list;
933} 926}
@@ -1128,8 +1121,8 @@ std::jthread KernelCore::RunOnHostCoreProcess(std::string&& process_name,
1128 std::function<void()> func) { 1121 std::function<void()> func) {
1129 // Make a new process. 1122 // Make a new process.
1130 KProcess* process = KProcess::Create(*this); 1123 KProcess* process = KProcess::Create(*this);
1131 ASSERT(R_SUCCEEDED(KProcess::Initialize(process, System(), "", KProcess::ProcessType::Userland, 1124 ASSERT(R_SUCCEEDED(
1132 GetSystemResourceLimit()))); 1125 process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
1133 1126
1134 // Ensure that we don't hold onto any extra references. 1127 // Ensure that we don't hold onto any extra references.
1135 SCOPE_EXIT({ process->Close(); }); 1128 SCOPE_EXIT({ process->Close(); });
@@ -1156,8 +1149,8 @@ void KernelCore::RunOnGuestCoreProcess(std::string&& process_name, std::function
1156 1149
1157 // Make a new process. 1150 // Make a new process.
1158 KProcess* process = KProcess::Create(*this); 1151 KProcess* process = KProcess::Create(*this);
1159 ASSERT(R_SUCCEEDED(KProcess::Initialize(process, System(), "", KProcess::ProcessType::Userland, 1152 ASSERT(R_SUCCEEDED(
1160 GetSystemResourceLimit()))); 1153 process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
1161 1154
1162 // Ensure that we don't hold onto any extra references. 1155 // Ensure that we don't hold onto any extra references.
1163 SCOPE_EXIT({ process->Close(); }); 1156 SCOPE_EXIT({ process->Close(); });
@@ -1266,7 +1259,8 @@ const Kernel::KSharedMemory& KernelCore::GetHidBusSharedMem() const {
1266 1259
1267void KernelCore::SuspendApplication(bool suspended) { 1260void KernelCore::SuspendApplication(bool suspended) {
1268 const bool should_suspend{exception_exited || suspended}; 1261 const bool should_suspend{exception_exited || suspended};
1269 const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable; 1262 const auto activity =
1263 should_suspend ? Svc::ProcessActivity::Paused : Svc::ProcessActivity::Runnable;
1270 1264
1271 // Get the application process. 1265 // Get the application process.
1272 KScopedAutoObject<KProcess> process = ApplicationProcess(); 1266 KScopedAutoObject<KProcess> process = ApplicationProcess();
@@ -1300,6 +1294,8 @@ void KernelCore::SuspendApplication(bool suspended) {
1300} 1294}
1301 1295
1302void KernelCore::ShutdownCores() { 1296void KernelCore::ShutdownCores() {
1297 impl->TerminateApplicationProcess();
1298
1303 KScopedSchedulerLock lk{*this}; 1299 KScopedSchedulerLock lk{*this};
1304 1300
1305 for (auto* thread : impl->shutdown_threads) { 1301 for (auto* thread : impl->shutdown_threads) {
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index d5b08eeb5..d8086c0ea 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -134,9 +134,6 @@ public:
134 /// Retrieves a const pointer to the application process. 134 /// Retrieves a const pointer to the application process.
135 const KProcess* ApplicationProcess() const; 135 const KProcess* ApplicationProcess() const;
136 136
137 /// Closes the application process.
138 void CloseApplicationProcess();
139
140 /// Retrieves the list of processes. 137 /// Retrieves the list of processes.
141 const std::vector<KProcess*>& GetProcessList() const; 138 const std::vector<KProcess*>& GetProcessList() const;
142 139
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 871d541d4..b76683969 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -4426,7 +4426,7 @@ void Call(Core::System& system, u32 imm) {
4426 auto& kernel = system.Kernel(); 4426 auto& kernel = system.Kernel();
4427 kernel.EnterSVCProfile(); 4427 kernel.EnterSVCProfile();
4428 4428
4429 if (GetCurrentProcess(system.Kernel()).Is64BitProcess()) { 4429 if (GetCurrentProcess(system.Kernel()).Is64Bit()) {
4430 Call64(system, imm); 4430 Call64(system, imm);
4431 } else { 4431 } else {
4432 Call32(system, imm); 4432 Call32(system, imm);
diff --git a/src/core/hle/kernel/svc/svc_info.cpp b/src/core/hle/kernel/svc/svc_info.cpp
index f99964028..ada998772 100644
--- a/src/core/hle/kernel/svc/svc_info.cpp
+++ b/src/core/hle/kernel/svc/svc_info.cpp
@@ -86,20 +86,19 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
86 R_SUCCEED(); 86 R_SUCCEED();
87 87
88 case InfoType::TotalMemorySize: 88 case InfoType::TotalMemorySize:
89 *result = process->GetTotalPhysicalMemoryAvailable(); 89 *result = process->GetTotalUserPhysicalMemorySize();
90 R_SUCCEED(); 90 R_SUCCEED();
91 91
92 case InfoType::UsedMemorySize: 92 case InfoType::UsedMemorySize:
93 *result = process->GetTotalPhysicalMemoryUsed(); 93 *result = process->GetUsedUserPhysicalMemorySize();
94 R_SUCCEED(); 94 R_SUCCEED();
95 95
96 case InfoType::SystemResourceSizeTotal: 96 case InfoType::SystemResourceSizeTotal:
97 *result = process->GetSystemResourceSize(); 97 *result = process->GetTotalSystemResourceSize();
98 R_SUCCEED(); 98 R_SUCCEED();
99 99
100 case InfoType::SystemResourceSizeUsed: 100 case InfoType::SystemResourceSizeUsed:
101 LOG_WARNING(Kernel_SVC, "(STUBBED) Attempted to query system resource usage"); 101 *result = process->GetUsedSystemResourceSize();
102 *result = process->GetSystemResourceUsage();
103 R_SUCCEED(); 102 R_SUCCEED();
104 103
105 case InfoType::ProgramId: 104 case InfoType::ProgramId:
@@ -111,20 +110,29 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
111 R_SUCCEED(); 110 R_SUCCEED();
112 111
113 case InfoType::TotalNonSystemMemorySize: 112 case InfoType::TotalNonSystemMemorySize:
114 *result = process->GetTotalPhysicalMemoryAvailableWithoutSystemResource(); 113 *result = process->GetTotalNonSystemUserPhysicalMemorySize();
115 R_SUCCEED(); 114 R_SUCCEED();
116 115
117 case InfoType::UsedNonSystemMemorySize: 116 case InfoType::UsedNonSystemMemorySize:
118 *result = process->GetTotalPhysicalMemoryUsedWithoutSystemResource(); 117 *result = process->GetUsedNonSystemUserPhysicalMemorySize();
119 R_SUCCEED(); 118 R_SUCCEED();
120 119
121 case InfoType::IsApplication: 120 case InfoType::IsApplication:
122 LOG_WARNING(Kernel_SVC, "(STUBBED) Assuming process is application"); 121 LOG_WARNING(Kernel_SVC, "(STUBBED) Assuming process is application");
123 *result = true; 122 *result = process->IsApplication();
124 R_SUCCEED(); 123 R_SUCCEED();
125 124
126 case InfoType::FreeThreadCount: 125 case InfoType::FreeThreadCount:
127 *result = process->GetFreeThreadCount(); 126 if (KResourceLimit* resource_limit = process->GetResourceLimit();
127 resource_limit != nullptr) {
128 const auto current_value =
129 resource_limit->GetCurrentValue(Svc::LimitableResource::ThreadCountMax);
130 const auto limit_value =
131 resource_limit->GetLimitValue(Svc::LimitableResource::ThreadCountMax);
132 *result = limit_value - current_value;
133 } else {
134 *result = 0;
135 }
128 R_SUCCEED(); 136 R_SUCCEED();
129 137
130 default: 138 default:
@@ -161,7 +169,7 @@ Result GetInfo(Core::System& system, u64* result, InfoType info_id_type, Handle
161 169
162 case InfoType::RandomEntropy: 170 case InfoType::RandomEntropy:
163 R_UNLESS(handle == 0, ResultInvalidHandle); 171 R_UNLESS(handle == 0, ResultInvalidHandle);
164 R_UNLESS(info_sub_id < KProcess::RANDOM_ENTROPY_SIZE, ResultInvalidCombination); 172 R_UNLESS(info_sub_id < 4, ResultInvalidCombination);
165 173
166 *result = GetCurrentProcess(system.Kernel()).GetRandomEntropy(info_sub_id); 174 *result = GetCurrentProcess(system.Kernel()).GetRandomEntropy(info_sub_id);
167 R_SUCCEED(); 175 R_SUCCEED();
diff --git a/src/core/hle/kernel/svc/svc_lock.cpp b/src/core/hle/kernel/svc/svc_lock.cpp
index 1d7bc4246..5f0833fcb 100644
--- a/src/core/hle/kernel/svc/svc_lock.cpp
+++ b/src/core/hle/kernel/svc/svc_lock.cpp
@@ -17,7 +17,7 @@ Result ArbitrateLock(Core::System& system, Handle thread_handle, u64 address, u3
17 R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory); 17 R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
18 R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress); 18 R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress);
19 19
20 R_RETURN(GetCurrentProcess(system.Kernel()).WaitForAddress(thread_handle, address, tag)); 20 R_RETURN(KConditionVariable::WaitForAddress(system.Kernel(), thread_handle, address, tag));
21} 21}
22 22
23/// Unlock a mutex 23/// Unlock a mutex
@@ -28,7 +28,7 @@ Result ArbitrateUnlock(Core::System& system, u64 address) {
28 R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory); 28 R_UNLESS(!IsKernelAddress(address), ResultInvalidCurrentMemory);
29 R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress); 29 R_UNLESS(Common::IsAligned(address, sizeof(u32)), ResultInvalidAddress);
30 30
31 R_RETURN(GetCurrentProcess(system.Kernel()).SignalToAddress(address)); 31 R_RETURN(KConditionVariable::SignalToAddress(system.Kernel(), address));
32} 32}
33 33
34Result ArbitrateLock64(Core::System& system, Handle thread_handle, uint64_t address, uint32_t tag) { 34Result ArbitrateLock64(Core::System& system, Handle thread_handle, uint64_t address, uint32_t tag) {
diff --git a/src/core/hle/kernel/svc/svc_physical_memory.cpp b/src/core/hle/kernel/svc/svc_physical_memory.cpp
index d3545f232..99330d02a 100644
--- a/src/core/hle/kernel/svc/svc_physical_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_physical_memory.cpp
@@ -46,7 +46,7 @@ Result MapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
46 KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())}; 46 KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())};
47 auto& page_table{current_process->GetPageTable()}; 47 auto& page_table{current_process->GetPageTable()};
48 48
49 if (current_process->GetSystemResourceSize() == 0) { 49 if (current_process->GetTotalSystemResourceSize() == 0) {
50 LOG_ERROR(Kernel_SVC, "System Resource Size is zero"); 50 LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
51 R_THROW(ResultInvalidState); 51 R_THROW(ResultInvalidState);
52 } 52 }
@@ -95,7 +95,7 @@ Result UnmapPhysicalMemory(Core::System& system, u64 addr, u64 size) {
95 KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())}; 95 KProcess* const current_process{GetCurrentProcessPointer(system.Kernel())};
96 auto& page_table{current_process->GetPageTable()}; 96 auto& page_table{current_process->GetPageTable()};
97 97
98 if (current_process->GetSystemResourceSize() == 0) { 98 if (current_process->GetTotalSystemResourceSize() == 0) {
99 LOG_ERROR(Kernel_SVC, "System Resource Size is zero"); 99 LOG_ERROR(Kernel_SVC, "System Resource Size is zero");
100 R_THROW(ResultInvalidState); 100 R_THROW(ResultInvalidState);
101 } 101 }
diff --git a/src/core/hle/kernel/svc/svc_synchronization.cpp b/src/core/hle/kernel/svc/svc_synchronization.cpp
index 8ebc1bd1c..6c79cfd8d 100644
--- a/src/core/hle/kernel/svc/svc_synchronization.cpp
+++ b/src/core/hle/kernel/svc/svc_synchronization.cpp
@@ -132,7 +132,7 @@ void SynchronizePreemptionState(Core::System& system) {
132 GetCurrentThread(kernel).ClearInterruptFlag(); 132 GetCurrentThread(kernel).ClearInterruptFlag();
133 133
134 // Unpin the current thread. 134 // Unpin the current thread.
135 cur_process->UnpinCurrentThread(core_id); 135 cur_process->UnpinCurrentThread();
136 } 136 }
137} 137}
138 138
diff --git a/src/core/hle/kernel/svc/svc_thread.cpp b/src/core/hle/kernel/svc/svc_thread.cpp
index 933b82e30..755fd62b5 100644
--- a/src/core/hle/kernel/svc/svc_thread.cpp
+++ b/src/core/hle/kernel/svc/svc_thread.cpp
@@ -85,10 +85,6 @@ Result StartThread(Core::System& system, Handle thread_handle) {
85 // Try to start the thread. 85 // Try to start the thread.
86 R_TRY(thread->Run()); 86 R_TRY(thread->Run());
87 87
88 // If we succeeded, persist a reference to the thread.
89 thread->Open();
90 system.Kernel().RegisterInUseObject(thread.GetPointerUnsafe());
91
92 R_SUCCEED(); 88 R_SUCCEED();
93} 89}
94 90
@@ -99,7 +95,6 @@ void ExitThread(Core::System& system) {
99 auto* const current_thread = GetCurrentThreadPointer(system.Kernel()); 95 auto* const current_thread = GetCurrentThreadPointer(system.Kernel());
100 system.GlobalSchedulerContext().RemoveThread(current_thread); 96 system.GlobalSchedulerContext().RemoveThread(current_thread);
101 current_thread->Exit(); 97 current_thread->Exit();
102 system.Kernel().UnregisterInUseObject(current_thread);
103} 98}
104 99
105/// Sleep the current thread 100/// Sleep the current thread
@@ -260,7 +255,7 @@ Result GetThreadList(Core::System& system, s32* out_num_threads, u64 out_thread_
260 255
261 auto list_iter = thread_list.cbegin(); 256 auto list_iter = thread_list.cbegin();
262 for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) { 257 for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
263 memory.Write64(out_thread_ids, (*list_iter)->GetThreadId()); 258 memory.Write64(out_thread_ids, list_iter->GetThreadId());
264 out_thread_ids += sizeof(u64); 259 out_thread_ids += sizeof(u64);
265 } 260 }
266 261
diff --git a/src/core/hle/kernel/svc_generator.py b/src/core/hle/kernel/svc_generator.py
index 7fcbb1ba1..5531faac6 100644
--- a/src/core/hle/kernel/svc_generator.py
+++ b/src/core/hle/kernel/svc_generator.py
@@ -592,7 +592,7 @@ void Call(Core::System& system, u32 imm) {
592 auto& kernel = system.Kernel(); 592 auto& kernel = system.Kernel();
593 kernel.EnterSVCProfile(); 593 kernel.EnterSVCProfile();
594 594
595 if (GetCurrentProcess(system.Kernel()).Is64BitProcess()) { 595 if (GetCurrentProcess(system.Kernel()).Is64Bit()) {
596 Call64(system, imm); 596 Call64(system, imm);
597 } else { 597 } else {
598 Call32(system, imm); 598 Call32(system, imm);
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 251e6013c..50de02e36 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -604,13 +604,57 @@ enum class ProcessActivity : u32 {
604 Paused, 604 Paused,
605}; 605};
606 606
607enum class CreateProcessFlag : u32 {
608 // Is 64 bit?
609 Is64Bit = (1 << 0),
610
611 // What kind of address space?
612 AddressSpaceShift = 1,
613 AddressSpaceMask = (7 << AddressSpaceShift),
614 AddressSpace32Bit = (0 << AddressSpaceShift),
615 AddressSpace64BitDeprecated = (1 << AddressSpaceShift),
616 AddressSpace32BitWithoutAlias = (2 << AddressSpaceShift),
617 AddressSpace64Bit = (3 << AddressSpaceShift),
618
619 // Should JIT debug be done on crash?
620 EnableDebug = (1 << 4),
621
622 // Should ASLR be enabled for the process?
623 EnableAslr = (1 << 5),
624
625 // Is the process an application?
626 IsApplication = (1 << 6),
627
628 // 4.x deprecated: Should use secure memory?
629 DeprecatedUseSecureMemory = (1 << 7),
630
631 // 5.x+ Pool partition type.
632 PoolPartitionShift = 7,
633 PoolPartitionMask = (0xF << PoolPartitionShift),
634 PoolPartitionApplication = (0 << PoolPartitionShift),
635 PoolPartitionApplet = (1 << PoolPartitionShift),
636 PoolPartitionSystem = (2 << PoolPartitionShift),
637 PoolPartitionSystemNonSecure = (3 << PoolPartitionShift),
638
639 // 7.x+ Should memory allocation be optimized? This requires IsApplication.
640 OptimizeMemoryAllocation = (1 << 11),
641
642 // 11.x+ DisableDeviceAddressSpaceMerge.
643 DisableDeviceAddressSpaceMerge = (1 << 12),
644
645 // Mask of all flags.
646 All = Is64Bit | AddressSpaceMask | EnableDebug | EnableAslr | IsApplication |
647 PoolPartitionMask | OptimizeMemoryAllocation | DisableDeviceAddressSpaceMerge,
648};
649DECLARE_ENUM_FLAG_OPERATORS(CreateProcessFlag);
650
607struct CreateProcessParameter { 651struct CreateProcessParameter {
608 std::array<char, 12> name; 652 std::array<char, 12> name;
609 u32 version; 653 u32 version;
610 u64 program_id; 654 u64 program_id;
611 u64 code_address; 655 u64 code_address;
612 s32 code_num_pages; 656 s32 code_num_pages;
613 u32 flags; 657 CreateProcessFlag flags;
614 Handle reslimit; 658 Handle reslimit;
615 s32 system_resource_num_pages; 659 s32 system_resource_num_pages;
616}; 660};
diff --git a/src/core/hle/service/kernel_helpers.cpp b/src/core/hle/service/kernel_helpers.cpp
index 6a313a03b..f51e63564 100644
--- a/src/core/hle/service/kernel_helpers.cpp
+++ b/src/core/hle/service/kernel_helpers.cpp
@@ -21,10 +21,8 @@ ServiceContext::ServiceContext(Core::System& system_, std::string name_)
21 21
22 // Create the process. 22 // Create the process.
23 process = Kernel::KProcess::Create(kernel); 23 process = Kernel::KProcess::Create(kernel);
24 ASSERT(Kernel::KProcess::Initialize(process, system_, std::move(name_), 24 ASSERT(R_SUCCEEDED(process->Initialize(Kernel::Svc::CreateProcessParameter{},
25 Kernel::KProcess::ProcessType::KernelInternal, 25 kernel.GetSystemResourceLimit(), false)));
26 kernel.GetSystemResourceLimit())
27 .IsSuccess());
28 26
29 // Register the process. 27 // Register the process.
30 Kernel::KProcess::Register(kernel, process); 28 Kernel::KProcess::Register(kernel, process);
diff --git a/src/core/hle/service/nvnflinger/nvnflinger.cpp b/src/core/hle/service/nvnflinger/nvnflinger.cpp
index a07c621d9..bebb45eae 100644
--- a/src/core/hle/service/nvnflinger/nvnflinger.cpp
+++ b/src/core/hle/service/nvnflinger/nvnflinger.cpp
@@ -66,7 +66,6 @@ Nvnflinger::Nvnflinger(Core::System& system_, HosBinderDriverServer& hos_binder_
66 "ScreenComposition", 66 "ScreenComposition",
67 [this](std::uintptr_t, s64 time, 67 [this](std::uintptr_t, s64 time,
68 std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> { 68 std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
69 { const auto lock_guard = Lock(); }
70 vsync_signal.Set(); 69 vsync_signal.Set();
71 return std::chrono::nanoseconds(GetNextTicks()); 70 return std::chrono::nanoseconds(GetNextTicks());
72 }); 71 });
@@ -99,6 +98,7 @@ Nvnflinger::~Nvnflinger() {
99 } 98 }
100 99
101 ShutdownLayers(); 100 ShutdownLayers();
101 vsync_thread = {};
102 102
103 if (nvdrv) { 103 if (nvdrv) {
104 nvdrv->Close(disp_fd); 104 nvdrv->Close(disp_fd);
@@ -106,6 +106,7 @@ Nvnflinger::~Nvnflinger() {
106} 106}
107 107
108void Nvnflinger::ShutdownLayers() { 108void Nvnflinger::ShutdownLayers() {
109 const auto lock_guard = Lock();
109 for (auto& display : displays) { 110 for (auto& display : displays) {
110 for (size_t layer = 0; layer < display.GetNumLayers(); ++layer) { 111 for (size_t layer = 0; layer < display.GetNumLayers(); ++layer) {
111 display.GetLayer(layer).Core().NotifyShutdown(); 112 display.GetLayer(layer).Core().NotifyShutdown();
@@ -229,16 +230,6 @@ VI::Layer* Nvnflinger::FindLayer(u64 display_id, u64 layer_id) {
229 return display->FindLayer(layer_id); 230 return display->FindLayer(layer_id);
230} 231}
231 232
232const VI::Layer* Nvnflinger::FindLayer(u64 display_id, u64 layer_id) const {
233 const auto* const display = FindDisplay(display_id);
234
235 if (display == nullptr) {
236 return nullptr;
237 }
238
239 return display->FindLayer(layer_id);
240}
241
242VI::Layer* Nvnflinger::FindOrCreateLayer(u64 display_id, u64 layer_id) { 233VI::Layer* Nvnflinger::FindOrCreateLayer(u64 display_id, u64 layer_id) {
243 auto* const display = FindDisplay(display_id); 234 auto* const display = FindDisplay(display_id);
244 235
@@ -288,7 +279,6 @@ void Nvnflinger::Compose() {
288 auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>(disp_fd); 279 auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>(disp_fd);
289 ASSERT(nvdisp); 280 ASSERT(nvdisp);
290 281
291 guard->unlock();
292 Common::Rectangle<int> crop_rect{ 282 Common::Rectangle<int> crop_rect{
293 static_cast<int>(buffer.crop.Left()), static_cast<int>(buffer.crop.Top()), 283 static_cast<int>(buffer.crop.Left()), static_cast<int>(buffer.crop.Top()),
294 static_cast<int>(buffer.crop.Right()), static_cast<int>(buffer.crop.Bottom())}; 284 static_cast<int>(buffer.crop.Right()), static_cast<int>(buffer.crop.Bottom())};
@@ -299,7 +289,6 @@ void Nvnflinger::Compose() {
299 buffer.fence.fences, buffer.fence.num_fences); 289 buffer.fence.fences, buffer.fence.num_fences);
300 290
301 MicroProfileFlip(); 291 MicroProfileFlip();
302 guard->lock();
303 292
304 swap_interval = buffer.swap_interval; 293 swap_interval = buffer.swap_interval;
305 294
diff --git a/src/core/hle/service/nvnflinger/nvnflinger.h b/src/core/hle/service/nvnflinger/nvnflinger.h
index 14c783582..959d8b46b 100644
--- a/src/core/hle/service/nvnflinger/nvnflinger.h
+++ b/src/core/hle/service/nvnflinger/nvnflinger.h
@@ -117,9 +117,6 @@ private:
117 /// Finds the layer identified by the specified ID in the desired display. 117 /// Finds the layer identified by the specified ID in the desired display.
118 [[nodiscard]] VI::Layer* FindLayer(u64 display_id, u64 layer_id); 118 [[nodiscard]] VI::Layer* FindLayer(u64 display_id, u64 layer_id);
119 119
120 /// Finds the layer identified by the specified ID in the desired display.
121 [[nodiscard]] const VI::Layer* FindLayer(u64 display_id, u64 layer_id) const;
122
123 /// Finds the layer identified by the specified ID in the desired display, 120 /// Finds the layer identified by the specified ID in the desired display,
124 /// or creates the layer if it is not found. 121 /// or creates the layer if it is not found.
125 /// To be used when the system expects the specified ID to already exist. 122 /// To be used when the system expects the specified ID to already exist.
diff --git a/src/core/hle/service/pm/pm.cpp b/src/core/hle/service/pm/pm.cpp
index f9cf2dda3..d92499f05 100644
--- a/src/core/hle/service/pm/pm.cpp
+++ b/src/core/hle/service/pm/pm.cpp
@@ -37,7 +37,7 @@ std::optional<Kernel::KProcess*> SearchProcessList(
37void GetApplicationPidGeneric(HLERequestContext& ctx, 37void GetApplicationPidGeneric(HLERequestContext& ctx,
38 const std::vector<Kernel::KProcess*>& process_list) { 38 const std::vector<Kernel::KProcess*>& process_list) {
39 const auto process = SearchProcessList(process_list, [](const auto& proc) { 39 const auto process = SearchProcessList(process_list, [](const auto& proc) {
40 return proc->GetProcessId() == Kernel::KProcess::ProcessIDMin; 40 return proc->GetProcessId() == Kernel::KProcess::ProcessIdMin;
41 }); 41 });
42 42
43 IPC::ResponseBuilder rb{ctx, 4}; 43 IPC::ResponseBuilder rb{ctx, 4};
diff --git a/src/core/reporter.cpp b/src/core/reporter.cpp
index ed875d444..5d168cbc1 100644
--- a/src/core/reporter.cpp
+++ b/src/core/reporter.cpp
@@ -116,7 +116,7 @@ json GetProcessorStateDataAuto(Core::System& system) {
116 Core::ARM_Interface::ThreadContext64 context{}; 116 Core::ARM_Interface::ThreadContext64 context{};
117 arm.SaveContext(context); 117 arm.SaveContext(context);
118 118
119 return GetProcessorStateData(process->Is64BitProcess() ? "AArch64" : "AArch32", 119 return GetProcessorStateData(process->Is64Bit() ? "AArch64" : "AArch32",
120 GetInteger(process->GetEntryPoint()), context.sp, context.pc, 120 GetInteger(process->GetEntryPoint()), context.sp, context.pc,
121 context.pstate, context.cpu_registers); 121 context.pstate, context.cpu_registers);
122} 122}
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index 0783a2430..7049c57b6 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -127,7 +127,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() cons
127 return list; 127 return list;
128 } 128 }
129 129
130 if (thread.GetOwnerProcess() == nullptr || !thread.GetOwnerProcess()->Is64BitProcess()) { 130 if (thread.GetOwnerProcess() == nullptr || !thread.GetOwnerProcess()->Is64Bit()) {
131 return list; 131 return list;
132 } 132 }
133 133
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index 1431cf2fe..816d804c4 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -2019,7 +2019,7 @@ void GMainWindow::BootGame(const QString& filename, u64 program_id, std::size_t
2019 std::filesystem::path{Common::U16StringFromBuffer(filename.utf16(), filename.size())} 2019 std::filesystem::path{Common::U16StringFromBuffer(filename.utf16(), filename.size())}
2020 .filename()); 2020 .filename());
2021 } 2021 }
2022 const bool is_64bit = system->Kernel().ApplicationProcess()->Is64BitProcess(); 2022 const bool is_64bit = system->Kernel().ApplicationProcess()->Is64Bit();
2023 const auto instruction_set_suffix = is_64bit ? tr("(64-bit)") : tr("(32-bit)"); 2023 const auto instruction_set_suffix = is_64bit ? tr("(64-bit)") : tr("(32-bit)");
2024 title_name = tr("%1 %2", "%1 is the title name. %2 indicates if the title is 64-bit or 32-bit") 2024 title_name = tr("%1 %2", "%1 is the title name. %2 indicates if the title is 64-bit or 32-bit")
2025 .arg(QString::fromStdString(title_name), instruction_set_suffix) 2025 .arg(QString::fromStdString(title_name), instruction_set_suffix)