Diffstat (limited to 'src')
-rw-r--r--  src/common/common_funcs.h | 17
-rw-r--r--  src/core/CMakeLists.txt | 7
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.cpp | 6
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.cpp | 6
-rw-r--r--  src/core/core.cpp | 2
-rw-r--r--  src/core/cpu_manager.cpp | 25
-rw-r--r--  src/core/hardware_properties.h | 36
-rw-r--r--  src/core/hle/kernel/client_port.h | 2
-rw-r--r--  src/core/hle/kernel/client_session.cpp | 4
-rw-r--r--  src/core/hle/kernel/client_session.h | 6
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.cpp | 4
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.h | 19
-rw-r--r--  src/core/hle/kernel/handle_table.cpp | 6
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp | 6
-rw-r--r--  src/core/hle/kernel/hle_ipc.h | 15
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.cpp | 28
-rw-r--r--  src/core/hle/kernel/k_condition_variable.cpp | 42
-rw-r--r--  src/core/hle/kernel/k_condition_variable.h | 10
-rw-r--r--  src/core/hle/kernel/k_light_lock.cpp | 130
-rw-r--r--  src/core/hle/kernel/k_light_lock.h | 41
-rw-r--r--  src/core/hle/kernel/k_priority_queue.h | 4
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp | 255
-rw-r--r--  src/core/hle/kernel/k_scheduler.h | 49
-rw-r--r--  src/core/hle/kernel/k_scheduler_lock.h | 34
-rw-r--r--  src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h | 18
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.cpp | 20
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.h | 6
-rw-r--r--  src/core/hle/kernel/k_thread.cpp | 1050
-rw-r--r--  src/core/hle/kernel/k_thread.h | 768
-rw-r--r--  src/core/hle/kernel/k_thread_queue.h | 81
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 76
-rw-r--r--  src/core/hle/kernel/kernel.h | 18
-rw-r--r--  src/core/hle/kernel/object.h | 2
-rw-r--r--  src/core/hle/kernel/process.cpp | 91
-rw-r--r--  src/core/hle/kernel/process.h | 88
-rw-r--r--  src/core/hle/kernel/readable_event.cpp | 2
-rw-r--r--  src/core/hle/kernel/readable_event.h | 2
-rw-r--r--  src/core/hle/kernel/resource_limit.h | 2
-rw-r--r--  src/core/hle/kernel/server_port.cpp | 2
-rw-r--r--  src/core/hle/kernel/server_port.h | 2
-rw-r--r--  src/core/hle/kernel/server_session.cpp | 8
-rw-r--r--  src/core/hle/kernel/server_session.h | 12
-rw-r--r--  src/core/hle/kernel/session.h | 2
-rw-r--r--  src/core/hle/kernel/shared_memory.h | 2
-rw-r--r--  src/core/hle/kernel/svc.cpp | 439
-rw-r--r--  src/core/hle/kernel/svc_results.h | 5
-rw-r--r--  src/core/hle/kernel/svc_types.h | 18
-rw-r--r--  src/core/hle/kernel/svc_wrap.h | 56
-rw-r--r--  src/core/hle/kernel/thread.cpp | 460
-rw-r--r--  src/core/hle/kernel/thread.h | 782
-rw-r--r--  src/core/hle/kernel/time_manager.cpp | 44
-rw-r--r--  src/core/hle/kernel/time_manager.h | 10
-rw-r--r--  src/core/hle/kernel/transfer_memory.h | 2
-rw-r--r--  src/core/hle/kernel/writable_event.cpp | 6
-rw-r--r--  src/core/hle/kernel/writable_event.h | 3
-rw-r--r--  src/core/hle/service/am/am.cpp | 10
-rw-r--r--  src/core/hle/service/am/am.h | 1
-rw-r--r--  src/core/hle/service/nfp/nfp.cpp | 2
-rw-r--r--  src/core/hle/service/nvdrv/interface.cpp | 2
-rw-r--r--  src/core/hle/service/prepo/prepo.cpp | 11
-rw-r--r--  src/core/hle/service/service.cpp | 2
-rw-r--r--  src/core/hle/service/sockets/bsd.cpp | 20
-rw-r--r--  src/core/hle/service/sockets/bsd.h | 1
-rw-r--r--  src/core/hle/service/time/time.cpp | 2
-rw-r--r--  src/core/hle/service/time/time.h | 2
-rw-r--r--  src/core/hle/service/time/time_sharedmemory.h | 2
-rw-r--r--  src/core/hle/service/vi/vi.cpp | 2
-rw-r--r--  src/core/loader/nro.cpp | 6
-rw-r--r--  src/core/loader/nso.cpp | 6
-rw-r--r--  src/video_core/memory_manager.cpp | 23
-rw-r--r--  src/video_core/memory_manager.h | 6
-rw-r--r--  src/yuzu/configuration/configure_input.cpp | 6
-rw-r--r--  src/yuzu/configuration/configure_input_player.cpp | 46
-rw-r--r--  src/yuzu/debugger/wait_tree.cpp | 57
-rw-r--r--  src/yuzu/debugger/wait_tree.h | 14
-rw-r--r--  src/yuzu/main.cpp | 2
76 files changed, 3080 insertions, 1974 deletions
diff --git a/src/common/common_funcs.h b/src/common/common_funcs.h
index 75f3027fb..71b64e32a 100644
--- a/src/common/common_funcs.h
+++ b/src/common/common_funcs.h
@@ -97,10 +97,27 @@ __declspec(dllimport) void __stdcall DebugBreak(void);
 #define R_UNLESS(expr, res) \
     { \
         if (!(expr)) { \
+            if (res.IsError()) { \
+                LOG_ERROR(Kernel, "Failed with result: {}", res.raw); \
+            } \
             return res; \
         } \
     }
 
+#define R_SUCCEEDED(res) (res.IsSuccess())
+
+/// Evaluates an expression that returns a result, and returns the result if it would fail.
+#define R_TRY(res_expr) \
+    { \
+        const auto _tmp_r_try_rc = (res_expr); \
+        if (_tmp_r_try_rc.IsError()) { \
+            return _tmp_r_try_rc; \
+        } \
+    }
+
+/// Evaluates a boolean expression, and succeeds if that expression is true.
+#define R_SUCCEED_IF(expr) R_UNLESS(!(expr), RESULT_SUCCESS)
+
 namespace Common {
 
 [[nodiscard]] constexpr u32 MakeMagic(char a, char b, char c, char d) {
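
For illustration, a self-contained sketch of how these macros compose; the Result struct, the LOG_ERROR stub, and CheckSize below are stand-ins invented for the example, with the three macros copied from the hunk above:

#include <cstdio>

struct Result {
    int raw;
    constexpr bool IsError() const { return raw != 0; }
    constexpr bool IsSuccess() const { return raw == 0; }
};
constexpr Result RESULT_SUCCESS{0};
constexpr Result ResultInvalidSize{101};

// Stub standing in for yuzu's logging macro.
#define LOG_ERROR(module, fmt, value) std::printf("%s %d\n", fmt, value)

#define R_UNLESS(expr, res) \
    { \
        if (!(expr)) { \
            if (res.IsError()) { \
                LOG_ERROR(Kernel, "Failed with result: {}", res.raw); \
            } \
            return res; \
        } \
    }

#define R_TRY(res_expr) \
    { \
        const auto _tmp_r_try_rc = (res_expr); \
        if (_tmp_r_try_rc.IsError()) { \
            return _tmp_r_try_rc; \
        } \
    }

#define R_SUCCEED_IF(expr) R_UNLESS(!(expr), RESULT_SUCCESS)

// Guard clauses read top to bottom: reject, short-circuit success, propagate.
Result CheckSize(int size) {
    R_UNLESS(size > 0, ResultInvalidSize); // fail (and log) on invalid input
    R_SUCCEED_IF(size < 0x1000);           // small requests need no more checks
    R_TRY(CheckSize(size / 2));            // bubble up a nested failure
    return RESULT_SUCCESS;
}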
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 9c9b3195b..397cc028f 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -160,6 +160,8 @@ add_library(core STATIC
     hle/kernel/k_affinity_mask.h
     hle/kernel/k_condition_variable.cpp
     hle/kernel/k_condition_variable.h
+    hle/kernel/k_light_lock.cpp
+    hle/kernel/k_light_lock.h
     hle/kernel/k_priority_queue.h
     hle/kernel/k_scheduler.cpp
     hle/kernel/k_scheduler.h
@@ -168,6 +170,9 @@ add_library(core STATIC
     hle/kernel/k_scoped_scheduler_lock_and_sleep.h
     hle/kernel/k_synchronization_object.cpp
     hle/kernel/k_synchronization_object.h
+    hle/kernel/k_thread.cpp
+    hle/kernel/k_thread.h
+    hle/kernel/k_thread_queue.h
     hle/kernel/kernel.cpp
     hle/kernel/kernel.h
     hle/kernel/memory/address_space_info.cpp
@@ -216,8 +221,6 @@ add_library(core STATIC
     hle/kernel/svc_results.h
     hle/kernel/svc_types.h
     hle/kernel/svc_wrap.h
-    hle/kernel/thread.cpp
-    hle/kernel/thread.h
     hle/kernel/time_manager.cpp
     hle/kernel/time_manager.h
     hle/kernel/transfer_memory.cpp
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 6c4c8e9e4..c650a4dfb 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -255,6 +255,9 @@ void ARM_Dynarmic_32::ChangeProcessorID(std::size_t new_core_id) {
 }
 
 void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
+    if (!jit) {
+        return;
+    }
     Dynarmic::A32::Context context;
     jit->SaveContext(context);
     ctx.cpu_registers = context.Regs();
@@ -264,6 +267,9 @@ void ARM_Dynarmic_32::SaveContext(ThreadContext32& ctx) {
 }
 
 void ARM_Dynarmic_32::LoadContext(const ThreadContext32& ctx) {
+    if (!jit) {
+        return;
+    }
     Dynarmic::A32::Context context;
     context.Regs() = ctx.cpu_registers;
     context.ExtRegs() = ctx.extension_registers;
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 4c5ebca22..ae5566ab8 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -294,6 +294,9 @@ void ARM_Dynarmic_64::ChangeProcessorID(std::size_t new_core_id) {
 }
 
 void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) {
+    if (!jit) {
+        return;
+    }
     ctx.cpu_registers = jit->GetRegisters();
     ctx.sp = jit->GetSP();
     ctx.pc = jit->GetPC();
@@ -305,6 +308,9 @@ void ARM_Dynarmic_64::SaveContext(ThreadContext64& ctx) {
 }
 
 void ARM_Dynarmic_64::LoadContext(const ThreadContext64& ctx) {
+    if (!jit) {
+        return;
+    }
     jit->SetRegisters(ctx.cpu_registers);
     jit->SetSP(ctx.sp);
     jit->SetPC(ctx.pc);
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 86bdc7f6b..30f5e1128 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -28,10 +28,10 @@
28#include "core/hardware_interrupt_manager.h" 28#include "core/hardware_interrupt_manager.h"
29#include "core/hle/kernel/client_port.h" 29#include "core/hle/kernel/client_port.h"
30#include "core/hle/kernel/k_scheduler.h" 30#include "core/hle/kernel/k_scheduler.h"
31#include "core/hle/kernel/k_thread.h"
31#include "core/hle/kernel/kernel.h" 32#include "core/hle/kernel/kernel.h"
32#include "core/hle/kernel/physical_core.h" 33#include "core/hle/kernel/physical_core.h"
33#include "core/hle/kernel/process.h" 34#include "core/hle/kernel/process.h"
34#include "core/hle/kernel/thread.h"
35#include "core/hle/service/am/applets/applets.h" 35#include "core/hle/service/am/applets/applets.h"
36#include "core/hle/service/apm/controller.h" 36#include "core/hle/service/apm/controller.h"
37#include "core/hle/service/filesystem/filesystem.h" 37#include "core/hle/service/filesystem/filesystem.h"
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 373395047..8f04fb8f5 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -11,9 +11,9 @@
11#include "core/core_timing.h" 11#include "core/core_timing.h"
12#include "core/cpu_manager.h" 12#include "core/cpu_manager.h"
13#include "core/hle/kernel/k_scheduler.h" 13#include "core/hle/kernel/k_scheduler.h"
14#include "core/hle/kernel/k_thread.h"
14#include "core/hle/kernel/kernel.h" 15#include "core/hle/kernel/kernel.h"
15#include "core/hle/kernel/physical_core.h" 16#include "core/hle/kernel/physical_core.h"
16#include "core/hle/kernel/thread.h"
17#include "video_core/gpu.h" 17#include "video_core/gpu.h"
18 18
19namespace Core { 19namespace Core {
@@ -147,7 +147,7 @@ void CpuManager::MultiCoreRunSuspendThread() {
147 while (true) { 147 while (true) {
148 auto core = kernel.GetCurrentHostThreadID(); 148 auto core = kernel.GetCurrentHostThreadID();
149 auto& scheduler = *kernel.CurrentScheduler(); 149 auto& scheduler = *kernel.CurrentScheduler();
150 Kernel::Thread* current_thread = scheduler.GetCurrentThread(); 150 Kernel::KThread* current_thread = scheduler.GetCurrentThread();
151 Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context); 151 Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context);
152 ASSERT(scheduler.ContextSwitchPending()); 152 ASSERT(scheduler.ContextSwitchPending());
153 ASSERT(core == kernel.GetCurrentHostThreadID()); 153 ASSERT(core == kernel.GetCurrentHostThreadID());
@@ -208,7 +208,6 @@ void CpuManager::SingleCoreRunGuestThread() {
208 208
209void CpuManager::SingleCoreRunGuestLoop() { 209void CpuManager::SingleCoreRunGuestLoop() {
210 auto& kernel = system.Kernel(); 210 auto& kernel = system.Kernel();
211 auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
212 while (true) { 211 while (true) {
213 auto* physical_core = &kernel.CurrentPhysicalCore(); 212 auto* physical_core = &kernel.CurrentPhysicalCore();
214 system.EnterDynarmicProfile(); 213 system.EnterDynarmicProfile();
@@ -217,9 +216,9 @@ void CpuManager::SingleCoreRunGuestLoop() {
217 physical_core = &kernel.CurrentPhysicalCore(); 216 physical_core = &kernel.CurrentPhysicalCore();
218 } 217 }
219 system.ExitDynarmicProfile(); 218 system.ExitDynarmicProfile();
220 thread->SetPhantomMode(true); 219 kernel.SetIsPhantomModeForSingleCore(true);
221 system.CoreTiming().Advance(); 220 system.CoreTiming().Advance();
222 thread->SetPhantomMode(false); 221 kernel.SetIsPhantomModeForSingleCore(false);
223 physical_core->ArmInterface().ClearExclusiveState(); 222 physical_core->ArmInterface().ClearExclusiveState();
224 PreemptSingleCore(); 223 PreemptSingleCore();
225 auto& scheduler = kernel.Scheduler(current_core); 224 auto& scheduler = kernel.Scheduler(current_core);
@@ -245,7 +244,7 @@ void CpuManager::SingleCoreRunSuspendThread() {
245 while (true) { 244 while (true) {
246 auto core = kernel.GetCurrentHostThreadID(); 245 auto core = kernel.GetCurrentHostThreadID();
247 auto& scheduler = *kernel.CurrentScheduler(); 246 auto& scheduler = *kernel.CurrentScheduler();
248 Kernel::Thread* current_thread = scheduler.GetCurrentThread(); 247 Kernel::KThread* current_thread = scheduler.GetCurrentThread();
249 Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context); 248 Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context);
250 ASSERT(scheduler.ContextSwitchPending()); 249 ASSERT(scheduler.ContextSwitchPending());
251 ASSERT(core == kernel.GetCurrentHostThreadID()); 250 ASSERT(core == kernel.GetCurrentHostThreadID());
@@ -255,22 +254,23 @@ void CpuManager::SingleCoreRunSuspendThread() {
255 254
256void CpuManager::PreemptSingleCore(bool from_running_enviroment) { 255void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
257 { 256 {
258 auto& scheduler = system.Kernel().Scheduler(current_core); 257 auto& kernel = system.Kernel();
259 Kernel::Thread* current_thread = scheduler.GetCurrentThread(); 258 auto& scheduler = kernel.Scheduler(current_core);
259 Kernel::KThread* current_thread = scheduler.GetCurrentThread();
260 if (idle_count >= 4 || from_running_enviroment) { 260 if (idle_count >= 4 || from_running_enviroment) {
261 if (!from_running_enviroment) { 261 if (!from_running_enviroment) {
262 system.CoreTiming().Idle(); 262 system.CoreTiming().Idle();
263 idle_count = 0; 263 idle_count = 0;
264 } 264 }
265 current_thread->SetPhantomMode(true); 265 kernel.SetIsPhantomModeForSingleCore(true);
266 system.CoreTiming().Advance(); 266 system.CoreTiming().Advance();
267 current_thread->SetPhantomMode(false); 267 kernel.SetIsPhantomModeForSingleCore(false);
268 } 268 }
269 current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES); 269 current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
270 system.CoreTiming().ResetTicks(); 270 system.CoreTiming().ResetTicks();
271 scheduler.Unload(scheduler.GetCurrentThread()); 271 scheduler.Unload(scheduler.GetCurrentThread());
272 272
273 auto& next_scheduler = system.Kernel().Scheduler(current_core); 273 auto& next_scheduler = kernel.Scheduler(current_core);
274 Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext()); 274 Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext());
275 } 275 }
276 276
@@ -278,8 +278,7 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
278 { 278 {
279 auto& scheduler = system.Kernel().Scheduler(current_core); 279 auto& scheduler = system.Kernel().Scheduler(current_core);
280 scheduler.Reload(scheduler.GetCurrentThread()); 280 scheduler.Reload(scheduler.GetCurrentThread());
281 auto* currrent_thread2 = scheduler.GetCurrentThread(); 281 if (!scheduler.IsIdle()) {
282 if (!currrent_thread2->IsIdleThread()) {
283 idle_count = 0; 282 idle_count = 0;
284 } 283 }
285 } 284 }
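
A self-contained sketch of where the phantom-mode flag now lives; this stand-in only mirrors the two calls used above, and the atomic is an assumption made so the sketch is safe to call from any host thread:

#include <atomic>

// Stand-in for the relevant slice of Kernel::KernelCore. Moving the flag off
// the per-thread object means it can be toggled even while no guest thread
// is loaded on the core.
class KernelCore {
public:
    void SetIsPhantomModeForSingleCore(bool value) {
        is_phantom_mode_for_singlecore.store(value);
    }
    bool IsPhantomModeForSingleCore() const {
        return is_phantom_mode_for_singlecore.load();
    }
private:
    std::atomic<bool> is_phantom_mode_for_singlecore{false};
};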
diff --git a/src/core/hardware_properties.h b/src/core/hardware_properties.h
index 456b41e1b..176a72c67 100644
--- a/src/core/hardware_properties.h
+++ b/src/core/hardware_properties.h
@@ -4,8 +4,10 @@
 
 #pragma once
 
+#include <array>
 #include <tuple>
 
+#include "common/bit_util.h"
 #include "common/common_types.h"
 
 namespace Core {
@@ -18,34 +20,12 @@ constexpr u64 BASE_CLOCK_RATE = 1019215872; // Switch cpu frequency is 1020MHz u
 constexpr u64 CNTFREQ = 19200000; // Switch's hardware clock speed
 constexpr u32 NUM_CPU_CORES = 4;  // Number of CPU Cores
 
-} // namespace Hardware
-
-constexpr u32 INVALID_HOST_THREAD_ID = 0xFFFFFFFF;
-
-struct EmuThreadHandle {
-    u32 host_handle;
-    u32 guest_handle;
-
-    u64 GetRaw() const {
-        return (static_cast<u64>(host_handle) << 32) | guest_handle;
-    }
-
-    bool operator==(const EmuThreadHandle& rhs) const {
-        return std::tie(host_handle, guest_handle) == std::tie(rhs.host_handle, rhs.guest_handle);
-    }
-
-    bool operator!=(const EmuThreadHandle& rhs) const {
-        return !operator==(rhs);
-    }
-
-    static constexpr EmuThreadHandle InvalidHandle() {
-        constexpr u32 invalid_handle = 0xFFFFFFFF;
-        return {invalid_handle, invalid_handle};
-    }
-
-    bool IsInvalid() const {
-        return (*this) == InvalidHandle();
-    }
-};
+// Virtual to Physical core map.
+constexpr std::array<s32, Common::BitSize<u64>()> VirtualToPhysicalCoreMap{
+    0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
 };
 
+} // namespace Hardware
+
 } // namespace Core
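
A self-contained sketch of the table's shape (MakeCoreMap and main are invented for illustration): virtual cores 0-3 map to physical cores 0-3, virtual core 63 maps to physical core 3, and every other virtual core collapses onto core 0.

#include <array>
#include <cstddef>
#include <cstdint>

// Builds a 64-entry table with the same shape as VirtualToPhysicalCoreMap.
constexpr std::array<std::int32_t, 64> MakeCoreMap() {
    std::array<std::int32_t, 64> map{}; // default: everything on core 0
    for (std::size_t i = 0; i < 4; ++i) {
        map[i] = static_cast<std::int32_t>(i); // identity for real cores 0-3
    }
    map[63] = 3; // mirrors the final entry of the diff's table
    return map;
}

int main() {
    constexpr auto map = MakeCoreMap();
    static_assert(map[2] == 2);  // a real core maps to itself
    static_assert(map[10] == 0); // other virtual cores fall back to core 0
    static_assert(map[63] == 3); // the one special-cased high entry
}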
diff --git a/src/core/hle/kernel/client_port.h b/src/core/hle/kernel/client_port.h
index 9762bbf0d..77559ebf9 100644
--- a/src/core/hle/kernel/client_port.h
+++ b/src/core/hle/kernel/client_port.h
@@ -51,6 +51,8 @@ public:
      */
     void ConnectionClosed();
 
+    void Finalize() override {}
+
 private:
     std::shared_ptr<ServerPort> server_port; ///< ServerPort associated with this client port.
     u32 max_sessions = 0; ///< Maximum number of simultaneous sessions the port can have
diff --git a/src/core/hle/kernel/client_session.cpp b/src/core/hle/kernel/client_session.cpp
index e8e52900d..a2be1a8f6 100644
--- a/src/core/hle/kernel/client_session.cpp
+++ b/src/core/hle/kernel/client_session.cpp
@@ -5,9 +5,9 @@
5#include "core/hle/kernel/client_session.h" 5#include "core/hle/kernel/client_session.h"
6#include "core/hle/kernel/errors.h" 6#include "core/hle/kernel/errors.h"
7#include "core/hle/kernel/hle_ipc.h" 7#include "core/hle/kernel/hle_ipc.h"
8#include "core/hle/kernel/k_thread.h"
8#include "core/hle/kernel/server_session.h" 9#include "core/hle/kernel/server_session.h"
9#include "core/hle/kernel/session.h" 10#include "core/hle/kernel/session.h"
10#include "core/hle/kernel/thread.h"
11#include "core/hle/result.h" 11#include "core/hle/result.h"
12 12
13namespace Kernel { 13namespace Kernel {
@@ -38,7 +38,7 @@ ResultVal<std::shared_ptr<ClientSession>> ClientSession::Create(KernelCore& kern
38 return MakeResult(std::move(client_session)); 38 return MakeResult(std::move(client_session));
39} 39}
40 40
41ResultCode ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread, 41ResultCode ClientSession::SendSyncRequest(std::shared_ptr<KThread> thread,
42 Core::Memory::Memory& memory, 42 Core::Memory::Memory& memory,
43 Core::Timing::CoreTiming& core_timing) { 43 Core::Timing::CoreTiming& core_timing) {
44 // Keep ServerSession alive until we're done working with it. 44 // Keep ServerSession alive until we're done working with it.
diff --git a/src/core/hle/kernel/client_session.h b/src/core/hle/kernel/client_session.h
index d5c9ebee8..85aafeaf4 100644
--- a/src/core/hle/kernel/client_session.h
+++ b/src/core/hle/kernel/client_session.h
@@ -24,7 +24,7 @@ namespace Kernel {
 
 class KernelCore;
 class Session;
-class Thread;
+class KThread;
 
 class ClientSession final : public KSynchronizationObject {
 public:
@@ -46,11 +46,13 @@ public:
         return HANDLE_TYPE;
     }
 
-    ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory,
+    ResultCode SendSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory,
                                Core::Timing::CoreTiming& core_timing);
 
     bool IsSignaled() const override;
 
+    void Finalize() override {}
+
 private:
     static ResultVal<std::shared_ptr<ClientSession>> Create(KernelCore& kernel,
                                                             std::shared_ptr<Session> parent,
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index a133e8ed0..c6838649f 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -17,12 +17,12 @@ GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
17 17
18GlobalSchedulerContext::~GlobalSchedulerContext() = default; 18GlobalSchedulerContext::~GlobalSchedulerContext() = default;
19 19
20void GlobalSchedulerContext::AddThread(std::shared_ptr<Thread> thread) { 20void GlobalSchedulerContext::AddThread(std::shared_ptr<KThread> thread) {
21 std::scoped_lock lock{global_list_guard}; 21 std::scoped_lock lock{global_list_guard};
22 thread_list.push_back(std::move(thread)); 22 thread_list.push_back(std::move(thread));
23} 23}
24 24
25void GlobalSchedulerContext::RemoveThread(std::shared_ptr<Thread> thread) { 25void GlobalSchedulerContext::RemoveThread(std::shared_ptr<KThread> thread) {
26 std::scoped_lock lock{global_list_guard}; 26 std::scoped_lock lock{global_list_guard};
27 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), 27 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
28 thread_list.end()); 28 thread_list.end());
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
index 5c7b89290..11592843e 100644
--- a/src/core/hle/kernel/global_scheduler_context.h
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -12,7 +12,8 @@
12#include "core/hardware_properties.h" 12#include "core/hardware_properties.h"
13#include "core/hle/kernel/k_priority_queue.h" 13#include "core/hle/kernel/k_priority_queue.h"
14#include "core/hle/kernel/k_scheduler_lock.h" 14#include "core/hle/kernel/k_scheduler_lock.h"
15#include "core/hle/kernel/thread.h" 15#include "core/hle/kernel/k_thread.h"
16#include "core/hle/kernel/svc_types.h"
16 17
17namespace Kernel { 18namespace Kernel {
18 19
@@ -20,8 +21,12 @@ class KernelCore;
20class SchedulerLock; 21class SchedulerLock;
21 22
22using KSchedulerPriorityQueue = 23using KSchedulerPriorityQueue =
23 KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>; 24 KPriorityQueue<KThread, Core::Hardware::NUM_CPU_CORES, Svc::LowestThreadPriority,
24constexpr s32 HighestCoreMigrationAllowedPriority = 2; 25 Svc::HighestThreadPriority>;
26
27static constexpr s32 HighestCoreMigrationAllowedPriority = 2;
28static_assert(Svc::LowestThreadPriority >= HighestCoreMigrationAllowedPriority);
29static_assert(Svc::HighestThreadPriority <= HighestCoreMigrationAllowedPriority);
25 30
26class GlobalSchedulerContext final { 31class GlobalSchedulerContext final {
27 friend class KScheduler; 32 friend class KScheduler;
@@ -33,13 +38,13 @@ public:
33 ~GlobalSchedulerContext(); 38 ~GlobalSchedulerContext();
34 39
35 /// Adds a new thread to the scheduler 40 /// Adds a new thread to the scheduler
36 void AddThread(std::shared_ptr<Thread> thread); 41 void AddThread(std::shared_ptr<KThread> thread);
37 42
38 /// Removes a thread from the scheduler 43 /// Removes a thread from the scheduler
39 void RemoveThread(std::shared_ptr<Thread> thread); 44 void RemoveThread(std::shared_ptr<KThread> thread);
40 45
41 /// Returns a list of all threads managed by the scheduler 46 /// Returns a list of all threads managed by the scheduler
42 [[nodiscard]] const std::vector<std::shared_ptr<Thread>>& GetThreadList() const { 47 [[nodiscard]] const std::vector<std::shared_ptr<KThread>>& GetThreadList() const {
43 return thread_list; 48 return thread_list;
44 } 49 }
45 50
@@ -74,7 +79,7 @@ private:
74 LockType scheduler_lock; 79 LockType scheduler_lock;
75 80
76 /// Lists all thread ids that aren't deleted/etc. 81 /// Lists all thread ids that aren't deleted/etc.
77 std::vector<std::shared_ptr<Thread>> thread_list; 82 std::vector<std::shared_ptr<KThread>> thread_list;
78 Common::SpinLock global_list_guard{}; 83 Common::SpinLock global_list_guard{};
79}; 84};
80 85
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index 40988b0fd..1a2fa9cd8 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -9,9 +9,9 @@
9#include "core/hle/kernel/errors.h" 9#include "core/hle/kernel/errors.h"
10#include "core/hle/kernel/handle_table.h" 10#include "core/hle/kernel/handle_table.h"
11#include "core/hle/kernel/k_scheduler.h" 11#include "core/hle/kernel/k_scheduler.h"
12#include "core/hle/kernel/k_thread.h"
12#include "core/hle/kernel/kernel.h" 13#include "core/hle/kernel/kernel.h"
13#include "core/hle/kernel/process.h" 14#include "core/hle/kernel/process.h"
14#include "core/hle/kernel/thread.h"
15 15
16namespace Kernel { 16namespace Kernel {
17namespace { 17namespace {
@@ -89,6 +89,10 @@ ResultCode HandleTable::Close(Handle handle) {
89 89
90 const u16 slot = GetSlot(handle); 90 const u16 slot = GetSlot(handle);
91 91
92 if (objects[slot].use_count() == 1) {
93 objects[slot]->Finalize();
94 }
95
92 objects[slot] = nullptr; 96 objects[slot] = nullptr;
93 97
94 generations[slot] = next_free_slot; 98 generations[slot] = next_free_slot;
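
A self-contained sketch of the finalize-on-last-reference idea above, using std::shared_ptr directly (Object and CloseSlot are invented for this example):

#include <memory>

struct Object {
    virtual ~Object() = default;
    virtual void Finalize() {} // hook for releasing kernel-side state early
};

void CloseSlot(std::shared_ptr<Object>& slot) {
    // When the table holds the last strong reference, let the object tear
    // down its resources before the reset below destroys it.
    if (slot && slot.use_count() == 1) {
        slot->Finalize();
    }
    slot = nullptr;
}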
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index a419f9602..c7b10ca7a 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -19,12 +19,12 @@
19#include "core/hle/kernel/hle_ipc.h" 19#include "core/hle/kernel/hle_ipc.h"
20#include "core/hle/kernel/k_scheduler.h" 20#include "core/hle/kernel/k_scheduler.h"
21#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 21#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
22#include "core/hle/kernel/k_thread.h"
22#include "core/hle/kernel/kernel.h" 23#include "core/hle/kernel/kernel.h"
23#include "core/hle/kernel/object.h" 24#include "core/hle/kernel/object.h"
24#include "core/hle/kernel/process.h" 25#include "core/hle/kernel/process.h"
25#include "core/hle/kernel/readable_event.h" 26#include "core/hle/kernel/readable_event.h"
26#include "core/hle/kernel/server_session.h" 27#include "core/hle/kernel/server_session.h"
27#include "core/hle/kernel/thread.h"
28#include "core/hle/kernel/time_manager.h" 28#include "core/hle/kernel/time_manager.h"
29#include "core/hle/kernel/writable_event.h" 29#include "core/hle/kernel/writable_event.h"
30#include "core/memory.h" 30#include "core/memory.h"
@@ -48,7 +48,7 @@ void SessionRequestHandler::ClientDisconnected(
48 48
49HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory, 49HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
50 std::shared_ptr<ServerSession> server_session, 50 std::shared_ptr<ServerSession> server_session,
51 std::shared_ptr<Thread> thread) 51 std::shared_ptr<KThread> thread)
52 : server_session(std::move(server_session)), 52 : server_session(std::move(server_session)),
53 thread(std::move(thread)), kernel{kernel}, memory{memory} { 53 thread(std::move(thread)), kernel{kernel}, memory{memory} {
54 cmd_buf[0] = 0; 54 cmd_buf[0] = 0;
@@ -182,7 +182,7 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(const HandleTabl
182 return RESULT_SUCCESS; 182 return RESULT_SUCCESS;
183} 183}
184 184
185ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) { 185ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(KThread& thread) {
186 auto& owner_process = *thread.GetOwnerProcess(); 186 auto& owner_process = *thread.GetOwnerProcess();
187 auto& handle_table = owner_process.GetHandleTable(); 187 auto& handle_table = owner_process.GetHandleTable();
188 188
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index 698f607e6..9f764c79a 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -40,7 +40,7 @@ class HLERequestContext;
 class KernelCore;
 class Process;
 class ServerSession;
-class Thread;
+class KThread;
 class ReadableEvent;
 class WritableEvent;
 
@@ -110,7 +110,7 @@ class HLERequestContext {
 public:
     explicit HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
                                std::shared_ptr<ServerSession> session,
-                               std::shared_ptr<Thread> thread);
+                               std::shared_ptr<KThread> thread);
     ~HLERequestContext();
 
     /// Returns a pointer to the IPC command buffer for this request.
@@ -126,15 +126,12 @@ public:
         return server_session;
     }
 
-    using WakeupCallback = std::function<void(
-        std::shared_ptr<Thread> thread, HLERequestContext& context, ThreadWakeupReason reason)>;
-
     /// Populates this context with data from the requesting process/thread.
     ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table,
                                                  u32_le* src_cmdbuf);
 
     /// Writes data from this context back to the requesting process/thread.
-    ResultCode WriteToOutgoingCommandBuffer(Thread& thread);
+    ResultCode WriteToOutgoingCommandBuffer(KThread& thread);
 
     u32_le GetCommand() const {
         return command;
@@ -267,11 +264,11 @@ public:
 
     std::string Description() const;
 
-    Thread& GetThread() {
+    KThread& GetThread() {
         return *thread;
     }
 
-    const Thread& GetThread() const {
+    const KThread& GetThread() const {
         return *thread;
     }
 
@@ -286,7 +283,7 @@ private:
 
     std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
     std::shared_ptr<Kernel::ServerSession> server_session;
-    std::shared_ptr<Thread> thread;
+    std::shared_ptr<KThread> thread;
     // TODO(yuriks): Check common usage of this and optimize size accordingly
     boost::container::small_vector<std::shared_ptr<Object>, 8> move_objects;
     boost::container::small_vector<std::shared_ptr<Object>, 8> copy_objects;
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index d9e702f13..1685d25bb 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -7,9 +7,9 @@
7#include "core/hle/kernel/k_address_arbiter.h" 7#include "core/hle/kernel/k_address_arbiter.h"
8#include "core/hle/kernel/k_scheduler.h" 8#include "core/hle/kernel/k_scheduler.h"
9#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 9#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
10#include "core/hle/kernel/k_thread.h"
10#include "core/hle/kernel/kernel.h" 11#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/svc_results.h" 12#include "core/hle/kernel/svc_results.h"
12#include "core/hle/kernel/thread.h"
13#include "core/hle/kernel/time_manager.h" 13#include "core/hle/kernel/time_manager.h"
14#include "core/memory.h" 14#include "core/memory.h"
15 15
@@ -96,7 +96,7 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
96 auto it = thread_tree.nfind_light({addr, -1}); 96 auto it = thread_tree.nfind_light({addr, -1});
97 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && 97 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
98 (it->GetAddressArbiterKey() == addr)) { 98 (it->GetAddressArbiterKey() == addr)) {
99 Thread* target_thread = std::addressof(*it); 99 KThread* target_thread = std::addressof(*it);
100 target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS); 100 target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
101 101
102 ASSERT(target_thread->IsWaitingForAddressArbiter()); 102 ASSERT(target_thread->IsWaitingForAddressArbiter());
@@ -125,7 +125,7 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
125 auto it = thread_tree.nfind_light({addr, -1}); 125 auto it = thread_tree.nfind_light({addr, -1});
126 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && 126 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
127 (it->GetAddressArbiterKey() == addr)) { 127 (it->GetAddressArbiterKey() == addr)) {
128 Thread* target_thread = std::addressof(*it); 128 KThread* target_thread = std::addressof(*it);
129 target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS); 129 target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
130 130
131 ASSERT(target_thread->IsWaitingForAddressArbiter()); 131 ASSERT(target_thread->IsWaitingForAddressArbiter());
@@ -215,7 +215,7 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
215 215
216 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && 216 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
217 (it->GetAddressArbiterKey() == addr)) { 217 (it->GetAddressArbiterKey() == addr)) {
218 Thread* target_thread = std::addressof(*it); 218 KThread* target_thread = std::addressof(*it);
219 target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS); 219 target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
220 220
221 ASSERT(target_thread->IsWaitingForAddressArbiter()); 221 ASSERT(target_thread->IsWaitingForAddressArbiter());
@@ -231,11 +231,10 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
231 231
232ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) { 232ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
233 // Prepare to wait. 233 // Prepare to wait.
234 Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); 234 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
235 Handle timer = InvalidHandle;
236 235
237 { 236 {
238 KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout); 237 KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
239 238
240 // Check that the thread isn't terminating. 239 // Check that the thread isn't terminating.
241 if (cur_thread->IsTerminationRequested()) { 240 if (cur_thread->IsTerminationRequested()) {
@@ -280,10 +279,7 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
280 } 279 }
281 280
282 // Cancel the timer wait. 281 // Cancel the timer wait.
283 if (timer != InvalidHandle) { 282 kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
284 auto& time_manager = kernel.TimeManager();
285 time_manager.UnscheduleTimeEvent(timer);
286 }
287 283
288 // Remove from the address arbiter. 284 // Remove from the address arbiter.
289 { 285 {
@@ -302,11 +298,10 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
302 298
303ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) { 299ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
304 // Prepare to wait. 300 // Prepare to wait.
305 Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); 301 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
306 Handle timer = InvalidHandle;
307 302
308 { 303 {
309 KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout); 304 KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
310 305
311 // Check that the thread isn't terminating. 306 // Check that the thread isn't terminating.
312 if (cur_thread->IsTerminationRequested()) { 307 if (cur_thread->IsTerminationRequested()) {
@@ -344,10 +339,7 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
344 } 339 }
345 340
346 // Cancel the timer wait. 341 // Cancel the timer wait.
347 if (timer != InvalidHandle) { 342 kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
348 auto& time_manager = kernel.TimeManager();
349 time_manager.UnscheduleTimeEvent(timer);
350 }
351 343
352 // Remove from the address arbiter. 344 // Remove from the address arbiter.
353 { 345 {
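
A self-contained sketch of the timer-cancel shape this hunk moves to: wakeup events are now keyed by the waiting thread itself rather than by a separate timer Handle, so cancellation is a harmless no-op when nothing was scheduled. The Thread and TimeManager types below are invented stand-ins, not yuzu's real classes:

#include <cstdint>
#include <unordered_map>

struct Thread {}; // stand-in for KThread

class TimeManager {
public:
    void ScheduleTimeEvent(Thread* thread, std::int64_t nanoseconds) {
        pending[thread] = nanoseconds;
    }
    void UnscheduleTimeEvent(Thread* thread) {
        pending.erase(thread); // no-op if no event was scheduled, so callers
                               // no longer need the InvalidHandle check
    }
private:
    std::unordered_map<Thread*, std::int64_t> pending;
};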
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 49a068310..f0ad8b390 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -10,11 +10,11 @@
10#include "core/hle/kernel/k_scheduler.h" 10#include "core/hle/kernel/k_scheduler.h"
11#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 11#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
12#include "core/hle/kernel/k_synchronization_object.h" 12#include "core/hle/kernel/k_synchronization_object.h"
13#include "core/hle/kernel/k_thread.h"
13#include "core/hle/kernel/kernel.h" 14#include "core/hle/kernel/kernel.h"
14#include "core/hle/kernel/process.h" 15#include "core/hle/kernel/process.h"
15#include "core/hle/kernel/svc_common.h" 16#include "core/hle/kernel/svc_common.h"
16#include "core/hle/kernel/svc_results.h" 17#include "core/hle/kernel/svc_results.h"
17#include "core/hle/kernel/thread.h"
18#include "core/memory.h" 18#include "core/memory.h"
19 19
20namespace Kernel { 20namespace Kernel {
@@ -66,7 +66,7 @@ KConditionVariable::KConditionVariable(Core::System& system_)
66KConditionVariable::~KConditionVariable() = default; 66KConditionVariable::~KConditionVariable() = default;
67 67
68ResultCode KConditionVariable::SignalToAddress(VAddr addr) { 68ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
69 Thread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread(); 69 KThread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread();
70 70
71 // Signal the address. 71 // Signal the address.
72 { 72 {
@@ -74,7 +74,7 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
74 74
75 // Remove waiter thread. 75 // Remove waiter thread.
76 s32 num_waiters{}; 76 s32 num_waiters{};
77 Thread* next_owner_thread = 77 KThread* next_owner_thread =
78 owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr); 78 owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
79 79
80 // Determine the next tag. 80 // Determine the next tag.
@@ -103,11 +103,11 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
103} 103}
104 104
105ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) { 105ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
106 Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); 106 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
107 107
108 // Wait for the address. 108 // Wait for the address.
109 { 109 {
110 std::shared_ptr<Thread> owner_thread; 110 std::shared_ptr<KThread> owner_thread;
111 ASSERT(!owner_thread); 111 ASSERT(!owner_thread);
112 { 112 {
113 KScopedSchedulerLock sl(kernel); 113 KScopedSchedulerLock sl(kernel);
@@ -126,7 +126,7 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
126 R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS); 126 R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS);
127 127
128 // Get the lock owner thread. 128 // Get the lock owner thread.
129 owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<Thread>(handle); 129 owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(handle);
130 R_UNLESS(owner_thread, Svc::ResultInvalidHandle); 130 R_UNLESS(owner_thread, Svc::ResultInvalidHandle);
131 131
132 // Update the lock. 132 // Update the lock.
@@ -143,7 +143,7 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
143 // Remove the thread as a waiter from the lock owner. 143 // Remove the thread as a waiter from the lock owner.
144 { 144 {
145 KScopedSchedulerLock sl(kernel); 145 KScopedSchedulerLock sl(kernel);
146 Thread* owner_thread = cur_thread->GetLockOwner(); 146 KThread* owner_thread = cur_thread->GetLockOwner();
147 if (owner_thread != nullptr) { 147 if (owner_thread != nullptr) {
148 owner_thread->RemoveWaiter(cur_thread); 148 owner_thread->RemoveWaiter(cur_thread);
149 } 149 }
@@ -154,7 +154,7 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
154 return cur_thread->GetWaitResult(std::addressof(dummy)); 154 return cur_thread->GetWaitResult(std::addressof(dummy));
155} 155}
156 156
157Thread* KConditionVariable::SignalImpl(Thread* thread) { 157KThread* KConditionVariable::SignalImpl(KThread* thread) {
158 // Check pre-conditions. 158 // Check pre-conditions.
159 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 159 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
160 160
@@ -174,7 +174,7 @@ Thread* KConditionVariable::SignalImpl(Thread* thread) {
174 } 174 }
175 } 175 }
176 176
177 Thread* thread_to_close = nullptr; 177 KThread* thread_to_close = nullptr;
178 if (can_access) { 178 if (can_access) {
179 if (prev_tag == InvalidHandle) { 179 if (prev_tag == InvalidHandle) {
180 // If nobody held the lock previously, we're all good. 180 // If nobody held the lock previously, we're all good.
@@ -182,7 +182,7 @@ Thread* KConditionVariable::SignalImpl(Thread* thread) {
182 thread->Wakeup(); 182 thread->Wakeup();
183 } else { 183 } else {
184 // Get the previous owner. 184 // Get the previous owner.
185 auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<Thread>( 185 auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(
186 prev_tag & ~Svc::HandleWaitMask); 186 prev_tag & ~Svc::HandleWaitMask);
187 187
188 if (owner_thread) { 188 if (owner_thread) {
@@ -210,8 +210,8 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
210 210
211 // TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using 211 // TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using
212 // std::shared_ptr. 212 // std::shared_ptr.
213 std::vector<std::shared_ptr<Thread>> thread_list; 213 std::vector<std::shared_ptr<KThread>> thread_list;
214 std::array<Thread*, MaxThreads> thread_array; 214 std::array<KThread*, MaxThreads> thread_array;
215 s32 num_to_close{}; 215 s32 num_to_close{};
216 216
217 // Perform signaling. 217 // Perform signaling.
@@ -222,9 +222,9 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
222 auto it = thread_tree.nfind_light({cv_key, -1}); 222 auto it = thread_tree.nfind_light({cv_key, -1});
223 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && 223 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
224 (it->GetConditionVariableKey() == cv_key)) { 224 (it->GetConditionVariableKey() == cv_key)) {
225 Thread* target_thread = std::addressof(*it); 225 KThread* target_thread = std::addressof(*it);
226 226
227 if (Thread* thread = SignalImpl(target_thread); thread != nullptr) { 227 if (KThread* thread = SignalImpl(target_thread); thread != nullptr) {
228 if (num_to_close < MaxThreads) { 228 if (num_to_close < MaxThreads) {
229 thread_array[num_to_close++] = thread; 229 thread_array[num_to_close++] = thread;
230 } else { 230 } else {
@@ -257,11 +257,10 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
257 257
258ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) { 258ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
259 // Prepare to wait. 259 // Prepare to wait.
260 Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); 260 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
261 Handle timer = InvalidHandle;
262 261
263 { 262 {
264 KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout); 263 KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
265 264
266 // Set the synced object. 265 // Set the synced object.
267 cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut); 266 cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
@@ -276,7 +275,7 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
276 { 275 {
277 // Remove waiter thread. 276 // Remove waiter thread.
278 s32 num_waiters{}; 277 s32 num_waiters{};
279 Thread* next_owner_thread = 278 KThread* next_owner_thread =
280 cur_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr); 279 cur_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
281 280
282 // Update for the next owner thread. 281 // Update for the next owner thread.
@@ -322,16 +321,13 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
322 } 321 }
323 322
324 // Cancel the timer wait. 323 // Cancel the timer wait.
325 if (timer != InvalidHandle) { 324 kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
326 auto& time_manager = kernel.TimeManager();
327 time_manager.UnscheduleTimeEvent(timer);
328 }
329 325
330 // Remove from the condition variable. 326 // Remove from the condition variable.
331 { 327 {
332 KScopedSchedulerLock sl(kernel); 328 KScopedSchedulerLock sl(kernel);
333 329
334 if (Thread* owner = cur_thread->GetLockOwner(); owner != nullptr) { 330 if (KThread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
335 owner->RemoveWaiter(cur_thread); 331 owner->RemoveWaiter(cur_thread);
336 } 332 }
337 333
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
index 98ed5b323..861dbd420 100644
--- a/src/core/hle/kernel/k_condition_variable.h
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -8,8 +8,8 @@
8#include "common/common_types.h" 8#include "common/common_types.h"
9 9
10#include "core/hle/kernel/k_scheduler.h" 10#include "core/hle/kernel/k_scheduler.h"
11#include "core/hle/kernel/k_thread.h"
11#include "core/hle/kernel/kernel.h" 12#include "core/hle/kernel/kernel.h"
12#include "core/hle/kernel/thread.h"
13#include "core/hle/result.h" 13#include "core/hle/result.h"
14 14
15namespace Core { 15namespace Core {
@@ -20,7 +20,7 @@ namespace Kernel {
20 20
21class KConditionVariable { 21class KConditionVariable {
22public: 22public:
23 using ThreadTree = typename Thread::ConditionVariableThreadTreeType; 23 using ThreadTree = typename KThread::ConditionVariableThreadTreeType;
24 24
25 explicit KConditionVariable(Core::System& system_); 25 explicit KConditionVariable(Core::System& system_);
26 ~KConditionVariable(); 26 ~KConditionVariable();
@@ -34,7 +34,7 @@ public:
34 [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout); 34 [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);
35 35
36private: 36private:
37 [[nodiscard]] Thread* SignalImpl(Thread* thread); 37 [[nodiscard]] KThread* SignalImpl(KThread* thread);
38 38
39 ThreadTree thread_tree; 39 ThreadTree thread_tree;
40 40
@@ -43,14 +43,14 @@ private:
43}; 43};
44 44
45inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree, 45inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
46 Thread* thread) { 46 KThread* thread) {
47 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 47 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
48 48
49 tree->erase(tree->iterator_to(*thread)); 49 tree->erase(tree->iterator_to(*thread));
50} 50}
51 51
52inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree, 52inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
53 Thread* thread) { 53 KThread* thread) {
54 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 54 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
55 55
56 tree->insert(*thread); 56 tree->insert(*thread);
diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp
new file mode 100644
index 000000000..f974022e8
--- /dev/null
+++ b/src/core/hle/kernel/k_light_lock.cpp
@@ -0,0 +1,130 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+
+namespace Kernel {
+
+void KLightLock::Lock() {
+    const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
+    const uintptr_t cur_thread_tag = (cur_thread | 1);
+
+    while (true) {
+        uintptr_t old_tag = tag.load(std::memory_order_relaxed);
+
+        while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1,
+                                          std::memory_order_acquire)) {
+            if ((old_tag | 1) == cur_thread_tag) {
+                return;
+            }
+        }
+
+        if ((old_tag == 0) || ((old_tag | 1) == cur_thread_tag)) {
+            break;
+        }
+
+        LockSlowPath(old_tag | 1, cur_thread);
+    }
+}
+
+void KLightLock::Unlock() {
+    const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
+    uintptr_t expected = cur_thread;
+    do {
+        if (expected != cur_thread) {
+            return UnlockSlowPath(cur_thread);
+        }
+    } while (!tag.compare_exchange_weak(expected, 0, std::memory_order_release));
+}
+
+void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
+    KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread);
+
+    // Pend the current thread waiting on the owner thread.
+    {
+        KScopedSchedulerLock sl{kernel};
+
+        // Ensure we actually have locking to do.
+        if (tag.load(std::memory_order_relaxed) != _owner) {
+            return;
+        }
+
+        // Add the current thread as a waiter on the owner.
+        KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL);
+        cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
+        owner_thread->AddWaiter(cur_thread);
+
+        // Set thread states.
+        if (cur_thread->GetState() == ThreadState::Runnable) {
+            cur_thread->SetState(ThreadState::Waiting);
+        } else {
+            KScheduler::SetSchedulerUpdateNeeded(kernel);
+        }
+
+        if (owner_thread->IsSuspended()) {
+            owner_thread->ContinueIfHasKernelWaiters();
+        }
+    }
+
+    // We're no longer waiting on the lock owner.
+    {
+        KScopedSchedulerLock sl{kernel};
+        KThread* owner_thread = cur_thread->GetLockOwner();
+        if (owner_thread) {
+            owner_thread->RemoveWaiter(cur_thread);
+            KScheduler::SetSchedulerUpdateNeeded(kernel);
+        }
+    }
+}
+
+void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
+    KThread* owner_thread = reinterpret_cast<KThread*>(_cur_thread);
+
+    // Unlock.
+    {
+        KScopedSchedulerLock sl{kernel};
+
+        // Get the next owner.
+        s32 num_waiters = 0;
+        KThread* next_owner = owner_thread->RemoveWaiterByKey(
+            std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));
+
+        // Pass the lock to the next owner.
+        uintptr_t next_tag = 0;
+        if (next_owner) {
+            next_tag = reinterpret_cast<uintptr_t>(next_owner);
+            if (num_waiters > 1) {
+                next_tag |= 0x1;
+            }
+
+            if (next_owner->GetState() == ThreadState::Waiting) {
+                next_owner->SetState(ThreadState::Runnable);
+            } else {
+                KScheduler::SetSchedulerUpdateNeeded(kernel);
+            }
+
+            if (next_owner->IsSuspended()) {
+                next_owner->ContinueIfHasKernelWaiters();
+            }
+        }
+
+        // We may have unsuspended in the process of acquiring the lock, so we'll re-suspend now if
+        // so.
+        if (owner_thread->IsSuspended()) {
+            owner_thread->TrySuspend();
+        }
+
+        // Write the new tag value.
+        tag.store(next_tag);
+    }
+}
+
+bool KLightLock::IsLockedByCurrentThread() const {
+    return (tag | 1ULL) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)) | 1ULL);
+}
+
+} // namespace Kernel
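
A self-contained sketch of the tag encoding this file relies on: 0 means unlocked, an owner thread pointer means locked with no waiters, and owner pointer | 1 means locked with waiters (thread objects are more than 1-byte aligned, so bit 0 is free to carry the waiter flag). FakeThread and main are invented for the example:

#include <cassert>
#include <cstdint>

struct FakeThread {} thread_storage; // stand-in for a KThread object

int main() {
    const auto owner = reinterpret_cast<std::uintptr_t>(&thread_storage);
    std::uintptr_t tag = 0;  // 0: unlocked
    tag = owner;             // owner pointer: locked, uncontended fast path
    tag = owner | 1;         // low bit set: locked with queued waiters
    assert((tag & ~std::uintptr_t{1}) == owner); // recover the owner pointer
    assert((tag | 1) == (owner | 1)); // the IsLockedByCurrentThread comparison
}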
diff --git a/src/core/hle/kernel/k_light_lock.h b/src/core/hle/kernel/k_light_lock.h
new file mode 100644
index 000000000..f4c45f76a
--- /dev/null
+++ b/src/core/hle/kernel/k_light_lock.h
@@ -0,0 +1,41 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <atomic>
+
+#include "common/common_types.h"
+#include "core/hle/kernel/k_scoped_lock.h"
+
+namespace Kernel {
+
+class KernelCore;
+
+class KLightLock {
+public:
+    explicit KLightLock(KernelCore& kernel_) : kernel{kernel_} {}
+
+    void Lock();
+
+    void Unlock();
+
+    void LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
+
+    void UnlockSlowPath(uintptr_t cur_thread);
+
+    bool IsLocked() const {
+        return tag != 0;
+    }
+
+    bool IsLockedByCurrentThread() const;
+
+private:
+    std::atomic<uintptr_t> tag{};
+    KernelCore& kernel;
+};
+
+using KScopedLightLock = KScopedLock<KLightLock>;
+
+} // namespace Kernel
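
KScopedLightLock is a KScopedLock over KLightLock; a self-contained sketch of that RAII shape, with SimpleLock standing in for KLightLock (which needs a KernelCore to construct) and a KScopedLock written here from the pattern, not copied from yuzu:

#include <mutex>

// Stand-in for KLightLock.
struct SimpleLock {
    void Lock() { m.lock(); }
    void Unlock() { m.unlock(); }
    std::mutex m;
};

template <typename T>
class KScopedLock {
public:
    explicit KScopedLock(T& l) : lock{l} { lock.Lock(); }
    ~KScopedLock() { lock.Unlock(); }
    KScopedLock(const KScopedLock&) = delete;
    KScopedLock& operator=(const KScopedLock&) = delete;
private:
    T& lock;
};

SimpleLock g_lock;

void DoProtectedWork() {
    KScopedLock<SimpleLock> lk{g_lock}; // Lock() runs here
    // ... critical section ...
}                                       // Unlock() runs on scope exit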
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
index 0dc929040..13d628b85 100644
--- a/src/core/hle/kernel/k_priority_queue.h
+++ b/src/core/hle/kernel/k_priority_queue.h
@@ -18,7 +18,7 @@
 
 namespace Kernel {
 
-class Thread;
+class KThread;
 
 template <typename T>
 concept KPriorityQueueAffinityMask = !std::is_reference_v<T> && requires(T & t) {
@@ -367,7 +367,7 @@ public:
         this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
     }
 
-    constexpr Thread* MoveToScheduledBack(Member* member) {
+    constexpr KThread* MoveToScheduledBack(Member* member) {
         return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(),
                                                 member);
     }
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 12b5619fb..bb5f43b53 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -17,25 +17,30 @@
17#include "core/cpu_manager.h" 17#include "core/cpu_manager.h"
18#include "core/hle/kernel/k_scheduler.h" 18#include "core/hle/kernel/k_scheduler.h"
19#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 19#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
20#include "core/hle/kernel/k_thread.h"
20#include "core/hle/kernel/kernel.h" 21#include "core/hle/kernel/kernel.h"
21#include "core/hle/kernel/physical_core.h" 22#include "core/hle/kernel/physical_core.h"
22#include "core/hle/kernel/process.h" 23#include "core/hle/kernel/process.h"
23#include "core/hle/kernel/thread.h"
24#include "core/hle/kernel/time_manager.h" 24#include "core/hle/kernel/time_manager.h"
25 25
26namespace Kernel { 26namespace Kernel {
27 27
28static void IncrementScheduledCount(Kernel::Thread* thread) { 28static void IncrementScheduledCount(Kernel::KThread* thread) {
29 if (auto process = thread->GetOwnerProcess(); process) { 29 if (auto process = thread->GetOwnerProcess(); process) {
30 process->IncrementScheduledCount(); 30 process->IncrementScheduledCount();
31 } 31 }
32} 32}
33 33
34void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule, 34void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule) {
35 Core::EmuThreadHandle global_thread) { 35 auto scheduler = kernel.CurrentScheduler();
36 const u32 current_core = global_thread.host_handle; 36
37 bool must_context_switch = global_thread.guest_handle != InvalidHandle && 37 u32 current_core{0xF};
38 (current_core < Core::Hardware::NUM_CPU_CORES); 38 bool must_context_switch{};
39 if (scheduler) {
40 current_core = scheduler->core_id;
41 // TODO(bunnei): Should be set to true when we deprecate single core
42 must_context_switch = !kernel.IsPhantomModeForSingleCore();
43 }
39 44
40 while (cores_pending_reschedule != 0) { 45 while (cores_pending_reschedule != 0) {
41 const auto core = static_cast<u32>(std::countr_zero(cores_pending_reschedule)); 46 const auto core = static_cast<u32>(std::countr_zero(cores_pending_reschedule));
@@ -56,28 +61,27 @@ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedul
56 } 61 }
57} 62}
58 63
59u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { 64u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
60 std::scoped_lock lock{guard}; 65 std::scoped_lock lock{guard};
61 if (Thread* prev_highest_thread = this->state.highest_priority_thread; 66 if (KThread* prev_highest_thread = state.highest_priority_thread;
62 prev_highest_thread != highest_thread) { 67 prev_highest_thread != highest_thread) {
63 if (prev_highest_thread != nullptr) { 68 if (prev_highest_thread != nullptr) {
64 IncrementScheduledCount(prev_highest_thread); 69 IncrementScheduledCount(prev_highest_thread);
65 prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks()); 70 prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks());
66 } 71 }
67 if (this->state.should_count_idle) { 72 if (state.should_count_idle) {
68 if (highest_thread != nullptr) { 73 if (highest_thread != nullptr) {
69 // if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) { 74 if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
70 // process->SetRunningThread(this->core_id, highest_thread, 75 process->SetRunningThread(core_id, highest_thread, state.idle_count);
71 // this->state.idle_count); 76 }
72 //}
73 } else { 77 } else {
74 this->state.idle_count++; 78 state.idle_count++;
75 } 79 }
76 } 80 }
77 81
78 this->state.highest_priority_thread = highest_thread; 82 state.highest_priority_thread = highest_thread;
79 this->state.needs_scheduling = true; 83 state.needs_scheduling.store(true);
80 return (1ULL << this->core_id); 84 return (1ULL << core_id);
81 } else { 85 } else {
82 return 0; 86 return 0;
83 } 87 }
@@ -90,16 +94,29 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
90 ClearSchedulerUpdateNeeded(kernel); 94 ClearSchedulerUpdateNeeded(kernel);
91 95
92 u64 cores_needing_scheduling = 0, idle_cores = 0; 96 u64 cores_needing_scheduling = 0, idle_cores = 0;
93 Thread* top_threads[Core::Hardware::NUM_CPU_CORES]; 97 KThread* top_threads[Core::Hardware::NUM_CPU_CORES];
94 auto& priority_queue = GetPriorityQueue(kernel); 98 auto& priority_queue = GetPriorityQueue(kernel);
95 99
96 /// We want to go over all cores, finding the highest priority thread and determining if 100 /// We want to go over all cores, finding the highest priority thread and determining if
97 /// scheduling is needed for that core. 101 /// scheduling is needed for that core.
98 for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { 102 for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
99 Thread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id)); 103 KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
100 if (top_thread != nullptr) { 104 if (top_thread != nullptr) {
101 // If the thread has no waiters, we need to check if the process has a thread pinned. 105 // If the thread has no waiters, we need to check if the process has a thread pinned.
102 // TODO(bunnei): Implement thread pinning 106 if (top_thread->GetNumKernelWaiters() == 0) {
107 if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) {
108 if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id));
109 pinned != nullptr && pinned != top_thread) {
110 // We prefer our parent's pinned thread if possible. However, we also don't
111 // want to schedule un-runnable threads.
112 if (pinned->GetRawState() == ThreadState::Runnable) {
113 top_thread = pinned;
114 } else {
115 top_thread = nullptr;
116 }
117 }
118 }
119 }
103 } else { 120 } else {
104 idle_cores |= (1ULL << core_id); 121 idle_cores |= (1ULL << core_id);
105 } 122 }
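
The pinned-thread preference added above condenses to one selection rule: given a core's top thread with no kernel waiters, prefer the owning process's pinned thread for that core, but only if it is actually runnable; otherwise schedule nothing on the core. A condensed restatement for review, using the same accessors (not new code):

    KThread* ApplyPinnedPreference(KThread* top, s32 core) {
        if (top == nullptr || top->GetNumKernelWaiters() != 0) {
            return top;
        }
        if (Process* parent = top->GetOwnerProcess(); parent != nullptr) {
            if (KThread* pinned = parent->GetPinnedThread(core);
                pinned != nullptr && pinned != top) {
                // Prefer the pinned thread, but never an un-runnable one.
                return pinned->GetRawState() == ThreadState::Runnable ? pinned : nullptr;
            }
        }
        return top;
    }
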
@@ -112,7 +129,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
112 // Idle cores are bad. We're going to try to migrate threads to each idle core in turn. 129 // Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
113 while (idle_cores != 0) { 130 while (idle_cores != 0) {
114 const auto core_id = static_cast<u32>(std::countr_zero(idle_cores)); 131 const auto core_id = static_cast<u32>(std::countr_zero(idle_cores));
115 if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) { 132 if (KThread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
116 s32 migration_candidates[Core::Hardware::NUM_CPU_CORES]; 133 s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
117 size_t num_candidates = 0; 134 size_t num_candidates = 0;
118 135
@@ -120,7 +137,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
120 while (suggested != nullptr) { 137 while (suggested != nullptr) {
121 // Check if the suggested thread is the top thread on its core. 138 // Check if the suggested thread is the top thread on its core.
122 const s32 suggested_core = suggested->GetActiveCore(); 139 const s32 suggested_core = suggested->GetActiveCore();
123 if (Thread* top_thread = 140 if (KThread* top_thread =
124 (suggested_core >= 0) ? top_threads[suggested_core] : nullptr; 141 (suggested_core >= 0) ? top_threads[suggested_core] : nullptr;
125 top_thread != suggested) { 142 top_thread != suggested) {
126 // Make sure we're not dealing with threads too high priority for migration. 143 // Make sure we're not dealing with threads too high priority for migration.
@@ -152,7 +169,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
152 // Check if there's some other thread that can run on the candidate core. 169 // Check if there's some other thread that can run on the candidate core.
153 const s32 candidate_core = migration_candidates[i]; 170 const s32 candidate_core = migration_candidates[i];
154 suggested = top_threads[candidate_core]; 171 suggested = top_threads[candidate_core];
155 if (Thread* next_on_candidate_core = 172 if (KThread* next_on_candidate_core =
156 priority_queue.GetScheduledNext(candidate_core, suggested); 173 priority_queue.GetScheduledNext(candidate_core, suggested);
157 next_on_candidate_core != nullptr) { 174 next_on_candidate_core != nullptr) {
158 // The candidate core can run some other thread! We'll migrate its current 175 // The candidate core can run some other thread! We'll migrate its current
@@ -182,7 +199,20 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
182 return cores_needing_scheduling; 199 return cores_needing_scheduling;
183} 200}
184 201
185void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, ThreadState old_state) { 202void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) {
203 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
204 for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) {
205 // Get an atomic reference to the core scheduler's previous thread.
206 std::atomic_ref<KThread*> prev_thread(kernel.Scheduler(static_cast<s32>(i)).prev_thread);
207 static_assert(std::atomic_ref<KThread*>::is_always_lock_free);
208
209 // Atomically clear the previous thread if it's our target.
210 KThread* compare = thread;
211 prev_thread.compare_exchange_strong(compare, nullptr);
212 }
213}
214
215void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) {
186 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 216 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
187 217
188 // Check if the state has changed, because if it hasn't there's nothing to do. 218 // Check if the state has changed, because if it hasn't there's nothing to do.
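
ClearPreviousThread() above is notable as a use of C++20 std::atomic_ref: it gives atomic semantics to the plain KThread* prev_thread member without changing its declared type. A self-contained sketch of the same clear-if-match pattern:

    #include <atomic>

    struct CoreSlot {
        int* prev{}; // plain member, made atomic only at the point of use
    };

    void ClearIfMatch(CoreSlot& slot, int* target) {
        std::atomic_ref<int*> ref(slot.prev);
        static_assert(std::atomic_ref<int*>::is_always_lock_free);
        int* expected = target;
        // Null the slot only if it still holds 'target'; on failure the
        // observed value lands in 'expected' and the slot is left untouched.
        ref.compare_exchange_strong(expected, nullptr);
    }
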
@@ -205,7 +235,7 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, Thread
205 } 235 }
206} 236}
207 237
208void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, s32 old_priority) { 238void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority) {
209 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 239 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
210 240
211 // If the thread is runnable, we want to change its priority in the queue. 241 // If the thread is runnable, we want to change its priority in the queue.
@@ -217,7 +247,7 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, s32
217 } 247 }
218} 248}
219 249
220void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread, 250void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
221 const KAffinityMask& old_affinity, s32 old_core) { 251 const KAffinityMask& old_affinity, s32 old_core) {
222 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 252 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
223 253
@@ -237,8 +267,8 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
237 auto& priority_queue = GetPriorityQueue(kernel); 267 auto& priority_queue = GetPriorityQueue(kernel);
238 268
239 // Rotate the front of the queue to the end. 269 // Rotate the front of the queue to the end.
240 Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority); 270 KThread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
241 Thread* next_thread = nullptr; 271 KThread* next_thread = nullptr;
242 if (top_thread != nullptr) { 272 if (top_thread != nullptr) {
243 next_thread = priority_queue.MoveToScheduledBack(top_thread); 273 next_thread = priority_queue.MoveToScheduledBack(top_thread);
244 if (next_thread != top_thread) { 274 if (next_thread != top_thread) {
@@ -249,11 +279,11 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
249 279
250 // While we have a suggested thread, try to migrate it! 280 // While we have a suggested thread, try to migrate it!
251 { 281 {
252 Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority); 282 KThread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
253 while (suggested != nullptr) { 283 while (suggested != nullptr) {
254 // Check if the suggested thread is the top thread on its core. 284 // Check if the suggested thread is the top thread on its core.
255 const s32 suggested_core = suggested->GetActiveCore(); 285 const s32 suggested_core = suggested->GetActiveCore();
256 if (Thread* top_on_suggested_core = 286 if (KThread* top_on_suggested_core =
257 (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) 287 (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
258 : nullptr; 288 : nullptr;
259 top_on_suggested_core != suggested) { 289 top_on_suggested_core != suggested) {
@@ -285,7 +315,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
285 // Now that we might have migrated a thread with the same priority, check if we can do better. 315 // Now that we might have migrated a thread with the same priority, check if we can do better.
286 316
287 { 317 {
288 Thread* best_thread = priority_queue.GetScheduledFront(core_id); 318 KThread* best_thread = priority_queue.GetScheduledFront(core_id);
289 if (best_thread == GetCurrentThread()) { 319 if (best_thread == GetCurrentThread()) {
290 best_thread = priority_queue.GetScheduledNext(core_id, best_thread); 320 best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
291 } 321 }
@@ -293,7 +323,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
293 // If the best thread we can choose has a priority the same or worse than ours, try to 323 // If the best thread we can choose has a priority the same or worse than ours, try to
294 // migrate a higher priority thread. 324 // migrate a higher priority thread.
295 if (best_thread != nullptr && best_thread->GetPriority() >= priority) { 325 if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
296 Thread* suggested = priority_queue.GetSuggestedFront(core_id); 326 KThread* suggested = priority_queue.GetSuggestedFront(core_id);
297 while (suggested != nullptr) { 327 while (suggested != nullptr) {
298 // If the suggestion's priority is the same as ours, don't bother. 328 // If the suggestion's priority is the same as ours, don't bother.
299 if (suggested->GetPriority() >= best_thread->GetPriority()) { 329 if (suggested->GetPriority() >= best_thread->GetPriority()) {
@@ -302,7 +332,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
302 332
303 // Check if the suggested thread is the top thread on its core. 333 // Check if the suggested thread is the top thread on its core.
304 const s32 suggested_core = suggested->GetActiveCore(); 334 const s32 suggested_core = suggested->GetActiveCore();
305 if (Thread* top_on_suggested_core = 335 if (KThread* top_on_suggested_core =
306 (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) 336 (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
307 : nullptr; 337 : nullptr;
308 top_on_suggested_core != suggested) { 338 top_on_suggested_core != suggested) {
@@ -352,12 +382,14 @@ void KScheduler::DisableScheduling(KernelCore& kernel) {
352 } 382 }
353} 383}
354 384
355void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling, 385void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
356 Core::EmuThreadHandle global_thread) {
357 if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { 386 if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
358 scheduler->GetCurrentThread()->EnableDispatch(); 387 ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
388 if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
389 scheduler->GetCurrentThread()->EnableDispatch();
390 }
359 } 391 }
360 RescheduleCores(kernel, cores_needing_scheduling, global_thread); 392 RescheduleCores(kernel, cores_needing_scheduling);
361} 393}
362 394
363u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { 395u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -372,15 +404,13 @@ KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
372 return kernel.GlobalSchedulerContext().priority_queue; 404 return kernel.GlobalSchedulerContext().priority_queue;
373} 405}
374 406
375void KScheduler::YieldWithoutCoreMigration() { 407void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
376 auto& kernel = system.Kernel();
377
378 // Validate preconditions. 408 // Validate preconditions.
379 ASSERT(CanSchedule(kernel)); 409 ASSERT(CanSchedule(kernel));
380 ASSERT(kernel.CurrentProcess() != nullptr); 410 ASSERT(kernel.CurrentProcess() != nullptr);
381 411
382 // Get the current thread and process. 412 // Get the current thread and process.
383 Thread& cur_thread = *GetCurrentThread(); 413 KThread& cur_thread = Kernel::GetCurrentThread(kernel);
384 Process& cur_process = *kernel.CurrentProcess(); 414 Process& cur_process = *kernel.CurrentProcess();
385 415
386 // If the thread's yield count matches, there's nothing for us to do. 416 // If the thread's yield count matches, there's nothing for us to do.
@@ -398,7 +428,7 @@ void KScheduler::YieldWithoutCoreMigration() {
398 const auto cur_state = cur_thread.GetRawState(); 428 const auto cur_state = cur_thread.GetRawState();
399 if (cur_state == ThreadState::Runnable) { 429 if (cur_state == ThreadState::Runnable) {
400 // Put the current thread at the back of the queue. 430 // Put the current thread at the back of the queue.
401 Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); 431 KThread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
402 IncrementScheduledCount(std::addressof(cur_thread)); 432 IncrementScheduledCount(std::addressof(cur_thread));
403 433
404 // If the next thread is different, we have an update to perform. 434 // If the next thread is different, we have an update to perform.
@@ -413,15 +443,13 @@ void KScheduler::YieldWithoutCoreMigration() {
413 } 443 }
414} 444}
415 445
416void KScheduler::YieldWithCoreMigration() { 446void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
417 auto& kernel = system.Kernel();
418
419 // Validate preconditions. 447 // Validate preconditions.
420 ASSERT(CanSchedule(kernel)); 448 ASSERT(CanSchedule(kernel));
421 ASSERT(kernel.CurrentProcess() != nullptr); 449 ASSERT(kernel.CurrentProcess() != nullptr);
422 450
423 // Get the current thread and process. 451 // Get the current thread and process.
424 Thread& cur_thread = *GetCurrentThread(); 452 KThread& cur_thread = Kernel::GetCurrentThread(kernel);
425 Process& cur_process = *kernel.CurrentProcess(); 453 Process& cur_process = *kernel.CurrentProcess();
426 454
427 // If the thread's yield count matches, there's nothing for us to do. 455 // If the thread's yield count matches, there's nothing for us to do.
@@ -442,17 +470,17 @@ void KScheduler::YieldWithCoreMigration() {
442 const s32 core_id = cur_thread.GetActiveCore(); 470 const s32 core_id = cur_thread.GetActiveCore();
443 471
444 // Put the current thread at the back of the queue. 472 // Put the current thread at the back of the queue.
445 Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); 473 KThread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
446 IncrementScheduledCount(std::addressof(cur_thread)); 474 IncrementScheduledCount(std::addressof(cur_thread));
447 475
448 // While we have a suggested thread, try to migrate it! 476 // While we have a suggested thread, try to migrate it!
449 bool recheck = false; 477 bool recheck = false;
450 Thread* suggested = priority_queue.GetSuggestedFront(core_id); 478 KThread* suggested = priority_queue.GetSuggestedFront(core_id);
451 while (suggested != nullptr) { 479 while (suggested != nullptr) {
452 // Check if the suggested thread is the thread running on its core. 480 // Check if the suggested thread is the thread running on its core.
453 const s32 suggested_core = suggested->GetActiveCore(); 481 const s32 suggested_core = suggested->GetActiveCore();
454 482
455 if (Thread* running_on_suggested_core = 483 if (KThread* running_on_suggested_core =
456 (suggested_core >= 0) 484 (suggested_core >= 0)
457 ? kernel.Scheduler(suggested_core).state.highest_priority_thread 485 ? kernel.Scheduler(suggested_core).state.highest_priority_thread
458 : nullptr; 486 : nullptr;
@@ -503,15 +531,13 @@ void KScheduler::YieldWithCoreMigration() {
503 } 531 }
504} 532}
505 533
506void KScheduler::YieldToAnyThread() { 534void KScheduler::YieldToAnyThread(KernelCore& kernel) {
507 auto& kernel = system.Kernel();
508
509 // Validate preconditions. 535 // Validate preconditions.
510 ASSERT(CanSchedule(kernel)); 536 ASSERT(CanSchedule(kernel));
511 ASSERT(kernel.CurrentProcess() != nullptr); 537 ASSERT(kernel.CurrentProcess() != nullptr);
512 538
513 // Get the current thread and process. 539 // Get the current thread and process.
514 Thread& cur_thread = *GetCurrentThread(); 540 KThread& cur_thread = Kernel::GetCurrentThread(kernel);
515 Process& cur_process = *kernel.CurrentProcess(); 541 Process& cur_process = *kernel.CurrentProcess();
516 542
517 // If the thread's yield count matches, there's nothing for us to do. 543 // If the thread's yield count matches, there's nothing for us to do.
@@ -539,11 +565,11 @@ void KScheduler::YieldToAnyThread() {
539 // If there's nothing scheduled, we can try to perform a migration. 565 // If there's nothing scheduled, we can try to perform a migration.
540 if (priority_queue.GetScheduledFront(core_id) == nullptr) { 566 if (priority_queue.GetScheduledFront(core_id) == nullptr) {
541 // While we have a suggested thread, try to migrate it! 567 // While we have a suggested thread, try to migrate it!
542 Thread* suggested = priority_queue.GetSuggestedFront(core_id); 568 KThread* suggested = priority_queue.GetSuggestedFront(core_id);
543 while (suggested != nullptr) { 569 while (suggested != nullptr) {
544 // Check if the suggested thread is the top thread on its core. 570 // Check if the suggested thread is the top thread on its core.
545 const s32 suggested_core = suggested->GetActiveCore(); 571 const s32 suggested_core = suggested->GetActiveCore();
546 if (Thread* top_on_suggested_core = 572 if (KThread* top_on_suggested_core =
547 (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) 573 (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
548 : nullptr; 574 : nullptr;
549 top_on_suggested_core != suggested) { 575 top_on_suggested_core != suggested) {
@@ -581,22 +607,21 @@ void KScheduler::YieldToAnyThread() {
581 } 607 }
582} 608}
583 609
584KScheduler::KScheduler(Core::System& system, std::size_t core_id) 610KScheduler::KScheduler(Core::System& system, s32 core_id) : system(system), core_id(core_id) {
585 : system(system), core_id(core_id) {
586 switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this); 611 switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
587 this->state.needs_scheduling = true; 612 state.needs_scheduling.store(true);
588 this->state.interrupt_task_thread_runnable = false; 613 state.interrupt_task_thread_runnable = false;
589 this->state.should_count_idle = false; 614 state.should_count_idle = false;
590 this->state.idle_count = 0; 615 state.idle_count = 0;
591 this->state.idle_thread_stack = nullptr; 616 state.idle_thread_stack = nullptr;
592 this->state.highest_priority_thread = nullptr; 617 state.highest_priority_thread = nullptr;
593} 618}
594 619
595KScheduler::~KScheduler() = default; 620KScheduler::~KScheduler() = default;
596 621
597Thread* KScheduler::GetCurrentThread() const { 622KThread* KScheduler::GetCurrentThread() const {
598 if (current_thread) { 623 if (auto result = current_thread.load(); result) {
599 return current_thread; 624 return result;
600 } 625 }
601 return idle_thread; 626 return idle_thread;
602} 627}
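
GetCurrentThread() now performs a single atomic load and substitutes the idle thread when nothing is set, so callers never observe null. The same pattern, isolated:

    template <typename T>
    T* LoadOrIdle(const std::atomic<T*>& current, T* idle) {
        // One atomic load; fall back to the idle value rather than null.
        if (T* t = current.load(); t != nullptr) {
            return t;
        }
        return idle;
    }
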
@@ -613,7 +638,7 @@ void KScheduler::RescheduleCurrentCore() {
613 phys_core.ClearInterrupt(); 638 phys_core.ClearInterrupt();
614 } 639 }
615 guard.lock(); 640 guard.lock();
616 if (this->state.needs_scheduling) { 641 if (state.needs_scheduling.load()) {
617 Schedule(); 642 Schedule();
618 } else { 643 } else {
619 guard.unlock(); 644 guard.unlock();
@@ -624,66 +649,76 @@ void KScheduler::OnThreadStart() {
624 SwitchContextStep2(); 649 SwitchContextStep2();
625} 650}
626 651
627void KScheduler::Unload(Thread* thread) { 652void KScheduler::Unload(KThread* thread) {
653 LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
654
628 if (thread) { 655 if (thread) {
629 thread->SetIsRunning(false); 656 if (thread->IsCallingSvc()) {
630 if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) {
631 system.ArmInterface(core_id).ExceptionalExit(); 657 system.ArmInterface(core_id).ExceptionalExit();
632 thread->SetContinuousOnSVC(false); 658 thread->ClearIsCallingSvc();
633 } 659 }
634 if (!thread->IsHLEThread() && !thread->HasExited()) { 660 if (!thread->IsTerminationRequested()) {
661 prev_thread = thread;
662
635 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); 663 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
636 cpu_core.SaveContext(thread->GetContext32()); 664 cpu_core.SaveContext(thread->GetContext32());
637 cpu_core.SaveContext(thread->GetContext64()); 665 cpu_core.SaveContext(thread->GetContext64());
638 // Save the TPIDR_EL0 system register in case it was modified. 666 // Save the TPIDR_EL0 system register in case it was modified.
639 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); 667 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
640 cpu_core.ClearExclusiveState(); 668 cpu_core.ClearExclusiveState();
669 } else {
670 prev_thread = nullptr;
641 } 671 }
642 thread->context_guard.unlock(); 672 thread->context_guard.unlock();
643 } 673 }
644} 674}
645 675
646void KScheduler::Reload(Thread* thread) { 676void KScheduler::Reload(KThread* thread) {
677 LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread ? thread->GetName() : "nullptr");
678
647 if (thread) { 679 if (thread) {
648 ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable."); 680 ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
649 681
650 // Cancel any outstanding wakeup events for this thread
651 thread->SetIsRunning(true);
652 thread->SetWasRunning(false);
653
654 auto* const thread_owner_process = thread->GetOwnerProcess(); 682 auto* const thread_owner_process = thread->GetOwnerProcess();
655 if (thread_owner_process != nullptr) { 683 if (thread_owner_process != nullptr) {
656 system.Kernel().MakeCurrentProcess(thread_owner_process); 684 system.Kernel().MakeCurrentProcess(thread_owner_process);
657 } 685 }
658 if (!thread->IsHLEThread()) { 686
659 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); 687 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
660 cpu_core.LoadContext(thread->GetContext32()); 688 cpu_core.LoadContext(thread->GetContext32());
661 cpu_core.LoadContext(thread->GetContext64()); 689 cpu_core.LoadContext(thread->GetContext64());
662 cpu_core.SetTlsAddress(thread->GetTLSAddress()); 690 cpu_core.SetTlsAddress(thread->GetTLSAddress());
663 cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0()); 691 cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
664 cpu_core.ClearExclusiveState(); 692 cpu_core.ClearExclusiveState();
665 }
666 } 693 }
667} 694}
668 695
669void KScheduler::SwitchContextStep2() { 696void KScheduler::SwitchContextStep2() {
670 // Load context of new thread 697 // Load context of new thread
671 Reload(current_thread); 698 Reload(current_thread.load());
672 699
673 RescheduleCurrentCore(); 700 RescheduleCurrentCore();
674} 701}
675 702
676void KScheduler::ScheduleImpl() { 703void KScheduler::ScheduleImpl() {
677 Thread* previous_thread = current_thread; 704 KThread* previous_thread = current_thread.load();
678 current_thread = state.highest_priority_thread; 705 KThread* next_thread = state.highest_priority_thread;
679 706
680 this->state.needs_scheduling = false; 707 state.needs_scheduling = false;
708
709 // We never want to schedule a null thread, so use the idle thread if we don't have a next.
710 if (next_thread == nullptr) {
711 next_thread = idle_thread;
712 }
681 713
682 if (current_thread == previous_thread) { 714 // If we're not actually switching thread, there's nothing to do.
715 if (next_thread == current_thread.load()) {
683 guard.unlock(); 716 guard.unlock();
684 return; 717 return;
685 } 718 }
686 719
720 current_thread.store(next_thread);
721
687 Process* const previous_process = system.Kernel().CurrentProcess(); 722 Process* const previous_process = system.Kernel().CurrentProcess();
688 723
689 UpdateLastContextSwitchTime(previous_thread, previous_process); 724 UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -714,28 +749,29 @@ void KScheduler::SwitchToCurrent() {
714 while (true) { 749 while (true) {
715 { 750 {
716 std::scoped_lock lock{guard}; 751 std::scoped_lock lock{guard};
717 current_thread = state.highest_priority_thread; 752 current_thread.store(state.highest_priority_thread);
718 this->state.needs_scheduling = false; 753 state.needs_scheduling.store(false);
719 } 754 }
720 const auto is_switch_pending = [this] { 755 const auto is_switch_pending = [this] {
721 std::scoped_lock lock{guard}; 756 std::scoped_lock lock{guard};
722 return state.needs_scheduling.load(std::memory_order_relaxed); 757 return state.needs_scheduling.load();
723 }; 758 };
724 do { 759 do {
725 if (current_thread != nullptr && !current_thread->IsHLEThread()) { 760 auto next_thread = current_thread.load();
726 current_thread->context_guard.lock(); 761 if (next_thread != nullptr) {
727 if (current_thread->GetRawState() != ThreadState::Runnable) { 762 next_thread->context_guard.lock();
728 current_thread->context_guard.unlock(); 763 if (next_thread->GetRawState() != ThreadState::Runnable) {
764 next_thread->context_guard.unlock();
729 break; 765 break;
730 } 766 }
731 if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) { 767 if (next_thread->GetActiveCore() != core_id) {
732 current_thread->context_guard.unlock(); 768 next_thread->context_guard.unlock();
733 break; 769 break;
734 } 770 }
735 } 771 }
736 std::shared_ptr<Common::Fiber>* next_context; 772 std::shared_ptr<Common::Fiber>* next_context;
737 if (current_thread != nullptr) { 773 if (next_thread != nullptr) {
738 next_context = &current_thread->GetHostContext(); 774 next_context = &next_thread->GetHostContext();
739 } else { 775 } else {
740 next_context = &idle_thread->GetHostContext(); 776 next_context = &idle_thread->GetHostContext();
741 } 777 }
@@ -744,13 +780,13 @@ void KScheduler::SwitchToCurrent() {
744 } 780 }
745} 781}
746 782
747void KScheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { 783void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process) {
748 const u64 prev_switch_ticks = last_context_switch_time; 784 const u64 prev_switch_ticks = last_context_switch_time;
749 const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks(); 785 const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
750 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; 786 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
751 787
752 if (thread != nullptr) { 788 if (thread != nullptr) {
753 thread->UpdateCPUTimeTicks(update_ticks); 789 thread->AddCpuTime(core_id, update_ticks);
754 } 790 }
755 791
756 if (process != nullptr) { 792 if (process != nullptr) {
@@ -764,15 +800,10 @@ void KScheduler::Initialize() {
764 std::string name = "Idle Thread Id:" + std::to_string(core_id); 800 std::string name = "Idle Thread Id:" + std::to_string(core_id);
765 std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc(); 801 std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
766 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); 802 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
767 ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE); 803 auto thread_res = KThread::Create(system, ThreadType::Main, name, 0,
768 auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0, 804 KThread::IdleThreadPriority, 0, static_cast<u32>(core_id), 0,
769 nullptr, std::move(init_func), init_func_parameter); 805 nullptr, std::move(init_func), init_func_parameter);
770 idle_thread = thread_res.Unwrap().get(); 806 idle_thread = thread_res.Unwrap().get();
771
772 {
773 KScopedSchedulerLock lock{system.Kernel()};
774 idle_thread->SetState(ThreadState::Runnable);
775 }
776} 807}
777 808
778KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel) 809KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
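
With the three yield entry points now static and taking KernelCore& directly, callers such as the svcSleepThread handler can dispatch without a scheduler instance. A hedged sketch of that dispatch (the 0/-1/-2 sentinel durations follow the documented svcSleepThread contract; treat the mapping here as an assumption, not part of this change):

    void SleepOrYield(KernelCore& kernel, s64 nanoseconds) {
        switch (nanoseconds) {
        case 0:
            KScheduler::YieldWithoutCoreMigration(kernel);
            break;
        case -1:
            KScheduler::YieldWithCoreMigration(kernel);
            break;
        case -2:
            KScheduler::YieldToAnyThread(kernel);
            break;
        default:
            // A positive duration is an actual sleep, handled elsewhere.
            break;
        }
    }
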
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 783665123..f595b9a5c 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -29,29 +29,33 @@ namespace Kernel {
29class KernelCore; 29class KernelCore;
30class Process; 30class Process;
31class SchedulerLock; 31class SchedulerLock;
32class Thread; 32class KThread;
33 33
34class KScheduler final { 34class KScheduler final {
35public: 35public:
36 explicit KScheduler(Core::System& system, std::size_t core_id); 36 explicit KScheduler(Core::System& system, s32 core_id);
37 ~KScheduler(); 37 ~KScheduler();
38 38
39 /// Reschedules to the next available thread (call after current thread is suspended) 39 /// Reschedules to the next available thread (call after current thread is suspended)
40 void RescheduleCurrentCore(); 40 void RescheduleCurrentCore();
41 41
42 /// Reschedules cores pending reschedule, to be called on EnableScheduling. 42 /// Reschedules cores pending reschedule, to be called on EnableScheduling.
43 static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule, 43 static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule);
44 Core::EmuThreadHandle global_thread);
45 44
46 /// The next two are for SingleCore Only. 45 /// The next two are for SingleCore Only.
47 /// Unload current thread before preempting core. 46 /// Unload current thread before preempting core.
48 void Unload(Thread* thread); 47 void Unload(KThread* thread);
49 48
50 /// Reload current thread after core preemption. 49 /// Reload current thread after core preemption.
51 void Reload(Thread* thread); 50 void Reload(KThread* thread);
52 51
53 /// Gets the current running thread 52 /// Gets the current running thread
54 [[nodiscard]] Thread* GetCurrentThread() const; 53 [[nodiscard]] KThread* GetCurrentThread() const;
54
55 /// Returns true if the scheduler is idle
56 [[nodiscard]] bool IsIdle() const {
57 return GetCurrentThread() == idle_thread;
58 }
55 59
56 /// Gets the timestamp for the last context switch in ticks. 60 /// Gets the timestamp for the last context switch in ticks.
57 [[nodiscard]] u64 GetLastContextSwitchTicks() const; 61 [[nodiscard]] u64 GetLastContextSwitchTicks() const;
@@ -72,14 +76,14 @@ public:
72 return switch_fiber; 76 return switch_fiber;
73 } 77 }
74 78
75 [[nodiscard]] u64 UpdateHighestPriorityThread(Thread* highest_thread); 79 [[nodiscard]] u64 UpdateHighestPriorityThread(KThread* highest_thread);
76 80
77 /** 81 /**
78 * Takes a thread and moves it to the back of its priority list. 82 * Takes a thread and moves it to the back of its priority list.
79 * 83 *
80 * @note This operation can be redundant; if it is, no scheduling change occurs. 84 * @note This operation can be redundant; if it is, no scheduling change occurs.
81 */ 85 */
82 void YieldWithoutCoreMigration(); 86 static void YieldWithoutCoreMigration(KernelCore& kernel);
83 87
84 /** 88 /**
85 * Takes a thread and moves it to the back of its priority list. 89 * Takes a thread and moves it to the back of its priority list.
@@ -88,7 +92,7 @@ public:
88 * 92 *
89 * @note This operation can be redundant; if it is, no scheduling change occurs. 93 * @note This operation can be redundant; if it is, no scheduling change occurs.
90 */ 94 */
91 void YieldWithCoreMigration(); 95 static void YieldWithCoreMigration(KernelCore& kernel);
92 96
93 /** 97 /**
94 * Takes a thread and moves it out of the scheduling queue. 98 * Takes a thread and moves it out of the scheduling queue.
@@ -97,16 +101,18 @@ public:
97 * 101 *
98 * @note This operation can be redundant; if it is, no scheduling change occurs. 102 * @note This operation can be redundant; if it is, no scheduling change occurs.
99 */ 103 */
100 void YieldToAnyThread(); 104 static void YieldToAnyThread(KernelCore& kernel);
105
106 static void ClearPreviousThread(KernelCore& kernel, KThread* thread);
101 107
102 /// Notify the scheduler a thread's status has changed. 108 /// Notify the scheduler a thread's status has changed.
103 static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, ThreadState old_state); 109 static void OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state);
104 110
105 /// Notify the scheduler a thread's priority has changed. 111 /// Notify the scheduler a thread's priority has changed.
106 static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, s32 old_priority); 112 static void OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority);
107 113
108 /// Notify the scheduler a thread's core and/or affinity mask has changed. 114 /// Notify the scheduler a thread's core and/or affinity mask has changed.
109 static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread, 115 static void OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
110 const KAffinityMask& old_affinity, s32 old_core); 116 const KAffinityMask& old_affinity, s32 old_core);
111 117
112 static bool CanSchedule(KernelCore& kernel); 118 static bool CanSchedule(KernelCore& kernel);
@@ -114,8 +120,7 @@ public:
114 static void SetSchedulerUpdateNeeded(KernelCore& kernel); 120 static void SetSchedulerUpdateNeeded(KernelCore& kernel);
115 static void ClearSchedulerUpdateNeeded(KernelCore& kernel); 121 static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
116 static void DisableScheduling(KernelCore& kernel); 122 static void DisableScheduling(KernelCore& kernel);
117 static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling, 123 static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling);
118 Core::EmuThreadHandle global_thread);
119 [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel); 124 [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
120 125
121private: 126private:
@@ -163,13 +168,15 @@ private:
163 * most recent tick count retrieved. No special arithmetic is 168 * most recent tick count retrieved. No special arithmetic is
164 * applied to it. 169 * applied to it.
165 */ 170 */
166 void UpdateLastContextSwitchTime(Thread* thread, Process* process); 171 void UpdateLastContextSwitchTime(KThread* thread, Process* process);
167 172
168 static void OnSwitch(void* this_scheduler); 173 static void OnSwitch(void* this_scheduler);
169 void SwitchToCurrent(); 174 void SwitchToCurrent();
170 175
171 Thread* current_thread{}; 176 KThread* prev_thread{};
172 Thread* idle_thread{}; 177 std::atomic<KThread*> current_thread{};
178
179 KThread* idle_thread;
173 180
174 std::shared_ptr<Common::Fiber> switch_fiber{}; 181 std::shared_ptr<Common::Fiber> switch_fiber{};
175 182
@@ -178,7 +185,7 @@ private:
178 bool interrupt_task_thread_runnable{}; 185 bool interrupt_task_thread_runnable{};
179 bool should_count_idle{}; 186 bool should_count_idle{};
180 u64 idle_count{}; 187 u64 idle_count{};
181 Thread* highest_priority_thread{}; 188 KThread* highest_priority_thread{};
182 void* idle_thread_stack{}; 189 void* idle_thread_stack{};
183 }; 190 };
184 191
@@ -186,7 +193,7 @@ private:
186 193
187 Core::System& system; 194 Core::System& system;
188 u64 last_context_switch_time{}; 195 u64 last_context_switch_time{};
189 const std::size_t core_id; 196 const s32 core_id;
190 197
191 Common::SpinLock guard{}; 198 Common::SpinLock guard{};
192}; 199};
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 9b40bd22c..169455d18 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -10,6 +10,7 @@
10#include "common/assert.h" 10#include "common/assert.h"
11#include "common/spin_lock.h" 11#include "common/spin_lock.h"
12#include "core/hardware_properties.h" 12#include "core/hardware_properties.h"
13#include "core/hle/kernel/k_thread.h"
13#include "core/hle/kernel/kernel.h" 14#include "core/hle/kernel/kernel.h"
14 15
15namespace Kernel { 16namespace Kernel {
@@ -22,46 +23,45 @@ public:
22 explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {} 23 explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {}
23 24
24 bool IsLockedByCurrentThread() const { 25 bool IsLockedByCurrentThread() const {
25 return this->owner_thread == kernel.GetCurrentEmuThreadID(); 26 return owner_thread == GetCurrentThreadPointer(kernel);
26 } 27 }
27 28
28 void Lock() { 29 void Lock() {
29 if (this->IsLockedByCurrentThread()) { 30 if (IsLockedByCurrentThread()) {
30 // If we already own the lock, we can just increment the count. 31 // If we already own the lock, we can just increment the count.
31 ASSERT(this->lock_count > 0); 32 ASSERT(lock_count > 0);
32 this->lock_count++; 33 lock_count++;
33 } else { 34 } else {
34 // Otherwise, we want to disable scheduling and acquire the spinlock. 35 // Otherwise, we want to disable scheduling and acquire the spinlock.
35 SchedulerType::DisableScheduling(kernel); 36 SchedulerType::DisableScheduling(kernel);
36 this->spin_lock.lock(); 37 spin_lock.lock();
37 38
38 // For debug, ensure that our state is valid. 39 // For debug, ensure that our state is valid.
39 ASSERT(this->lock_count == 0); 40 ASSERT(lock_count == 0);
40 ASSERT(this->owner_thread == Core::EmuThreadHandle::InvalidHandle()); 41 ASSERT(owner_thread == nullptr);
41 42
42 // Increment count, take ownership. 43 // Increment count, take ownership.
43 this->lock_count = 1; 44 lock_count = 1;
44 this->owner_thread = kernel.GetCurrentEmuThreadID(); 45 owner_thread = GetCurrentThreadPointer(kernel);
45 } 46 }
46 } 47 }
47 48
48 void Unlock() { 49 void Unlock() {
49 ASSERT(this->IsLockedByCurrentThread()); 50 ASSERT(IsLockedByCurrentThread());
50 ASSERT(this->lock_count > 0); 51 ASSERT(lock_count > 0);
51 52
52 // Release an instance of the lock. 53 // Release an instance of the lock.
53 if ((--this->lock_count) == 0) { 54 if ((--lock_count) == 0) {
54 // We're no longer going to hold the lock. Take note of what cores need scheduling. 55 // We're no longer going to hold the lock. Take note of what cores need scheduling.
55 const u64 cores_needing_scheduling = 56 const u64 cores_needing_scheduling =
56 SchedulerType::UpdateHighestPriorityThreads(kernel); 57 SchedulerType::UpdateHighestPriorityThreads(kernel);
57 Core::EmuThreadHandle leaving_thread = owner_thread;
58 58
59 // Note that we no longer hold the lock, and unlock the spinlock. 59 // Note that we no longer hold the lock, and unlock the spinlock.
60 this->owner_thread = Core::EmuThreadHandle::InvalidHandle(); 60 owner_thread = nullptr;
61 this->spin_lock.unlock(); 61 spin_lock.unlock();
62 62
63 // Enable scheduling, and perform a rescheduling operation. 63 // Enable scheduling, and perform a rescheduling operation.
64 SchedulerType::EnableScheduling(kernel, cores_needing_scheduling, leaving_thread); 64 SchedulerType::EnableScheduling(kernel, cores_needing_scheduling);
65 } 65 }
66 } 66 }
67 67
@@ -69,7 +69,7 @@ private:
69 KernelCore& kernel; 69 KernelCore& kernel;
70 Common::SpinLock spin_lock{}; 70 Common::SpinLock spin_lock{};
71 s32 lock_count{}; 71 s32 lock_count{};
72 Core::EmuThreadHandle owner_thread{Core::EmuThreadHandle::InvalidHandle()}; 72 KThread* owner_thread{};
73}; 73};
74 74
75} // namespace Kernel 75} // namespace Kernel
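
With owner_thread now a KThread*, IsLockedByCurrentThread() compares plain pointers and the lock stays re-entrant per thread: nested Lock() calls only bump lock_count, and DisableScheduling/EnableScheduling run once per outermost pair. A sketch of that behaviour (helper names are illustrative):

    void InnerUpdate(KernelCore& kernel) {
        KScopedSchedulerLock lock{kernel}; // same owner: lock_count 1 -> 2
        // ... mutate state that requires the scheduler lock ...
    } // lock_count 2 -> 1; no reschedule yet

    void OuterUpdate(KernelCore& kernel) {
        KScopedSchedulerLock lock{kernel}; // takes spin_lock, disables scheduling
        InnerUpdate(kernel);               // re-entry is safe
    } // lock_count 1 -> 0: owner cleared, scheduling re-enabled, cores rescheduled
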
diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
index 2bb3817fa..f8189e107 100644
--- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
+++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
@@ -9,27 +9,24 @@
9 9
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "core/hle/kernel/handle_table.h" 11#include "core/hle/kernel/handle_table.h"
12#include "core/hle/kernel/k_thread.h"
12#include "core/hle/kernel/kernel.h" 13#include "core/hle/kernel/kernel.h"
13#include "core/hle/kernel/thread.h"
14#include "core/hle/kernel/time_manager.h" 14#include "core/hle/kernel/time_manager.h"
15 15
16namespace Kernel { 16namespace Kernel {
17 17
18class KScopedSchedulerLockAndSleep { 18class KScopedSchedulerLockAndSleep {
19public: 19public:
20 explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* t, 20 explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, KThread* t, s64 timeout)
21 s64 timeout) 21 : kernel(kernel), thread(t), timeout_tick(timeout) {
22 : kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) {
23 event_handle = InvalidHandle;
24
25 // Lock the scheduler. 22 // Lock the scheduler.
26 kernel.GlobalSchedulerContext().scheduler_lock.Lock(); 23 kernel.GlobalSchedulerContext().scheduler_lock.Lock();
27 } 24 }
28 25
29 ~KScopedSchedulerLockAndSleep() { 26 ~KScopedSchedulerLockAndSleep() {
30 // Register the sleep. 27 // Register the sleep.
31 if (this->timeout_tick > 0) { 28 if (timeout_tick > 0) {
32 kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick); 29 kernel.TimeManager().ScheduleTimeEvent(thread, timeout_tick);
33 } 30 }
34 31
35 // Unlock the scheduler. 32 // Unlock the scheduler.
@@ -37,13 +34,12 @@ public:
37 } 34 }
38 35
39 void CancelSleep() { 36 void CancelSleep() {
40 this->timeout_tick = 0; 37 timeout_tick = 0;
41 } 38 }
42 39
43private: 40private:
44 KernelCore& kernel; 41 KernelCore& kernel;
45 Handle& event_handle; 42 KThread* thread{};
46 Thread* thread{};
47 s64 timeout_tick{}; 43 s64 timeout_tick{};
48}; 44};
49 45
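
Dropping the Handle& parameter means the destructor now registers the sleep directly against the KThread (when timeout_tick is still positive), and CancelSleep() suppresses it by zeroing the tick. The intended call shape, mirroring KSynchronizationObject::Wait below (AlreadySatisfied is a hypothetical predicate):

    void SleepUntilSignaled(KernelCore& kernel, KThread* thread, s64 timeout) {
        KScopedSchedulerLockAndSleep slp{kernel, thread, timeout};

        if (AlreadySatisfied()) {
            // timeout_tick = 0: the destructor skips ScheduleTimeEvent.
            slp.CancelSleep();
            return;
        }

        thread->SetState(ThreadState::Waiting);
    } // destructor: schedules the time event (timeout > 0) and unlocks
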
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
index 1c508cb55..a3b34f82f 100644
--- a/src/core/hle/kernel/k_synchronization_object.cpp
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -7,9 +7,9 @@
7#include "core/hle/kernel/k_scheduler.h" 7#include "core/hle/kernel/k_scheduler.h"
8#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 8#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
9#include "core/hle/kernel/k_synchronization_object.h" 9#include "core/hle/kernel/k_synchronization_object.h"
10#include "core/hle/kernel/k_thread.h"
10#include "core/hle/kernel/kernel.h" 11#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/svc_results.h" 12#include "core/hle/kernel/svc_results.h"
12#include "core/hle/kernel/thread.h"
13 13
14namespace Kernel { 14namespace Kernel {
15 15
@@ -20,12 +20,11 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
20 std::vector<ThreadListNode> thread_nodes(num_objects); 20 std::vector<ThreadListNode> thread_nodes(num_objects);
21 21
22 // Prepare for wait. 22 // Prepare for wait.
23 Thread* thread = kernel.CurrentScheduler()->GetCurrentThread(); 23 KThread* thread = kernel.CurrentScheduler()->GetCurrentThread();
24 Handle timer = InvalidHandle;
25 24
26 { 25 {
27 // Setup the scheduling lock and sleep. 26 // Setup the scheduling lock and sleep.
28 KScopedSchedulerLockAndSleep slp(kernel, timer, thread, timeout); 27 KScopedSchedulerLockAndSleep slp{kernel, thread, timeout};
29 28
30 // Check if any of the objects are already signaled. 29 // Check if any of the objects are already signaled.
31 for (auto i = 0; i < num_objects; ++i) { 30 for (auto i = 0; i < num_objects; ++i) {
@@ -90,10 +89,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
90 thread->SetWaitObjectsForDebugging({}); 89 thread->SetWaitObjectsForDebugging({});
91 90
92 // Cancel the timer as needed. 91 // Cancel the timer as needed.
93 if (timer != InvalidHandle) { 92 kernel.TimeManager().UnscheduleTimeEvent(thread);
94 auto& time_manager = kernel.TimeManager();
95 time_manager.UnscheduleTimeEvent(timer);
96 }
97 93
98 // Get the wait result. 94 // Get the wait result.
99 ResultCode wait_result{RESULT_SUCCESS}; 95 ResultCode wait_result{RESULT_SUCCESS};
@@ -136,7 +132,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
136 132
137KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : Object{kernel} {} 133KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : Object{kernel} {}
138 134
139KSynchronizationObject ::~KSynchronizationObject() = default; 135KSynchronizationObject::~KSynchronizationObject() = default;
140 136
141void KSynchronizationObject::NotifyAvailable(ResultCode result) { 137void KSynchronizationObject::NotifyAvailable(ResultCode result) {
142 KScopedSchedulerLock lock(kernel); 138 KScopedSchedulerLock lock(kernel);
@@ -148,7 +144,7 @@ void KSynchronizationObject::NotifyAvailable(ResultCode result) {
148 144
149 // Iterate over each thread. 145 // Iterate over each thread.
150 for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) { 146 for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
151 Thread* thread = cur_node->thread; 147 KThread* thread = cur_node->thread;
152 if (thread->GetState() == ThreadState::Waiting) { 148 if (thread->GetState() == ThreadState::Waiting) {
153 thread->SetSyncedObject(this, result); 149 thread->SetSyncedObject(this, result);
154 thread->SetState(ThreadState::Runnable); 150 thread->SetState(ThreadState::Runnable);
@@ -156,8 +152,8 @@ void KSynchronizationObject::NotifyAvailable(ResultCode result) {
156 } 152 }
157} 153}
158 154
159std::vector<Thread*> KSynchronizationObject::GetWaitingThreadsForDebugging() const { 155std::vector<KThread*> KSynchronizationObject::GetWaitingThreadsForDebugging() const {
160 std::vector<Thread*> threads; 156 std::vector<KThread*> threads;
161 157
162 // If debugging, dump the list of waiters. 158 // If debugging, dump the list of waiters.
163 { 159 {
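
Because UnscheduleTimeEvent() is now keyed on the thread rather than a timer Handle, Wait() can call it unconditionally after waking. A hypothetical caller, assuming the elided parameters of Wait are the object array, the object count, and a timeout in nanoseconds:

    // event_a / event_b stand in for real KSynchronizationObject instances.
    KSynchronizationObject* objects[]{event_a, event_b};
    s32 index{-1};
    const ResultCode rc =
        KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 2, timeout_ns);
    if (rc.IsSuccess()) {
        // objects[index] signaled before the timeout elapsed.
    }
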
diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h
index 14d80ebf1..f65c71c28 100644
--- a/src/core/hle/kernel/k_synchronization_object.h
+++ b/src/core/hle/kernel/k_synchronization_object.h
@@ -13,14 +13,14 @@ namespace Kernel {
13 13
14class KernelCore; 14class KernelCore;
15class Synchronization; 15class Synchronization;
16class Thread; 16class KThread;
17 17
18/// Class that represents a Kernel object that a thread can be waiting on 18/// Class that represents a Kernel object that a thread can be waiting on
19class KSynchronizationObject : public Object { 19class KSynchronizationObject : public Object {
20public: 20public:
21 struct ThreadListNode { 21 struct ThreadListNode {
22 ThreadListNode* next{}; 22 ThreadListNode* next{};
23 Thread* thread{}; 23 KThread* thread{};
24 }; 24 };
25 25
26 [[nodiscard]] static ResultCode Wait(KernelCore& kernel, s32* out_index, 26 [[nodiscard]] static ResultCode Wait(KernelCore& kernel, s32* out_index,
@@ -29,7 +29,7 @@ public:
29 29
30 [[nodiscard]] virtual bool IsSignaled() const = 0; 30 [[nodiscard]] virtual bool IsSignaled() const = 0;
31 31
32 [[nodiscard]] std::vector<Thread*> GetWaitingThreadsForDebugging() const; 32 [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;
33 33
34protected: 34protected:
35 explicit KSynchronizationObject(KernelCore& kernel); 35 explicit KSynchronizationObject(KernelCore& kernel);
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
new file mode 100644
index 000000000..aa100e139
--- /dev/null
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -0,0 +1,1050 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <cinttypes>
7#include <optional>
8#include <vector>
9
10#include "common/assert.h"
11#include "common/bit_util.h"
12#include "common/common_funcs.h"
13#include "common/common_types.h"
14#include "common/fiber.h"
15#include "common/logging/log.h"
16#include "common/scope_exit.h"
17#include "common/thread_queue_list.h"
18#include "core/core.h"
19#include "core/cpu_manager.h"
20#include "core/hardware_properties.h"
21#include "core/hle/kernel/errors.h"
22#include "core/hle/kernel/handle_table.h"
23#include "core/hle/kernel/k_condition_variable.h"
24#include "core/hle/kernel/k_scheduler.h"
25#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
26#include "core/hle/kernel/k_thread.h"
27#include "core/hle/kernel/k_thread_queue.h"
28#include "core/hle/kernel/kernel.h"
29#include "core/hle/kernel/memory/memory_layout.h"
30#include "core/hle/kernel/object.h"
31#include "core/hle/kernel/process.h"
32#include "core/hle/kernel/resource_limit.h"
33#include "core/hle/kernel/svc_results.h"
34#include "core/hle/kernel/time_manager.h"
35#include "core/hle/result.h"
36#include "core/memory.h"
37
38#ifdef ARCHITECTURE_x86_64
39#include "core/arm/dynarmic/arm_dynarmic_32.h"
40#include "core/arm/dynarmic/arm_dynarmic_64.h"
41#endif
42
43namespace {
44static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
45 u32 entry_point, u32 arg) {
46 context = {};
47 context.cpu_registers[0] = arg;
48 context.cpu_registers[15] = entry_point;
49 context.cpu_registers[13] = stack_top;
50}
51
52static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top,
53 VAddr entry_point, u64 arg) {
54 context = {};
55 context.cpu_registers[0] = arg;
56 context.pc = entry_point;
57 context.sp = stack_top;
58 // TODO(merry): Perform a hardware test to determine the below value.
59 context.fpcr = 0;
60}
61} // namespace
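
On AArch32 these helpers seed cpu_registers[15] (the PC) and cpu_registers[13] (the SP); the AArch64 variant uses the dedicated pc/sp fields and leaves fpcr at zero pending the hardware test noted in the TODO. Illustrative use with made-up addresses:

    Core::ARM_Interface::ThreadContext64 ctx{};
    ResetThreadContext64(ctx, /*stack_top=*/0x8000'0000, /*entry_point=*/0x80'0000, /*arg=*/0);
    // ctx.sp == 0x8000'0000, ctx.pc == 0x80'0000, everything else zeroed.
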
62
63namespace Kernel {
64
65KThread::KThread(KernelCore& kernel)
66 : KSynchronizationObject{kernel}, activity_pause_lock{kernel} {}
67KThread::~KThread() = default;
68
69ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
70 s32 virt_core, Process* owner, ThreadType type) {
71 // Assert parameters are valid.
72 ASSERT((type == ThreadType::Main) ||
73 (Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
74 ASSERT((owner != nullptr) || (type != ThreadType::User));
75 ASSERT(0 <= virt_core && virt_core < static_cast<s32>(Common::BitSize<u64>()));
76
77 // Convert the virtual core to a physical core.
78 const s32 phys_core = Core::Hardware::VirtualToPhysicalCoreMap[virt_core];
79 ASSERT(0 <= phys_core && phys_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
80
81 // First, clear the TLS address.
82 tls_address = {};
83
84 // Next, assert things based on the type.
85 switch (type) {
86 case ThreadType::Main:
87 ASSERT(arg == 0);
88 [[fallthrough]];
89 case ThreadType::HighPriority:
90 [[fallthrough]];
91 case ThreadType::User:
92 ASSERT(((owner == nullptr) ||
93 (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
94 ASSERT(((owner == nullptr) ||
95 (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask()));
96 break;
97 case ThreadType::Kernel:
98 UNIMPLEMENTED();
99 break;
100 default:
101 UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
102 break;
103 }
104 thread_type_for_debugging = type;
105
106 // Set the ideal core ID and affinity mask.
107 virtual_ideal_core_id = virt_core;
108 physical_ideal_core_id = phys_core;
109 virtual_affinity_mask = 1ULL << virt_core;
110 physical_affinity_mask.SetAffinity(phys_core, true);
111
112 // Set the thread state.
113 thread_state = (type == ThreadType::Main) ? ThreadState::Runnable : ThreadState::Initialized;
114
115 // Set TLS address.
116 tls_address = 0;
117
118 // Set parent and condvar tree.
119 parent = nullptr;
120 condvar_tree = nullptr;
121
122 // Set sync booleans.
123 signaled = false;
124 termination_requested = false;
125 wait_cancelled = false;
126 cancellable = false;
127
128 // Set core ID and wait result.
129 core_id = phys_core;
130 wait_result = Svc::ResultNoSynchronizationObject;
131
132 // Set priorities.
133 priority = prio;
134 base_priority = prio;
135
136 // Set sync object and waiting lock to null.
137 synced_object = nullptr;
138
139 // Initialize sleeping queue.
140 sleeping_queue = nullptr;
141
142 // Set suspend flags.
143 suspend_request_flags = 0;
144 suspend_allowed_flags = static_cast<u32>(ThreadState::SuspendFlagMask);
145
146 // We're neither debug attached, nor are we nesting our priority inheritance.
147 debug_attached = false;
148 priority_inheritance_count = 0;
149
150 // We haven't been scheduled, and we have done no light IPC.
151 schedule_count = -1;
152 last_scheduled_tick = 0;
153 light_ipc_data = nullptr;
154
155 // We're not waiting for a lock, and we haven't disabled migration.
156 lock_owner = nullptr;
157 num_core_migration_disables = 0;
158
159 // We have no waiters, but we do have an entrypoint.
160 num_kernel_waiters = 0;
161
162 // Set our current core id.
163 current_core_id = phys_core;
164
165 // We haven't released our resource limit hint, and we've spent no time on the cpu.
166 resource_limit_release_hint = false;
167 cpu_time = 0;
168
169 // Clear our stack parameters.
170 std::memset(static_cast<void*>(std::addressof(GetStackParameters())), 0,
171 sizeof(StackParameters));
172
173 // Setup the TLS, if needed.
174 if (type == ThreadType::User) {
175 tls_address = owner->CreateTLSRegion();
176 }
177
178 // Set parent, if relevant.
179 if (owner != nullptr) {
180 parent = owner;
181 parent->IncrementThreadCount();
182 }
183
184 // Initialize thread context.
185 ResetThreadContext64(thread_context_64, user_stack_top, func, arg);
186 ResetThreadContext32(thread_context_32, static_cast<u32>(user_stack_top),
187 static_cast<u32>(func), static_cast<u32>(arg));
188
189 // Setup the stack parameters.
190 StackParameters& sp = GetStackParameters();
191 sp.cur_thread = this;
192 sp.disable_count = 1;
193 SetInExceptionHandler();
194
195 // Set thread ID.
196 thread_id = kernel.CreateNewThreadID();
197
198 // We initialized!
199 initialized = true;
200
201 // Register ourselves with our parent process.
202 if (parent != nullptr) {
203 parent->RegisterThread(this);
204 if (parent->IsSuspended()) {
205 RequestSuspend(SuspendType::Process);
206 }
207 }
208
209 return RESULT_SUCCESS;
210}
211
212ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
213 VAddr user_stack_top, s32 prio, s32 core, Process* owner,
214 ThreadType type) {
215 // Initialize the thread.
216 R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
217
218 return RESULT_SUCCESS;
219}
220
221void KThread::Finalize() {
222 // If the thread has an owner process, unregister it.
223 if (parent != nullptr) {
224 parent->UnregisterThread(this);
225 }
226
227 // If the thread has a local region, delete it.
228 if (tls_address != 0) {
229 parent->FreeTLSRegion(tls_address);
230 }
231
232 // Release any waiters.
233 {
234 ASSERT(lock_owner == nullptr);
235 KScopedSchedulerLock sl{kernel};
236
237 auto it = waiter_list.begin();
238 while (it != waiter_list.end()) {
239 // The thread shouldn't be a kernel waiter.
240 it->SetLockOwner(nullptr);
241 it->SetSyncedObject(nullptr, Svc::ResultInvalidState);
242 it->Wakeup();
243 it = waiter_list.erase(it);
244 }
245 }
246
247 // Decrement the parent process's thread count.
248 if (parent != nullptr) {
249 parent->DecrementThreadCount();
250 parent->GetResourceLimit()->Release(ResourceType::Threads, 1);
251 }
252}
253
254bool KThread::IsSignaled() const {
255 return signaled;
256}
257
258void KThread::Wakeup() {
259 KScopedSchedulerLock sl{kernel};
260
261 if (GetState() == ThreadState::Waiting) {
262 if (sleeping_queue != nullptr) {
263 sleeping_queue->WakeupThread(this);
264 } else {
265 SetState(ThreadState::Runnable);
266 }
267 }
268}
269
270void KThread::StartTermination() {
271 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
272
273 // Release user exception and unpin, if relevant.
274 if (parent != nullptr) {
275 parent->ReleaseUserException(this);
276 if (parent->GetPinnedThread(GetCurrentCoreId(kernel)) == this) {
277 parent->UnpinCurrentThread();
278 }
279 }
280
281 // Set state to terminated.
282 SetState(ThreadState::Terminated);
283
284 // Clear the thread's status as running in parent.
285 if (parent != nullptr) {
286 parent->ClearRunningThread(this);
287 }
288
289 // Signal.
290 signaled = true;
291 NotifyAvailable();
292
293 // Clear previous thread in KScheduler.
294 KScheduler::ClearPreviousThread(kernel, this);
295
296 // Register terminated dpc flag.
297 RegisterDpc(DpcFlag::Terminated);
298}
299
300void KThread::Pin() {
301 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
302
303 // Set ourselves as pinned.
304 GetStackParameters().is_pinned = true;
305
306 // Disable core migration.
307 ASSERT(num_core_migration_disables == 0);
308 {
309 ++num_core_migration_disables;
310
311 // Save our ideal state to restore when we're unpinned.
312 original_physical_ideal_core_id = physical_ideal_core_id;
313 original_physical_affinity_mask = physical_affinity_mask;
314
315 // Bind ourselves to this core.
316 const s32 active_core = GetActiveCore();
317 const s32 current_core = GetCurrentCoreId(kernel);
318
319 SetActiveCore(current_core);
320 physical_ideal_core_id = current_core;
321 physical_affinity_mask.SetAffinityMask(1ULL << current_core);
322
323 if (active_core != current_core || physical_affinity_mask.GetAffinityMask() !=
324 original_physical_affinity_mask.GetAffinityMask()) {
325 KScheduler::OnThreadAffinityMaskChanged(kernel, this, original_physical_affinity_mask,
326 active_core);
327 }
328 }
329
330 // Disallow performing thread suspension.
331 {
332 // Update our allow flags.
333 suspend_allowed_flags &= ~(1 << (static_cast<u32>(SuspendType::Thread) +
334 static_cast<u32>(ThreadState::SuspendShift)));
335
336 // Update our state.
337 const ThreadState old_state = thread_state;
338 thread_state = static_cast<ThreadState>(GetSuspendFlags() |
339 static_cast<u32>(old_state & ThreadState::Mask));
340 if (thread_state != old_state) {
341 KScheduler::OnThreadStateChanged(kernel, this, old_state);
342 }
343 }
344
345 // TODO(bunnei): Update our SVC access permissions.
346 ASSERT(parent != nullptr);
347}
348
349void KThread::Unpin() {
350 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
351
352 // Set ourselves as unpinned.
353 GetStackParameters().is_pinned = false;
354
355 // Enable core migration.
356 ASSERT(num_core_migration_disables == 1);
357 {
358 num_core_migration_disables--;
359
360 // Restore our original state.
361 const KAffinityMask old_mask = physical_affinity_mask;
362
363 physical_ideal_core_id = original_physical_ideal_core_id;
364 physical_affinity_mask = original_physical_affinity_mask;
365
366 if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
367 const s32 active_core = GetActiveCore();
368
369 if (!physical_affinity_mask.GetAffinity(active_core)) {
370 if (physical_ideal_core_id >= 0) {
371 SetActiveCore(physical_ideal_core_id);
372 } else {
373 SetActiveCore(static_cast<s32>(
374 Common::BitSize<u64>() - 1 -
375 std::countl_zero(physical_affinity_mask.GetAffinityMask())));
376 }
377 }
378 KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core);
379 }
380 }
381
382 // Allow performing thread suspension (if termination hasn't been requested).
383 {
384 // Update our allow flags.
385 if (!IsTerminationRequested()) {
386 suspend_allowed_flags |= (1 << (static_cast<u32>(SuspendType::Thread) +
387 static_cast<u32>(ThreadState::SuspendShift)));
388 }
389
390 // Update our state.
391 const ThreadState old_state = thread_state;
392 thread_state = static_cast<ThreadState>(GetSuspendFlags() |
393 static_cast<u32>(old_state & ThreadState::Mask));
394 if (thread_state != old_state) {
395 KScheduler::OnThreadStateChanged(kernel, this, old_state);
396 }
397 }
398
399 // TODO(bunnei): Update our SVC access permissions.
400 ASSERT(parent != nullptr);
401
402 // Resume any threads that began waiting on us while we were pinned.
403 for (auto it = pinned_waiter_list.begin(); it != pinned_waiter_list.end(); ++it) {
404 if (it->GetState() == ThreadState::Waiting) {
405 it->SetState(ThreadState::Runnable);
406 }
407 }
408}
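
Pin() and Unpin() are meant to bracket a window where a thread must stay on its
current core (for example, while its process services a user exception). A minimal
sketch of the expected pairing, with a hypothetical call site (the real callers live
in Process/SVC handling, outside this file):

    {
        KScopedSchedulerLock sl{kernel};
        thread->Pin();   // Saves ideal core/affinity, binds to the current core,
                         // and masks out SuspendType::Thread suspension.
    }
    // ... the exception is handled while the thread stays put ...
    {
        KScopedSchedulerLock sl{kernel};
        thread->Unpin(); // Restores the saved affinity and resumes any
                         // pinned_waiter_list entries.
    }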
409
410ResultCode KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
411 KScopedSchedulerLock sl{kernel};
412
413 // Get the virtual mask.
414 *out_ideal_core = virtual_ideal_core_id;
415 *out_affinity_mask = virtual_affinity_mask;
416
417 return RESULT_SUCCESS;
418}
419
420ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
421 KScopedSchedulerLock sl{kernel};
422 ASSERT(num_core_migration_disables >= 0);
423
424 // Select between core mask and original core mask.
425 if (num_core_migration_disables == 0) {
426 *out_ideal_core = physical_ideal_core_id;
427 *out_affinity_mask = physical_affinity_mask.GetAffinityMask();
428 } else {
429 *out_ideal_core = original_physical_ideal_core_id;
430 *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask();
431 }
432
433 return RESULT_SUCCESS;
434}
435
436ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
437 ASSERT(parent != nullptr);
438 ASSERT(v_affinity_mask != 0);
439 KScopedLightLock lk{activity_pause_lock};
440
441 // Set the core mask.
442 u64 p_affinity_mask = 0;
443 {
444 KScopedSchedulerLock sl{kernel};
445 ASSERT(num_core_migration_disables >= 0);
446
447 // If the core id is no-update magic, preserve the ideal core id.
448 if (core_id == Svc::IdealCoreNoUpdate) {
449 core_id = virtual_ideal_core_id;
450 R_UNLESS(((1ULL << core_id) & v_affinity_mask) != 0, Svc::ResultInvalidCombination);
451 }
452
453 // Set the virtual core/affinity mask.
454 virtual_ideal_core_id = core_id;
455 virtual_affinity_mask = v_affinity_mask;
456
457 // Translate the virtual core to a physical core.
458 if (core_id >= 0) {
459 core_id = Core::Hardware::VirtualToPhysicalCoreMap[core_id];
460 }
461
462 // Translate the virtual affinity mask to a physical one.
463 while (v_affinity_mask != 0) {
464 const u64 next = std::countr_zero(v_affinity_mask);
465 v_affinity_mask &= ~(1ULL << next);
466 p_affinity_mask |= (1ULL << Core::Hardware::VirtualToPhysicalCoreMap[next]);
467 }
468
469 // If we haven't disabled migration, perform an affinity change.
470 if (num_core_migration_disables == 0) {
471 const KAffinityMask old_mask = physical_affinity_mask;
472
473 // Set our new ideals.
474 physical_ideal_core_id = core_id;
475 physical_affinity_mask.SetAffinityMask(p_affinity_mask);
476
477 if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
478 const s32 active_core = GetActiveCore();
479
480 if (active_core >= 0 && !physical_affinity_mask.GetAffinity(active_core)) {
481 const s32 new_core = static_cast<s32>(
482 physical_ideal_core_id >= 0
483 ? physical_ideal_core_id
484 : Common::BitSize<u64>() - 1 -
485 std::countl_zero(physical_affinity_mask.GetAffinityMask()));
486 SetActiveCore(new_core);
487 }
488 KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core);
489 }
490 } else {
491 // Otherwise, we edit the original affinity for restoration later.
492 original_physical_ideal_core_id = core_id;
493 original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
494 }
495 }
496
497 // Update the pinned waiter list.
498 {
499 bool retry_update{};
500 bool thread_is_pinned{};
501 do {
502 // Lock the scheduler.
503 KScopedSchedulerLock sl{kernel};
504
505 // Don't do any further management if our termination has been requested.
506 R_SUCCEED_IF(IsTerminationRequested());
507
508 // By default, we won't need to retry.
509 retry_update = false;
510
511 // Check if the thread is currently running.
512 bool thread_is_current{};
513 s32 thread_core;
514 for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
515 ++thread_core) {
516 if (kernel.Scheduler(thread_core).GetCurrentThread() == this) {
517 thread_is_current = true;
518 break;
519 }
520 }
521
522 // If the thread is currently running, check whether it's no longer allowed under the
523 // new mask.
524 if (thread_is_current && ((1ULL << thread_core) & p_affinity_mask) == 0) {
525 // If the thread is pinned, we want to wait until it's not pinned.
526 if (GetStackParameters().is_pinned) {
527 // Verify that the current thread isn't terminating.
528 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
529 Svc::ResultTerminationRequested);
530
531 // Note that the thread was pinned.
532 thread_is_pinned = true;
533
534 // Wait until the thread isn't pinned any more.
535 pinned_waiter_list.push_back(GetCurrentThread(kernel));
536 GetCurrentThread(kernel).SetState(ThreadState::Waiting);
537 } else {
538 // If the thread isn't pinned, release the scheduler lock and retry until it's
539 // not current.
540 retry_update = true;
541 }
542 }
543 } while (retry_update);
544
545 // If the thread was pinned, it no longer is, and we should remove the current thread from
546 // our waiter list.
547 if (thread_is_pinned) {
548 // Lock the scheduler.
549 KScopedSchedulerLock sl{kernel};
550
551 // Remove from the list.
552 pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
553 }
554 }
555
556 return RESULT_SUCCESS;
557}
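
The virtual-to-physical translation loop in SetCoreMask() peels one set bit at a
time off the virtual mask and maps it through VirtualToPhysicalCoreMap. The same
transform in isolation, with a hypothetical mask value (and assuming the map is the
identity, as on current yuzu targets):

    u64 v_mask = 0b0110; // virtual cores 1 and 2
    u64 p_mask = 0;
    while (v_mask != 0) {
        const u64 next = std::countr_zero(v_mask);   // lowest set virtual core
        v_mask &= ~(1ULL << next);                   // clear it
        p_mask |= 1ULL << Core::Hardware::VirtualToPhysicalCoreMap[next];
    }
    // p_mask == 0b0110 under an identity mapping.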
558
559void KThread::SetBasePriority(s32 value) {
560 ASSERT(Svc::HighestThreadPriority <= value && value <= Svc::LowestThreadPriority);
561
562 KScopedSchedulerLock sl{kernel};
563
564 // Change our base priority.
565 base_priority = value;
566
567 // Perform a priority restoration.
568 RestorePriority(kernel, this);
569}
570
571void KThread::RequestSuspend(SuspendType type) {
572 KScopedSchedulerLock sl{kernel};
573
574 // Note the request in our flags.
575 suspend_request_flags |=
576 (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
577
578 // Try to perform the suspend.
579 TrySuspend();
580}
581
582void KThread::Resume(SuspendType type) {
583 KScopedSchedulerLock sl{kernel};
584
585 // Clear the request in our flags.
586 suspend_request_flags &=
587 ~(1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
588
589 // Update our state.
590 const ThreadState old_state = thread_state;
591 thread_state = static_cast<ThreadState>(GetSuspendFlags() |
592 static_cast<u32>(old_state & ThreadState::Mask));
593 if (thread_state != old_state) {
594 KScheduler::OnThreadStateChanged(kernel, this, old_state);
595 }
596}
597
598void KThread::WaitCancel() {
599 KScopedSchedulerLock sl{kernel};
600
601 // Check if we're waiting and cancellable.
602 if (GetState() == ThreadState::Waiting && cancellable) {
603 if (sleeping_queue != nullptr) {
604 sleeping_queue->WakeupThread(this);
605 wait_cancelled = true;
606 } else {
607 SetSyncedObject(nullptr, Svc::ResultCancelled);
608 SetState(ThreadState::Runnable);
609 wait_cancelled = false;
610 }
611 } else {
612 // Otherwise, note that we cancelled a wait.
613 wait_cancelled = true;
614 }
615}
616
617void KThread::TrySuspend() {
618 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
619 ASSERT(IsSuspendRequested());
620
621 // Ensure that we have no waiters.
622 if (GetNumKernelWaiters() > 0) {
623 return;
624 }
625 ASSERT(GetNumKernelWaiters() == 0);
626
627 // Perform the suspend.
628 Suspend();
629}
630
631void KThread::Suspend() {
632 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
633 ASSERT(IsSuspendRequested());
634
635 // Set our suspend flags in state.
636 const auto old_state = thread_state;
637 thread_state = static_cast<ThreadState>(GetSuspendFlags()) | (old_state & ThreadState::Mask);
638
639 // Note the state change in scheduler.
640 KScheduler::OnThreadStateChanged(kernel, this, old_state);
641}
642
643void KThread::Continue() {
644 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
645
646 // Clear our suspend flags in state.
647 const auto old_state = thread_state;
648 thread_state = old_state & ThreadState::Mask;
649
650 // Note the state change in scheduler.
651 KScheduler::OnThreadStateChanged(kernel, this, old_state);
652}
653
654ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
655 // Lock ourselves.
656 KScopedLightLock lk{activity_pause_lock};
657
658 // Set the activity.
659 {
660 // Lock the scheduler.
661 KScopedSchedulerLock sl{kernel};
662
663 // Verify our state.
664 const auto cur_state = GetState();
665 R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable),
666 Svc::ResultInvalidState);
667
668 // Either pause or resume.
669 if (activity == Svc::ThreadActivity::Paused) {
670 // Verify that we're not suspended.
671 R_UNLESS(!IsSuspendRequested(SuspendType::Thread), Svc::ResultInvalidState);
672
673 // Suspend.
674 RequestSuspend(SuspendType::Thread);
675 } else {
676 ASSERT(activity == Svc::ThreadActivity::Runnable);
677
678 // Verify that we're suspended.
679 R_UNLESS(IsSuspendRequested(SuspendType::Thread), Svc::ResultInvalidState);
680
681 // Resume.
682 Resume(SuspendType::Thread);
683 }
684 }
685
686 // If the thread is now paused, update the pinned waiter list.
687 if (activity == Svc::ThreadActivity::Paused) {
688 bool thread_is_pinned{};
689 bool thread_is_current{};
690 do {
691 // Lock the scheduler.
692 KScopedSchedulerLock sl{kernel};
693
694 // Don't do any further management if our termination has been requested.
695 R_SUCCEED_IF(IsTerminationRequested());
696
697 // Check whether the thread is pinned.
698 if (GetStackParameters().is_pinned) {
699 // Verify that the current thread isn't terminating.
700 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
701 Svc::ResultTerminationRequested);
702
703 // Note that the thread was pinned and not current.
704 thread_is_pinned = true;
705 thread_is_current = false;
706
707 // Wait until the thread isn't pinned any more.
708 pinned_waiter_list.push_back(GetCurrentThread(kernel));
709 GetCurrentThread(kernel).SetState(ThreadState::Waiting);
710 } else {
711 // Check if the thread is currently running.
712 // If it is, we'll need to retry.
713 thread_is_current = false;
714
715 for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
716 if (kernel.Scheduler(i).GetCurrentThread() == this) {
717 thread_is_current = true;
718 break;
719 }
720 }
721 }
722 } while (thread_is_current);
723
724 // If the thread was pinned, it no longer is, and we should remove the current thread from
725 // our waiter list.
726 if (thread_is_pinned) {
727 // Lock the scheduler.
728 KScopedSchedulerLock sl{kernel};
729
730 // Remove from the list.
731 pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
732 }
733 }
734
735 return RESULT_SUCCESS;
736}
737
738ResultCode KThread::GetThreadContext3(std::vector<u8>& out) {
739 // Lock ourselves.
740 KScopedLightLock lk{activity_pause_lock};
741
742 // Get the context.
743 {
744 // Lock the scheduler.
745 KScopedSchedulerLock sl{kernel};
746
747 // Verify that we're suspended.
748 R_UNLESS(IsSuspendRequested(SuspendType::Thread), Svc::ResultInvalidState);
749
750 // If we're not terminating, get the thread's user context.
751 if (!IsTerminationRequested()) {
752 if (parent->Is64BitProcess()) {
753 // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
754 auto context = GetContext64();
755 context.pstate &= 0xFF0FFE20;
756
757 out.resize(sizeof(context));
758 std::memcpy(out.data(), &context, sizeof(context));
759 } else {
760 // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
761 auto context = GetContext32();
762 context.cpsr &= 0xFF0FFE20;
763
764 out.resize(sizeof(context));
765 std::memcpy(out.data(), &context, sizeof(context));
766 }
767 }
768 }
769
770 return RESULT_SUCCESS;
771}
772
773void KThread::AddWaiterImpl(KThread* thread) {
774 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
775
776 // Find the right spot to insert the waiter.
777 auto it = waiter_list.begin();
778 while (it != waiter_list.end()) {
779 if (it->GetPriority() > thread->GetPriority()) {
780 break;
781 }
782 it++;
783 }
784
785 // Keep track of how many kernel waiters we have.
786 if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
787 ASSERT((num_kernel_waiters++) >= 0);
788 }
789
790 // Insert the waiter.
791 waiter_list.insert(it, *thread);
792 thread->SetLockOwner(this);
793}
794
795void KThread::RemoveWaiterImpl(KThread* thread) {
796 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
797
798 // Keep track of how many kernel waiters we have.
799 if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
800 ASSERT((num_kernel_waiters--) > 0);
801 }
802
803 // Remove the waiter.
804 waiter_list.erase(waiter_list.iterator_to(*thread));
805 thread->SetLockOwner(nullptr);
806}
807
808void KThread::RestorePriority(KernelCore& kernel, KThread* thread) {
809 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
810
811 while (true) {
812 // We want to inherit priority where possible.
813 s32 new_priority = thread->GetBasePriority();
814 if (thread->HasWaiters()) {
815 new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
816 }
817
818 // If the priority we would inherit is not different from ours, don't do anything.
819 if (new_priority == thread->GetPriority()) {
820 return;
821 }
822
823 // Ensure we don't violate condition variable red black tree invariants.
824 if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
825 BeforeUpdatePriority(kernel, cv_tree, thread);
826 }
827
828 // Change the priority.
829 const s32 old_priority = thread->GetPriority();
830 thread->SetPriority(new_priority);
831
832 // Restore the condition variable, if relevant.
833 if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
834 AfterUpdatePriority(kernel, cv_tree, thread);
835 }
836
837 // Update the scheduler.
838 KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);
839
840 // Keep the lock owner up to date.
841 KThread* lock_owner = thread->GetLockOwner();
842 if (lock_owner == nullptr) {
843 return;
844 }
845
846 // Update the thread in the lock owner's sorted list, and continue inheriting.
847 lock_owner->RemoveWaiterImpl(thread);
848 lock_owner->AddWaiterImpl(thread);
849 thread = lock_owner;
850 }
851}
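
RestorePriority() is the core of priority inheritance: it re-derives a thread's
effective priority from its highest-priority waiter, then walks up the lock-owner
chain repeating the computation. A sketch of the mutex flow that drives it, with
hypothetical threads and a stand-in mutex_addr (scheduler lock assumed held):

    // waiter (priority 10) blocks on a mutex held by holder (base priority 44);
    // assume waiter->SetAddressKey(mutex_addr) was done when it blocked.
    holder->AddWaiter(waiter);  // Sorted insert; RestorePriority boosts holder to 10.

    // When holder releases the mutex:
    s32 num_waiters{};
    KThread* next_owner = holder->RemoveWaiterByKey(&num_waiters, mutex_addr);
    // next_owner inherits the remaining waiters, and RestorePriority drops
    // holder back to its base priority of 44.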
852
853void KThread::AddWaiter(KThread* thread) {
854 AddWaiterImpl(thread);
855 RestorePriority(kernel, this);
856}
857
858void KThread::RemoveWaiter(KThread* thread) {
859 RemoveWaiterImpl(thread);
860 RestorePriority(kernel, this);
861}
862
863KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
864 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
865
866 s32 num_waiters{};
867 KThread* next_lock_owner{};
868 auto it = waiter_list.begin();
869 while (it != waiter_list.end()) {
870 if (it->GetAddressKey() == key) {
871 KThread* thread = std::addressof(*it);
872
873 // Keep track of how many kernel waiters we have.
874 if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
875 ASSERT((num_kernel_waiters--) > 0);
876 }
877 it = waiter_list.erase(it);
878
879 // Update the next lock owner.
880 if (next_lock_owner == nullptr) {
881 next_lock_owner = thread;
882 next_lock_owner->SetLockOwner(nullptr);
883 } else {
884 next_lock_owner->AddWaiterImpl(thread);
885 }
886 num_waiters++;
887 } else {
888 it++;
889 }
890 }
891
892 // Do priority updates, if we have a next owner.
893 if (next_lock_owner) {
894 RestorePriority(kernel, this);
895 RestorePriority(kernel, next_lock_owner);
896 }
897
898 // Return output.
899 *out_num_waiters = num_waiters;
900 return next_lock_owner;
901}
902
903ResultCode KThread::Run() {
904 while (true) {
905 KScopedSchedulerLock lk{kernel};
906
907 // If either this thread or the current thread is requesting termination, abort.
908 R_UNLESS(!IsTerminationRequested(), Svc::ResultTerminationRequested);
909 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
910 Svc::ResultTerminationRequested);
911
912 // Ensure our thread state is correct.
913 R_UNLESS(GetState() == ThreadState::Initialized, Svc::ResultInvalidState);
914
915 // If the current thread has been asked to suspend, suspend it and retry.
916 if (GetCurrentThread(kernel).IsSuspended()) {
917 GetCurrentThread(kernel).Suspend();
918 continue;
919 }
920
921 // If we're not a kernel thread and we've been asked to suspend, suspend ourselves.
922 if (IsUserThread() && IsSuspended()) {
923 Suspend();
924 }
925
926 // Set our state and finish.
927 SetState(ThreadState::Runnable);
928 return RESULT_SUCCESS;
929 }
930}
931
932void KThread::Exit() {
933 ASSERT(this == GetCurrentThreadPointer(kernel));
934
935 // Release the thread resource hint from parent.
936 if (parent != nullptr) {
937 // TODO(bunnei): Hint that the resource is about to be released.
938 resource_limit_release_hint = true;
939 }
940
941 // Perform termination.
942 {
943 KScopedSchedulerLock sl{kernel};
944
945 // Disallow all suspension.
946 suspend_allowed_flags = 0;
947
948 // Start termination.
949 StartTermination();
950 }
951}
952
953ResultCode KThread::Sleep(s64 timeout) {
954 ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
955 ASSERT(this == GetCurrentThreadPointer(kernel));
956 ASSERT(timeout > 0);
957
958 {
959 // Setup the scheduling lock and sleep.
960 KScopedSchedulerLockAndSleep slp{kernel, this, timeout};
961
962 // Check if the thread should terminate.
963 if (IsTerminationRequested()) {
964 slp.CancelSleep();
965 return Svc::ResultTerminationRequested;
966 }
967
968 // Mark the thread as waiting.
969 SetState(ThreadState::Waiting);
970 SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
971 }
972
973 // The lock/sleep is done.
974
975 // Cancel the timer.
976 kernel.TimeManager().UnscheduleTimeEvent(this);
977
978 return RESULT_SUCCESS;
979}
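
Sleep() backs the SleepThread SVC for positive timeouts. A hedged sketch of the
calling pattern, as a hypothetical handler (the real one lives in svc.cpp and also
deals with the yield encodings 0/-1/-2, which never reach this function):

    ResultCode SleepCurrentThread(KernelCore& kernel, s64 nanoseconds) {
        if (nanoseconds > 0) {
            // May return Svc::ResultTerminationRequested instead of sleeping.
            return GetCurrentThread(kernel).Sleep(nanoseconds);
        }
        return RESULT_SUCCESS; // Yield variants are handled by the scheduler.
    }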
980
981void KThread::SetState(ThreadState state) {
982 KScopedSchedulerLock sl{kernel};
983
984 // Clear debugging state
985 SetMutexWaitAddressForDebugging({});
986 SetWaitReasonForDebugging({});
987
988 const ThreadState old_state = thread_state;
989 thread_state =
990 static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
991 if (thread_state != old_state) {
992 KScheduler::OnThreadStateChanged(kernel, this, old_state);
993 }
994}
995
996std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
997 return host_context;
998}
999
1000ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags,
1001 std::string name, VAddr entry_point,
1002 u32 priority, u64 arg, s32 processor_id,
1003 VAddr stack_top, Process* owner_process) {
1004 std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
1005 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
1006 return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top,
1007 owner_process, std::move(init_func), init_func_parameter);
1008}
1009
1010ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags,
1011 std::string name, VAddr entry_point,
1012 u32 priority, u64 arg, s32 processor_id,
1013 VAddr stack_top, Process* owner_process,
1014 std::function<void(void*)>&& thread_start_func,
1015 void* thread_start_parameter) {
1016 auto& kernel = system.Kernel();
1017
1018 std::shared_ptr<KThread> thread = std::make_shared<KThread>(kernel);
1019
1020 if (const auto result =
1021 thread->InitializeThread(thread.get(), entry_point, arg, stack_top, priority,
1022 processor_id, owner_process, type_flags);
1023 result.IsError()) {
1024 return result;
1025 }
1026
1027 thread->name = name;
1028
1029 auto& scheduler = kernel.GlobalSchedulerContext();
1030 scheduler.AddThread(thread);
1031
1032 thread->host_context =
1033 std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
1034
1035 return MakeResult<std::shared_ptr<KThread>>(std::move(thread));
1036}
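
For context, a minimal sketch of creating and starting a guest thread with the first
overload; entry_point, stack_top, and owner_process are hypothetical, and error
handling is elided:

    auto thread_res = KThread::Create(system, ThreadType::User, "guest:worker",
                                      entry_point, KThread::DefaultThreadPriority,
                                      /*arg=*/0, /*processor_id=*/0, stack_top,
                                      owner_process);
    if (thread_res.Succeeded()) {
        std::shared_ptr<KThread> thread = std::move(thread_res).Unwrap();
        if (thread->Run().IsError()) {
            // Initialized -> Runnable failed (e.g. termination was requested).
        }
    }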
1037
1038KThread* GetCurrentThreadPointer(KernelCore& kernel) {
1039 return kernel.GetCurrentEmuThread();
1040}
1041
1042KThread& GetCurrentThread(KernelCore& kernel) {
1043 return *GetCurrentThreadPointer(kernel);
1044}
1045
1046s32 GetCurrentCoreId(KernelCore& kernel) {
1047 return GetCurrentThread(kernel).GetCurrentCore();
1048}
1049
1050} // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
new file mode 100644
index 000000000..c8ac656a4
--- /dev/null
+++ b/src/core/hle/kernel/k_thread.h
@@ -0,0 +1,768 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
#include <atomic>
#include <functional>
#include <memory>
8#include <span>
9#include <string>
10#include <utility>
11#include <vector>
12
13#include <boost/intrusive/list.hpp>
14
15#include "common/common_types.h"
16#include "common/intrusive_red_black_tree.h"
17#include "common/spin_lock.h"
18#include "core/arm/arm_interface.h"
19#include "core/hle/kernel/k_affinity_mask.h"
20#include "core/hle/kernel/k_light_lock.h"
21#include "core/hle/kernel/k_synchronization_object.h"
22#include "core/hle/kernel/object.h"
23#include "core/hle/kernel/svc_common.h"
24#include "core/hle/kernel/svc_types.h"
25#include "core/hle/result.h"
26
27namespace Common {
28class Fiber;
29}
30
31namespace Core {
32class ARM_Interface;
33class System;
34} // namespace Core
35
36namespace Kernel {
37
38class GlobalSchedulerContext;
39class KernelCore;
40class Process;
41class KScheduler;
42class KThreadQueue;
43
44using KThreadFunction = VAddr;
45
46enum class ThreadType : u32 {
47 Main = 0,
48 Kernel = 1,
49 HighPriority = 2,
50 User = 3,
51};
52DECLARE_ENUM_FLAG_OPERATORS(ThreadType);
53
54enum class SuspendType : u32 {
55 Process = 0,
56 Thread = 1,
57 Debug = 2,
58 Backtrace = 3,
59 Init = 4,
60
61 Count,
62};
63
64enum class ThreadState : u16 {
65 Initialized = 0,
66 Waiting = 1,
67 Runnable = 2,
68 Terminated = 3,
69
70 SuspendShift = 4,
71 Mask = (1 << SuspendShift) - 1,
72
73 ProcessSuspended = (1 << (0 + SuspendShift)),
74 ThreadSuspended = (1 << (1 + SuspendShift)),
75 DebugSuspended = (1 << (2 + SuspendShift)),
76 BacktraceSuspended = (1 << (3 + SuspendShift)),
77 InitSuspended = (1 << (4 + SuspendShift)),
78
79 SuspendFlagMask = ((1 << 5) - 1) << SuspendShift,
80};
81DECLARE_ENUM_FLAG_OPERATORS(ThreadState);
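
The low SuspendShift bits hold the lifecycle state and the bits above hold one flag
per SuspendType, so suspension composes with (rather than replaces) the state. A
quick sanity check, assuming the flag operators declared above are constexpr:

    // A runnable thread with a pending, allowed thread-suspend request:
    constexpr auto s = ThreadState::Runnable | ThreadState::ThreadSuspended; // 0x22
    static_assert((s & ThreadState::Mask) == ThreadState::Runnable);
    // GetState() still reports Runnable; only GetRawState() exposes the flag.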
82
83enum class DpcFlag : u32 {
84 Terminating = (1 << 0),
85 Terminated = (1 << 1),
86};
87
88enum class ThreadWaitReasonForDebugging : u32 {
89 None, ///< Thread is not waiting
90 Sleep, ///< Thread is waiting due to a SleepThread SVC
91 IPC, ///< Thread is waiting for the reply from an IPC request
92 Synchronization, ///< Thread is waiting due to a WaitSynchronization SVC
93 ConditionVar, ///< Thread is waiting due to a WaitProcessWideKey SVC
94 Arbitration, ///< Thread is waiting due to a SignalToAddress/WaitForAddress SVC
95 Suspended, ///< Thread is waiting due to process suspension
96};
97
98[[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel);
99[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel);
100[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel);
101
102class KThread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> {
103 friend class KScheduler;
104 friend class Process;
105
106public:
107 static constexpr s32 DefaultThreadPriority = 44;
108 static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1;
109
110 explicit KThread(KernelCore& kernel);
111 ~KThread() override;
112
113public:
114 using ThreadContext32 = Core::ARM_Interface::ThreadContext32;
115 using ThreadContext64 = Core::ARM_Interface::ThreadContext64;
116 using WaiterList = boost::intrusive::list<KThread>;
117
118 /**
119 * Creates and returns a new thread. The new thread is immediately scheduled
120 * @param system The instance of the whole system
121 * @param name The friendly name desired for the thread
122 * @param entry_point The address at which the thread should start execution
123 * @param priority The thread's priority
124 * @param arg User data to pass to the thread
125 * @param processor_id The ID(s) of the processors on which the thread is desired to be run
126 * @param stack_top The address of the thread's stack top
127 * @param owner_process The parent process for the thread; if null, it's a kernel thread
128 * @return A shared pointer to the newly created thread
129 */
130 [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> Create(
131 Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
132 u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process);
133
134 /**
135 * Creates and returns a new thread. The new thread is immediately scheduled
136 * @param system The instance of the whole system
137 * @param name The friendly name desired for the thread
138 * @param entry_point The address at which the thread should start execution
139 * @param priority The thread's priority
140 * @param arg User data to pass to the thread
141 * @param processor_id The ID(s) of the processors on which the thread is desired to be run
142 * @param stack_top The address of the thread's stack top
143 * @param owner_process The parent process for the thread; if null, it's a kernel thread
144 * @param thread_start_func The function where the host context will start.
145 * @param thread_start_parameter The parameter which will be passed to the host context on init
146 * @return A shared pointer to the newly created thread
147 */
148 [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> Create(
149 Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
150 u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process,
151 std::function<void(void*)>&& thread_start_func, void* thread_start_parameter);
152
153 [[nodiscard]] std::string GetName() const override {
154 return name;
155 }
156
157 void SetName(std::string new_name) {
158 name = std::move(new_name);
159 }
160
161 [[nodiscard]] std::string GetTypeName() const override {
162 return "Thread";
163 }
164
165 static constexpr HandleType HANDLE_TYPE = HandleType::Thread;
166 [[nodiscard]] HandleType GetHandleType() const override {
167 return HANDLE_TYPE;
168 }
169
170 /**
171 * Gets the thread's current priority.
172 * @return The thread's current priority.
173 */
174 [[nodiscard]] s32 GetPriority() const {
175 return priority;
176 }
177
178 /**
179 * Sets the thread's current priority.
180 * @param value The new priority.
181 */
182 void SetPriority(s32 value) {
183 priority = value;
184 }
185
186 /**
187 * Gets the thread's nominal priority.
188 * @return The thread's nominal priority.
189 */
190 [[nodiscard]] s32 GetBasePriority() const {
191 return base_priority;
192 }
193
194 /**
195 * Gets the thread's thread ID
196 * @return The thread's ID
197 */
198 [[nodiscard]] u64 GetThreadID() const {
199 return thread_id;
200 }
201
202 void ContinueIfHasKernelWaiters() {
203 if (GetNumKernelWaiters() > 0) {
204 Continue();
205 }
206 }
207
208 void Wakeup();
209
210 void SetBasePriority(s32 value);
211
212 [[nodiscard]] ResultCode Run();
213
214 void Exit();
215
216 [[nodiscard]] u32 GetSuspendFlags() const {
217 return suspend_allowed_flags & suspend_request_flags;
218 }
219
220 [[nodiscard]] bool IsSuspended() const {
221 return GetSuspendFlags() != 0;
222 }
223
224 [[nodiscard]] bool IsSuspendRequested(SuspendType type) const {
225 return (suspend_request_flags &
226 (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)))) !=
227 0;
228 }
229
230 [[nodiscard]] bool IsSuspendRequested() const {
231 return suspend_request_flags != 0;
232 }
233
234 void RequestSuspend(SuspendType type);
235
236 void Resume(SuspendType type);
237
238 void TrySuspend();
239
240 void Continue();
241
242 void Suspend();
243
244 void Finalize() override;
245
246 bool IsSignaled() const override;
247
248 void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) {
249 synced_object = obj;
250 wait_result = wait_res;
251 }
252
253 [[nodiscard]] ResultCode GetWaitResult(KSynchronizationObject** out) const {
254 *out = synced_object;
255 return wait_result;
256 }
257
258 /**
259 * Returns the Thread Local Storage address of this thread.
260 * @return VAddr of the thread's TLS.
261 */
262 [[nodiscard]] VAddr GetTLSAddress() const {
263 return tls_address;
264 }
265
266 /**
267 * Returns the value of the TPIDR_EL0 Read/Write system register for this thread.
268 * @return The value of the TPIDR_EL0 register.
269 */
270 [[nodiscard]] u64 GetTPIDR_EL0() const {
271 return thread_context_64.tpidr;
272 }
273
274 /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread.
275 void SetTPIDR_EL0(u64 value) {
276 thread_context_64.tpidr = value;
277 thread_context_32.tpidr = static_cast<u32>(value);
278 }
279
280 [[nodiscard]] ThreadContext32& GetContext32() {
281 return thread_context_32;
282 }
283
284 [[nodiscard]] const ThreadContext32& GetContext32() const {
285 return thread_context_32;
286 }
287
288 [[nodiscard]] ThreadContext64& GetContext64() {
289 return thread_context_64;
290 }
291
292 [[nodiscard]] const ThreadContext64& GetContext64() const {
293 return thread_context_64;
294 }
295
296 [[nodiscard]] std::shared_ptr<Common::Fiber>& GetHostContext();
297
298 [[nodiscard]] ThreadState GetState() const {
299 return thread_state & ThreadState::Mask;
300 }
301
302 [[nodiscard]] ThreadState GetRawState() const {
303 return thread_state;
304 }
305
306 void SetState(ThreadState state);
307
308 [[nodiscard]] s64 GetLastScheduledTick() const {
309 return last_scheduled_tick;
310 }
311
312 void SetLastScheduledTick(s64 tick) {
313 last_scheduled_tick = tick;
314 }
315
316 void AddCpuTime([[maybe_unused]] s32 core_id_, s64 amount) {
317 cpu_time += amount;
318 // TODO(bunnei): Debug kernels track per-core tick counts. Should we?
319 }
320
321 [[nodiscard]] s64 GetCpuTime() const {
322 return cpu_time;
323 }
324
325 [[nodiscard]] s32 GetActiveCore() const {
326 return core_id;
327 }
328
329 void SetActiveCore(s32 core) {
330 core_id = core;
331 }
332
333 [[nodiscard]] s32 GetCurrentCore() const {
334 return current_core_id;
335 }
336
337 void SetCurrentCore(s32 core) {
338 current_core_id = core;
339 }
340
341 [[nodiscard]] Process* GetOwnerProcess() {
342 return parent;
343 }
344
345 [[nodiscard]] const Process* GetOwnerProcess() const {
346 return parent;
347 }
348
349 [[nodiscard]] bool IsUserThread() const {
350 return parent != nullptr;
351 }
352
353 [[nodiscard]] KThread* GetLockOwner() const {
354 return lock_owner;
355 }
356
357 void SetLockOwner(KThread* owner) {
358 lock_owner = owner;
359 }
360
361 [[nodiscard]] const KAffinityMask& GetAffinityMask() const {
362 return physical_affinity_mask;
363 }
364
365 [[nodiscard]] ResultCode GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
366
367 [[nodiscard]] ResultCode GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
368
369 [[nodiscard]] ResultCode SetCoreMask(s32 core_id, u64 v_affinity_mask);
370
371 [[nodiscard]] ResultCode SetActivity(Svc::ThreadActivity activity);
372
373 [[nodiscard]] ResultCode Sleep(s64 timeout);
374
375 [[nodiscard]] s64 GetYieldScheduleCount() const {
376 return schedule_count;
377 }
378
379 void SetYieldScheduleCount(s64 count) {
380 schedule_count = count;
381 }
382
383 void WaitCancel();
384
385 [[nodiscard]] bool IsWaitCancelled() const {
386 return wait_cancelled;
387 }
388
389 void ClearWaitCancelled() {
390 wait_cancelled = false;
391 }
392
393 [[nodiscard]] bool IsCancellable() const {
394 return cancellable;
395 }
396
397 void SetCancellable() {
398 cancellable = true;
399 }
400
401 void ClearCancellable() {
402 cancellable = false;
403 }
404
405 [[nodiscard]] bool IsTerminationRequested() const {
406 return termination_requested || GetRawState() == ThreadState::Terminated;
407 }
408
409 struct StackParameters {
410 u8 svc_permission[0x10];
411 std::atomic<u8> dpc_flags;
412 u8 current_svc_id;
413 bool is_calling_svc;
414 bool is_in_exception_handler;
415 bool is_pinned;
416 s32 disable_count;
417 KThread* cur_thread;
418 };
419
420 [[nodiscard]] StackParameters& GetStackParameters() {
421 return stack_parameters;
422 }
423
424 [[nodiscard]] const StackParameters& GetStackParameters() const {
425 return stack_parameters;
426 }
427
428 class QueueEntry {
429 public:
430 constexpr QueueEntry() = default;
431
432 constexpr void Initialize() {
433 prev = nullptr;
434 next = nullptr;
435 }
436
437 constexpr KThread* GetPrev() const {
438 return prev;
439 }
440 constexpr KThread* GetNext() const {
441 return next;
442 }
443 constexpr void SetPrev(KThread* thread) {
444 prev = thread;
445 }
446 constexpr void SetNext(KThread* thread) {
447 next = thread;
448 }
449
450 private:
451 KThread* prev{};
452 KThread* next{};
453 };
454
455 [[nodiscard]] QueueEntry& GetPriorityQueueEntry(s32 core) {
456 return per_core_priority_queue_entry[core];
457 }
458
459 [[nodiscard]] const QueueEntry& GetPriorityQueueEntry(s32 core) const {
460 return per_core_priority_queue_entry[core];
461 }
462
463 void SetSleepingQueue(KThreadQueue* q) {
464 sleeping_queue = q;
465 }
466
467 [[nodiscard]] s32 GetDisableDispatchCount() const {
468 return this->GetStackParameters().disable_count;
469 }
470
471 void DisableDispatch() {
472 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
473 this->GetStackParameters().disable_count++;
474 }
475
476 void EnableDispatch() {
477 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
478 this->GetStackParameters().disable_count--;
479 }
480
481 void Pin();
482
483 void Unpin();
484
485 void SetInExceptionHandler() {
486 this->GetStackParameters().is_in_exception_handler = true;
487 }
488
489 void ClearInExceptionHandler() {
490 this->GetStackParameters().is_in_exception_handler = false;
491 }
492
493 [[nodiscard]] bool IsInExceptionHandler() const {
494 return this->GetStackParameters().is_in_exception_handler;
495 }
496
497 void SetIsCallingSvc() {
498 this->GetStackParameters().is_calling_svc = true;
499 }
500
501 void ClearIsCallingSvc() {
502 this->GetStackParameters().is_calling_svc = false;
503 }
504
505 [[nodiscard]] bool IsCallingSvc() const {
506 return this->GetStackParameters().is_calling_svc;
507 }
508
509 [[nodiscard]] u8 GetSvcId() const {
510 return this->GetStackParameters().current_svc_id;
511 }
512
513 void RegisterDpc(DpcFlag flag) {
514 this->GetStackParameters().dpc_flags |= static_cast<u8>(flag);
515 }
516
517 void ClearDpc(DpcFlag flag) {
518 this->GetStackParameters().dpc_flags &= ~static_cast<u8>(flag);
519 }
520
521 [[nodiscard]] u8 GetDpc() const {
522 return this->GetStackParameters().dpc_flags;
523 }
524
525 [[nodiscard]] bool HasDpc() const {
526 return this->GetDpc() != 0;
527 }
528
529 void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) {
530 wait_reason_for_debugging = reason;
531 }
532
533 [[nodiscard]] ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const {
534 return wait_reason_for_debugging;
535 }
536
537 [[nodiscard]] ThreadType GetThreadTypeForDebugging() const {
538 return thread_type_for_debugging;
539 }
540
541 void SetWaitObjectsForDebugging(std::span<KSynchronizationObject*> objects) {
542 wait_objects_for_debugging.clear();
543 wait_objects_for_debugging.reserve(objects.size());
544 for (const auto& object : objects) {
545 wait_objects_for_debugging.emplace_back(object);
546 }
547 }
548
549 [[nodiscard]] const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const {
550 return wait_objects_for_debugging;
551 }
552
553 void SetMutexWaitAddressForDebugging(VAddr address) {
554 mutex_wait_address_for_debugging = address;
555 }
556
557 [[nodiscard]] VAddr GetMutexWaitAddressForDebugging() const {
558 return mutex_wait_address_for_debugging;
559 }
560
561 [[nodiscard]] s32 GetIdealCoreForDebugging() const {
562 return virtual_ideal_core_id;
563 }
564
565 void AddWaiter(KThread* thread);
566
567 void RemoveWaiter(KThread* thread);
568
569 [[nodiscard]] ResultCode GetThreadContext3(std::vector<u8>& out);
570
571 [[nodiscard]] KThread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);
572
573 [[nodiscard]] VAddr GetAddressKey() const {
574 return address_key;
575 }
576
577 [[nodiscard]] u32 GetAddressKeyValue() const {
578 return address_key_value;
579 }
580
581 void SetAddressKey(VAddr key) {
582 address_key = key;
583 }
584
585 void SetAddressKey(VAddr key, u32 val) {
586 address_key = key;
587 address_key_value = val;
588 }
589
590 [[nodiscard]] bool HasWaiters() const {
591 return !waiter_list.empty();
592 }
593
594 [[nodiscard]] s32 GetNumKernelWaiters() const {
595 return num_kernel_waiters;
596 }
597
598 [[nodiscard]] u64 GetConditionVariableKey() const {
599 return condvar_key;
600 }
601
602 [[nodiscard]] u64 GetAddressArbiterKey() const {
603 return condvar_key;
604 }
605
606private:
607 static constexpr size_t PriorityInheritanceCountMax = 10;
608 union SyncObjectBuffer {
609 std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> sync_objects{};
610 std::array<Handle,
611 Svc::ArgumentHandleCountMax*(sizeof(KSynchronizationObject*) / sizeof(Handle))>
612 handles;
613 constexpr SyncObjectBuffer() {}
614 };
615 static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));
616
617 struct ConditionVariableComparator {
618 struct LightCompareType {
619 u64 cv_key{};
620 s32 priority{};
621
622 [[nodiscard]] constexpr u64 GetConditionVariableKey() const {
623 return cv_key;
624 }
625
626 [[nodiscard]] constexpr s32 GetPriority() const {
627 return priority;
628 }
629 };
630
631 template <typename T>
632 requires(std::same_as<T, KThread> || std::same_as<T, LightCompareType>)
633 static constexpr int Compare(const T& lhs, const KThread& rhs) {
636 const u64 l_key = lhs.GetConditionVariableKey();
637 const u64 r_key = rhs.GetConditionVariableKey();
638
639 if (l_key < r_key) {
640 // Sort first by key
641 return -1;
642 } else if (l_key == r_key && lhs.GetPriority() < rhs.GetPriority()) {
643 // And then by priority.
644 return -1;
645 } else {
646 return 1;
647 }
648 }
649 };
650
651 void AddWaiterImpl(KThread* thread);
652
653 void RemoveWaiterImpl(KThread* thread);
654
655 void StartTermination();
656
657 [[nodiscard]] ResultCode Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
658 s32 prio, s32 virt_core, Process* owner, ThreadType type);
659
660 [[nodiscard]] static ResultCode InitializeThread(KThread* thread, KThreadFunction func,
661 uintptr_t arg, VAddr user_stack_top, s32 prio,
662 s32 core, Process* owner, ThreadType type);
663
664 static void RestorePriority(KernelCore& kernel, KThread* thread);
665
666 // For core KThread implementation
667 ThreadContext32 thread_context_32{};
668 ThreadContext64 thread_context_64{};
669 Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};
670 s32 priority{};
671 using ConditionVariableThreadTreeTraits =
672 Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<
673 &KThread::condvar_arbiter_tree_node>;
674 using ConditionVariableThreadTree =
675 ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
676 ConditionVariableThreadTree* condvar_tree{};
677 u64 condvar_key{};
678 u64 virtual_affinity_mask{};
679 KAffinityMask physical_affinity_mask{};
680 u64 thread_id{};
681 std::atomic<s64> cpu_time{};
682 KSynchronizationObject* synced_object{};
683 VAddr address_key{};
684 Process* parent{};
685 VAddr kernel_stack_top{};
686 u32* light_ipc_data{};
687 VAddr tls_address{};
688 KLightLock activity_pause_lock;
689 s64 schedule_count{};
690 s64 last_scheduled_tick{};
691 std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
692 KThreadQueue* sleeping_queue{};
693 WaiterList waiter_list{};
694 WaiterList pinned_waiter_list{};
695 KThread* lock_owner{};
696 u32 address_key_value{};
697 u32 suspend_request_flags{};
698 u32 suspend_allowed_flags{};
699 ResultCode wait_result{RESULT_SUCCESS};
700 s32 base_priority{};
701 s32 physical_ideal_core_id{};
702 s32 virtual_ideal_core_id{};
703 s32 num_kernel_waiters{};
704 s32 current_core_id{};
705 s32 core_id{};
706 KAffinityMask original_physical_affinity_mask{};
707 s32 original_physical_ideal_core_id{};
708 s32 num_core_migration_disables{};
709 ThreadState thread_state{};
710 std::atomic<bool> termination_requested{};
711 bool wait_cancelled{};
712 bool cancellable{};
713 bool signaled{};
714 bool initialized{};
715 bool debug_attached{};
716 s8 priority_inheritance_count{};
717 bool resource_limit_release_hint{};
718 StackParameters stack_parameters{};
719 Common::SpinLock context_guard{};
720
721 // For emulation
722 std::shared_ptr<Common::Fiber> host_context{};
723
724 // For debugging
725 std::vector<KSynchronizationObject*> wait_objects_for_debugging;
726 VAddr mutex_wait_address_for_debugging{};
727 ThreadWaitReasonForDebugging wait_reason_for_debugging{};
728 ThreadType thread_type_for_debugging{};
729 std::string name;
730
731public:
732 using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
733
734 void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key,
735 u32 value) {
736 condvar_tree = tree;
737 condvar_key = cv_key;
738 address_key = address;
739 address_key_value = value;
740 }
741
742 void ClearConditionVariable() {
743 condvar_tree = nullptr;
744 }
745
746 [[nodiscard]] bool IsWaitingForConditionVariable() const {
747 return condvar_tree != nullptr;
748 }
749
750 void SetAddressArbiter(ConditionVariableThreadTree* tree, u64 address) {
751 condvar_tree = tree;
752 condvar_key = address;
753 }
754
755 void ClearAddressArbiter() {
756 condvar_tree = nullptr;
757 }
758
759 [[nodiscard]] bool IsWaitingForAddressArbiter() const {
760 return condvar_tree != nullptr;
761 }
762
763 [[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const {
764 return condvar_tree;
765 }
766};
767
768} // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread_queue.h b/src/core/hle/kernel/k_thread_queue.h
new file mode 100644
index 000000000..c52eba249
--- /dev/null
+++ b/src/core/hle/kernel/k_thread_queue.h
@@ -0,0 +1,81 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
#include "core/hle/kernel/k_scheduler.h"
7#include "core/hle/kernel/k_thread.h"
8
9namespace Kernel {
10
11class KThreadQueue {
12public:
13 explicit KThreadQueue(KernelCore& kernel) : kernel{kernel} {}
14
15 bool IsEmpty() const {
16 return wait_list.empty();
17 }
18
19 KThread::WaiterList::iterator begin() {
20 return wait_list.begin();
21 }
22 KThread::WaiterList::iterator end() {
23 return wait_list.end();
24 }
25
26 bool SleepThread(KThread* t) {
27 KScopedSchedulerLock sl{kernel};
28
29 // If the thread needs terminating, don't enqueue it.
30 if (t->IsTerminationRequested()) {
31 return false;
32 }
33
34 // Set the thread's queue and mark it as waiting.
35 t->SetSleepingQueue(this);
36 t->SetState(ThreadState::Waiting);
37
38 // Add the thread to the queue.
39 wait_list.push_back(*t);
40
41 return true;
42 }
43
44 void WakeupThread(KThread* t) {
45 KScopedSchedulerLock sl{kernel};
46
47 // Remove the thread from the queue.
48 wait_list.erase(wait_list.iterator_to(*t));
49
50 // Mark the thread as no longer sleeping.
51 t->SetState(ThreadState::Runnable);
52 t->SetSleepingQueue(nullptr);
53 }
54
55 KThread* WakeupFrontThread() {
56 KScopedSchedulerLock sl{kernel};
57
58 if (wait_list.empty()) {
59 return nullptr;
60 } else {
61 // Remove the thread from the queue.
62 auto it = wait_list.begin();
63 KThread* thread = std::addressof(*it);
64 wait_list.erase(it);
65
66 ASSERT(thread->GetState() == ThreadState::Waiting);
67
68 // Mark the thread as no longer sleeping.
69 thread->SetState(ThreadState::Runnable);
70 thread->SetSleepingQueue(nullptr);
71
72 return thread;
73 }
74 }
75
76private:
77 KernelCore& kernel;
78 KThread::WaiterList wait_list{};
79};
80
81} // namespace Kernel
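
KThreadQueue pairs with KThread::SetSleepingQueue()/Wakeup(): a sleeping thread
records its queue so a later Wakeup() is routed through WakeupThread(). A usage
sketch with a hypothetical waiter/signaler pair (simplified; no timeout handling):

    KThreadQueue queue{kernel};

    // Waiter side, on the thread that should block:
    if (queue.SleepThread(GetCurrentThreadPointer(kernel))) {
        // The thread is now ThreadState::Waiting with sleeping_queue == &queue;
        // it resumes from here once a signaler wakes it.
    }

    // Signaler side:
    if (KThread* woken = queue.WakeupFrontThread()) {
        // woken is Runnable again and detached from the queue.
    }
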
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index c0ff287a6..df309d523 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -29,6 +29,7 @@
29#include "core/hle/kernel/errors.h" 29#include "core/hle/kernel/errors.h"
30#include "core/hle/kernel/handle_table.h" 30#include "core/hle/kernel/handle_table.h"
31#include "core/hle/kernel/k_scheduler.h" 31#include "core/hle/kernel/k_scheduler.h"
32#include "core/hle/kernel/k_thread.h"
32#include "core/hle/kernel/kernel.h" 33#include "core/hle/kernel/kernel.h"
33#include "core/hle/kernel/memory/memory_layout.h" 34#include "core/hle/kernel/memory/memory_layout.h"
34#include "core/hle/kernel/memory/memory_manager.h" 35#include "core/hle/kernel/memory/memory_manager.h"
@@ -38,7 +39,6 @@
38#include "core/hle/kernel/resource_limit.h" 39#include "core/hle/kernel/resource_limit.h"
39#include "core/hle/kernel/service_thread.h" 40#include "core/hle/kernel/service_thread.h"
40#include "core/hle/kernel/shared_memory.h" 41#include "core/hle/kernel/shared_memory.h"
41#include "core/hle/kernel/thread.h"
42#include "core/hle/kernel/time_manager.h" 42#include "core/hle/kernel/time_manager.h"
43#include "core/hle/lock.h" 43#include "core/hle/lock.h"
44#include "core/hle/result.h" 44#include "core/hle/result.h"
@@ -57,11 +57,13 @@ struct KernelCore::Impl {
57 } 57 }
58 58
59 void Initialize(KernelCore& kernel) { 59 void Initialize(KernelCore& kernel) {
60 global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
61
60 RegisterHostThread(); 62 RegisterHostThread();
61 63
62 global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
63 service_thread_manager = 64 service_thread_manager =
64 std::make_unique<Common::ThreadWorker>(1, "yuzu:ServiceThreadManager"); 65 std::make_unique<Common::ThreadWorker>(1, "yuzu:ServiceThreadManager");
66 is_phantom_mode_for_singlecore = false;
65 67
66 InitializePhysicalCores(); 68 InitializePhysicalCores();
67 InitializeSystemResourceLimit(kernel); 69 InitializeSystemResourceLimit(kernel);
@@ -116,14 +118,14 @@ struct KernelCore::Impl {
116 void InitializePhysicalCores() { 118 void InitializePhysicalCores() {
117 exclusive_monitor = 119 exclusive_monitor =
118 Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES); 120 Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
119 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 121 for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
120 schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i); 122 schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i);
121 cores.emplace_back(i, system, *schedulers[i], interrupts); 123 cores.emplace_back(i, system, *schedulers[i], interrupts);
122 } 124 }
123 } 125 }
124 126
125 void InitializeSchedulers() { 127 void InitializeSchedulers() {
126 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 128 for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
127 cores[i].Scheduler().Initialize(); 129 cores[i].Scheduler().Initialize();
128 } 130 }
129 } 131 }
@@ -168,11 +170,9 @@ struct KernelCore::Impl {
168 std::string name = "Suspend Thread Id:" + std::to_string(i); 170 std::string name = "Suspend Thread Id:" + std::to_string(i);
169 std::function<void(void*)> init_func = Core::CpuManager::GetSuspendThreadStartFunc(); 171 std::function<void(void*)> init_func = Core::CpuManager::GetSuspendThreadStartFunc();
170 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); 172 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
171 const auto type = 173 auto thread_res = KThread::Create(system, ThreadType::HighPriority, std::move(name), 0,
172 static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND); 174 0, 0, static_cast<u32>(i), 0, nullptr,
173 auto thread_res = 175 std::move(init_func), init_func_parameter);
174 Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast<u32>(i), 0,
175 nullptr, std::move(init_func), init_func_parameter);
176 176
177 suspend_threads[i] = std::move(thread_res).Unwrap(); 177 suspend_threads[i] = std::move(thread_res).Unwrap();
178 } 178 }
@@ -207,6 +207,17 @@ struct KernelCore::Impl {
207 return host_thread_id; 207 return host_thread_id;
208 } 208 }
209 209
210 // Gets the dummy KThread for the caller, allocating a new one if this is the first time
211 KThread* GetHostDummyThread() {
212 const thread_local auto thread =
213 KThread::Create(
214 system, ThreadType::Main, fmt::format("DummyThread:{}", GetHostThreadId()), 0,
215 KThread::DefaultThreadPriority, 0, static_cast<u32>(3), 0, nullptr,
216 []([[maybe_unused]] void* arg) { UNREACHABLE(); }, nullptr)
217 .Unwrap();
218 return thread.get();
219 }
220
210 /// Registers a CPU core thread by allocating a host thread ID for it 221 /// Registers a CPU core thread by allocating a host thread ID for it
211 void RegisterCoreThread(std::size_t core_id) { 222 void RegisterCoreThread(std::size_t core_id) {
212 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); 223 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
@@ -219,6 +230,7 @@ struct KernelCore::Impl {
219 /// Registers a new host thread by allocating a host thread ID for it 230 /// Registers a new host thread by allocating a host thread ID for it
220 void RegisterHostThread() { 231 void RegisterHostThread() {
221 [[maybe_unused]] const auto this_id = GetHostThreadId(); 232 [[maybe_unused]] const auto this_id = GetHostThreadId();
233 [[maybe_unused]] const auto dummy_thread = GetHostDummyThread();
222 } 234 }
223 235
224 [[nodiscard]] u32 GetCurrentHostThreadID() { 236 [[nodiscard]] u32 GetCurrentHostThreadID() {
@@ -229,20 +241,21 @@ struct KernelCore::Impl {
         return this_id;
     }
 
-    [[nodiscard]] Core::EmuThreadHandle GetCurrentEmuThreadID() {
-        Core::EmuThreadHandle result = Core::EmuThreadHandle::InvalidHandle();
-        result.host_handle = GetCurrentHostThreadID();
-        if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) {
-            return result;
-        }
-        const Kernel::KScheduler& sched = cores[result.host_handle].Scheduler();
-        const Kernel::Thread* current = sched.GetCurrentThread();
-        if (current != nullptr && !current->IsPhantomMode()) {
-            result.guest_handle = current->GetGlobalHandle();
-        } else {
-            result.guest_handle = InvalidHandle;
-        }
-        return result;
-    }
+    bool IsPhantomModeForSingleCore() const {
+        return is_phantom_mode_for_singlecore;
+    }
+
+    void SetIsPhantomModeForSingleCore(bool value) {
+        ASSERT(!is_multicore);
+        is_phantom_mode_for_singlecore = value;
+    }
+
+    KThread* GetCurrentEmuThread() {
+        const auto thread_id = GetCurrentHostThreadID();
+        if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
+            return GetHostDummyThread();
+        }
+        return schedulers[thread_id]->GetCurrentThread();
+    }
 
     void InitializeMemoryLayout() {
@@ -342,11 +355,12 @@ struct KernelCore::Impl {
     // the release of itself
     std::unique_ptr<Common::ThreadWorker> service_thread_manager;
 
-    std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
+    std::array<std::shared_ptr<KThread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
     std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
     std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
 
     bool is_multicore{};
+    bool is_phantom_mode_for_singlecore{};
     u32 single_core_thread_id{};
 
     std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{};
@@ -380,8 +394,8 @@ std::shared_ptr<ResourceLimit> KernelCore::GetSystemResourceLimit() const {
     return impl->system_resource_limit;
 }
 
-std::shared_ptr<Thread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
-    return impl->global_handle_table.Get<Thread>(handle);
+std::shared_ptr<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
+    return impl->global_handle_table.Get<KThread>(handle);
 }
 
 void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) {
@@ -546,8 +560,8 @@ u32 KernelCore::GetCurrentHostThreadID() const {
     return impl->GetCurrentHostThreadID();
 }
 
-Core::EmuThreadHandle KernelCore::GetCurrentEmuThreadID() const {
-    return impl->GetCurrentEmuThreadID();
+KThread* KernelCore::GetCurrentEmuThread() const {
+    return impl->GetCurrentEmuThread();
 }
 
 Memory::MemoryManager& KernelCore::MemoryManager() {
@@ -645,4 +659,12 @@ void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> servi
     });
 }
 
+bool KernelCore::IsPhantomModeForSingleCore() const {
+    return impl->IsPhantomModeForSingleCore();
+}
+
+void KernelCore::SetIsPhantomModeForSingleCore(bool value) {
+    impl->SetIsPhantomModeForSingleCore(value);
+}
+
 } // namespace Kernel
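The new IsPhantomModeForSingleCore/SetIsPhantomModeForSingleCore pair lets single-core mode mark stretches where the host is running scheduler bookkeeping rather than a real guest thread; the setter asserts it is never used in multicore mode. A hypothetical RAII wrapper showing how such a flag could be toggled around a scoped region (a sketch only, not code from this change):

    // Sketch: scoped toggle for a phantom-mode style flag, assuming a
    // kernel-like object exposing the two accessors added above.
    template <typename Kernel>
    class ScopedPhantomMode {
    public:
        explicit ScopedPhantomMode(Kernel& k) : kernel{k} {
            kernel.SetIsPhantomModeForSingleCore(true);
        }
        ~ScopedPhantomMode() {
            kernel.SetIsPhantomModeForSingleCore(false);
        }
        ScopedPhantomMode(const ScopedPhantomMode&) = delete;
        ScopedPhantomMode& operator=(const ScopedPhantomMode&) = delete;

    private:
        Kernel& kernel;
    };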
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 933d9a7d6..e7c77727b 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -43,9 +43,13 @@ class KScheduler;
 class SharedMemory;
 class ServiceThread;
 class Synchronization;
-class Thread;
+class KThread;
 class TimeManager;
 
+using EmuThreadHandle = uintptr_t;
+constexpr EmuThreadHandle EmuThreadHandleInvalid{};
+constexpr EmuThreadHandle EmuThreadHandleReserved{1ULL << 63};
+
 /// Represents a single instance of the kernel.
 class KernelCore {
 private:
@@ -84,7 +88,7 @@ public:
     std::shared_ptr<ResourceLimit> GetSystemResourceLimit() const;
 
     /// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table.
-    std::shared_ptr<Thread> RetrieveThreadFromGlobalHandleTable(Handle handle) const;
+    std::shared_ptr<KThread> RetrieveThreadFromGlobalHandleTable(Handle handle) const;
 
     /// Adds the given shared pointer to an internal list of active processes.
     void AppendNewProcess(std::shared_ptr<Process> process);
@@ -161,8 +165,8 @@ public:
     /// Determines whether or not the given port is a valid named port.
     bool IsValidNamedPort(NamedPortTable::const_iterator port) const;
 
-    /// Gets the current host_thread/guest_thread handle.
-    Core::EmuThreadHandle GetCurrentEmuThreadID() const;
+    /// Gets the current host_thread/guest_thread pointer.
+    KThread* GetCurrentEmuThread() const;
 
     /// Gets the current host_thread handle.
     u32 GetCurrentHostThreadID() const;
@@ -237,10 +241,14 @@ public:
      */
     void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread);
 
+    /// Workaround for single-core mode when preempting threads while idle.
+    bool IsPhantomModeForSingleCore() const;
+    void SetIsPhantomModeForSingleCore(bool value);
+
 private:
     friend class Object;
     friend class Process;
-    friend class Thread;
+    friend class KThread;
 
     /// Creates a new object ID, incrementing the internal object ID counter.
     u32 CreateNewObjectID();
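EmuThreadHandle is now just a uintptr_t carrying the thread's pointer value, with zero as the invalid handle and bit 63 reserved as a sentinel. A minimal sketch of testing those sentinels, assuming 64-bit pointers just as the constants above already do:

    #include <cstdint>

    using EmuThreadHandle = uintptr_t;
    constexpr EmuThreadHandle EmuThreadHandleInvalid{};
    constexpr EmuThreadHandle EmuThreadHandleReserved{1ULL << 63};

    // Real pointers never have the reserved bit set on the targeted 64-bit
    // platforms, so that bit can double as a sentinel tag.
    constexpr bool IsInvalid(EmuThreadHandle h) {
        return h == EmuThreadHandleInvalid;
    }
    constexpr bool IsReserved(EmuThreadHandle h) {
        return (h & EmuThreadHandleReserved) != 0;
    }

    static_assert(IsInvalid(EmuThreadHandleInvalid));
    static_assert(IsReserved(EmuThreadHandleReserved));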
diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h
index 27124ef67..be7fcb5fb 100644
--- a/src/core/hle/kernel/object.h
+++ b/src/core/hle/kernel/object.h
@@ -61,6 +61,8 @@ public:
      */
     bool IsWaitable() const;
 
+    virtual void Finalize() = 0;
+
 protected:
     /// The kernel instance this object was created under.
     KernelCore& kernel;
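Making Finalize() a pure virtual on the base Object forces every kernel object type to state its teardown behavior explicitly; as the rest of this diff shows, most types override it with an empty body. A minimal model of the pattern, with illustrative names only:

    // Sketch: a pure-virtual Finalize makes each subclass opt in to
    // (or explicitly decline) teardown work.
    struct Object {
        virtual ~Object() = default;
        virtual void Finalize() = 0;
    };

    struct ReadableEventLike : Object {
        void Finalize() override {} // Nothing to tear down.
    };

    struct ThreadLike : Object {
        void Finalize() override {
            // A real object would release waiters, handles, etc. here.
        }
    };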
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 37b77fa6e..0edbfc4cc 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -16,13 +16,13 @@
 #include "core/hle/kernel/code_set.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/memory/memory_block_manager.h"
 #include "core/hle/kernel/memory/page_table.h"
 #include "core/hle/kernel/memory/slab_heap.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/resource_limit.h"
-#include "core/hle/kernel/thread.h"
 #include "core/hle/lock.h"
 #include "core/memory.h"
 #include "core/settings.h"
@@ -38,11 +38,10 @@ namespace {
  */
 void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, VAddr stack_top) {
     const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
-    ThreadType type = THREADTYPE_USER;
-    auto thread_res = Thread::Create(system, type, "main", entry_point, priority, 0,
-                                     owner_process.GetIdealCore(), stack_top, &owner_process);
+    auto thread_res = KThread::Create(system, ThreadType::User, "main", entry_point, priority, 0,
+                                      owner_process.GetIdealCoreId(), stack_top, &owner_process);
 
-    std::shared_ptr<Thread> thread = std::move(thread_res).Unwrap();
+    std::shared_ptr<KThread> thread = std::move(thread_res).Unwrap();
 
     // Register 1 must be a handle to the main thread
     const Handle thread_handle = owner_process.GetHandleTable().Create(thread).Unwrap();
@@ -137,6 +136,23 @@ std::shared_ptr<ResourceLimit> Process::GetResourceLimit() const {
     return resource_limit;
 }
 
+void Process::IncrementThreadCount() {
+    ASSERT(num_threads >= 0);
+    num_created_threads++;
+
+    if (const auto count = ++num_threads; count > peak_num_threads) {
+        peak_num_threads = count;
+    }
+}
+
+void Process::DecrementThreadCount() {
+    ASSERT(num_threads > 0);
+
+    if (const auto count = --num_threads; count == 0) {
+        UNIMPLEMENTED_MSG("Process termination is not implemented!");
+    }
+}
+
 u64 Process::GetTotalPhysicalMemoryAvailable() const {
     const u64 capacity{resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory) +
                        page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
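IncrementThreadCount above tracks three values: the number of threads ever created, the number currently alive, and the high-water mark of the live count; DecrementThreadCount treats the live count reaching zero as process termination (currently unimplemented). The same bookkeeping in a compilable sketch:

    #include <atomic>
    #include <cassert>

    struct ThreadStats {
        std::atomic<int> num_created{};
        std::atomic<int> num_threads{};
        int peak_num_threads{};

        void Increment() {
            num_created++;
            // Track the high-water mark of simultaneously live threads.
            if (const int count = ++num_threads; count > peak_num_threads) {
                peak_num_threads = count;
            }
        }

        void Decrement() {
            assert(num_threads > 0);
            if (const int count = --num_threads; count == 0) {
                // The original reports "process termination is not implemented" here.
            }
        }
    };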
@@ -162,11 +178,66 @@ u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
     return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
 }
 
-void Process::RegisterThread(const Thread* thread) {
+bool Process::ReleaseUserException(KThread* thread) {
+    KScopedSchedulerLock sl{kernel};
+
+    if (exception_thread == thread) {
+        exception_thread = nullptr;
+
+        // Remove waiter thread.
+        s32 num_waiters{};
+        KThread* next = thread->RemoveWaiterByKey(
+            std::addressof(num_waiters),
+            reinterpret_cast<uintptr_t>(std::addressof(exception_thread)));
+        if (next != nullptr) {
+            if (next->GetState() == ThreadState::Waiting) {
+                next->SetState(ThreadState::Runnable);
+            } else {
+                KScheduler::SetSchedulerUpdateNeeded(kernel);
+            }
+        }
+
+        return true;
+    } else {
+        return false;
+    }
+}
+
+void Process::PinCurrentThread() {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+    // Get the current thread.
+    const s32 core_id = GetCurrentCoreId(kernel);
+    KThread* cur_thread = GetCurrentThreadPointer(kernel);
+
+    // Pin it.
+    PinThread(core_id, cur_thread);
+    cur_thread->Pin();
+
+    // An update is needed.
+    KScheduler::SetSchedulerUpdateNeeded(kernel);
+}
+
+void Process::UnpinCurrentThread() {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+    // Get the current thread.
+    const s32 core_id = GetCurrentCoreId(kernel);
+    KThread* cur_thread = GetCurrentThreadPointer(kernel);
+
+    // Unpin it.
+    cur_thread->Unpin();
+    UnpinThread(core_id, cur_thread);
+
+    // An update is needed.
+    KScheduler::SetSchedulerUpdateNeeded(kernel);
+}
+
+void Process::RegisterThread(const KThread* thread) {
     thread_list.push_back(thread);
 }
 
-void Process::UnregisterThread(const Thread* thread) {
+void Process::UnregisterThread(const KThread* thread) {
     thread_list.remove(thread);
 }
 
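PinThread and UnpinThread (the private helpers declared later in process.h) assert that each core's pinned slot goes empty, then holds exactly one thread, then goes empty again, so Pin/Unpin calls must come in strict pairs. A cut-down model of that invariant, with an opaque stand-in for KThread:

    #include <array>
    #include <cassert>
    #include <cstddef>

    constexpr std::size_t NumCores = 4;

    struct Thread; // Opaque stand-in for KThread.

    struct PinnedSlots {
        std::array<Thread*, NumCores> pinned{};

        void Pin(std::size_t core, Thread* t) {
            assert(t != nullptr && pinned[core] == nullptr); // Slot must be free.
            pinned[core] = t;
        }

        void Unpin(std::size_t core, Thread* t) {
            assert(pinned[core] == t); // Must unpin exactly what was pinned.
            pinned[core] = nullptr;
        }
    };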
@@ -267,7 +338,7 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) {
 void Process::PrepareForTermination() {
     ChangeStatus(ProcessStatus::Exiting);
 
-    const auto stop_threads = [this](const std::vector<std::shared_ptr<Thread>>& thread_list) {
+    const auto stop_threads = [this](const std::vector<std::shared_ptr<KThread>>& thread_list) {
         for (auto& thread : thread_list) {
             if (thread->GetOwnerProcess() != this)
                 continue;
@@ -279,7 +350,7 @@ void Process::PrepareForTermination() {
             ASSERT_MSG(thread->GetState() == ThreadState::Waiting,
                        "Exiting processes with non-waiting threads is currently unimplemented");
 
-            thread->Stop();
+            thread->Exit();
         }
     };
 
@@ -372,7 +443,7 @@ bool Process::IsSignaled() const {
 Process::Process(Core::System& system)
     : KSynchronizationObject{system.Kernel()},
       page_table{std::make_unique<Memory::PageTable>(system)}, handle_table{system.Kernel()},
-      address_arbiter{system}, condition_var{system}, system{system} {}
+      address_arbiter{system}, condition_var{system}, state_lock{system.Kernel()}, system{system} {}
 
 Process::~Process() = default;
 
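The new state_lock member is a KLightLock constructed against the kernel; svcCreateThread later in this diff takes it with KScopedLightLock while the new KThread is created. The scoped-locking shape, modeled here with std::mutex since KLightLock's own interface lives in another file:

    #include <mutex>

    // Sketch: the scoped-lock usage pattern from svcCreateThread, modeled with
    // std::mutex. The real lock serializes changes to process state while a
    // new thread object is created and registered.
    struct ProcessLike {
        std::mutex state_lock;

        void CreateThreadUnderLock() {
            std::scoped_lock lk{state_lock};
            // ... allocate and register the new thread here ...
        }
    };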
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 564e1f27d..26e647743 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -30,7 +30,7 @@ namespace Kernel {
 
 class KernelCore;
 class ResourceLimit;
-class Thread;
+class KThread;
 class TLSPage;
 
 struct CodeSet;
@@ -173,10 +173,15 @@ public:
     std::shared_ptr<ResourceLimit> GetResourceLimit() const;
 
     /// Gets the ideal CPU core ID for this process
-    u8 GetIdealCore() const {
+    u8 GetIdealCoreId() const {
         return ideal_core;
     }
 
+    /// Checks if the specified thread priority is valid.
+    bool CheckThreadPriority(s32 prio) const {
+        return ((1ULL << prio) & GetPriorityMask()) != 0;
+    }
+
     /// Gets the bitmask of allowed cores that this process' threads can run on.
     u64 GetCoreMask() const {
         return capabilities.GetCoreMask();
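CheckThreadPriority above tests a single bit of the process's 64-bit priority mask: bit p set means priority p is permitted. For example, a mask of 0x30 (bits 4 and 5) allows only priorities 4 and 5. A worked sketch of the same arithmetic:

    #include <cstdint>

    // Sketch of the priority-mask test: bit `prio` of the mask grants priority `prio`.
    constexpr bool CheckThreadPriority(std::uint64_t priority_mask, int prio) {
        return ((1ULL << prio) & priority_mask) != 0;
    }

    static_assert(CheckThreadPriority(0x30, 4));  // Bits 4 and 5 set: allowed.
    static_assert(CheckThreadPriority(0x30, 5));
    static_assert(!CheckThreadPriority(0x30, 6)); // Bit 6 clear: rejected.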
@@ -212,6 +217,14 @@ public:
         return is_64bit_process;
     }
 
+    [[nodiscard]] bool IsSuspended() const {
+        return is_suspended;
+    }
+
+    void SetSuspended(bool suspended) {
+        is_suspended = suspended;
+    }
+
     /// Gets the total running time of the process instance in ticks.
     u64 GetCPUTimeTicks() const {
         return total_process_running_time_ticks;
@@ -232,6 +245,33 @@ public:
         ++schedule_count;
     }
 
+    void IncrementThreadCount();
+    void DecrementThreadCount();
+
+    void SetRunningThread(s32 core, KThread* thread, u64 idle_count) {
+        running_threads[core] = thread;
+        running_thread_idle_counts[core] = idle_count;
+    }
+
+    void ClearRunningThread(KThread* thread) {
+        for (size_t i = 0; i < running_threads.size(); ++i) {
+            if (running_threads[i] == thread) {
+                running_threads[i] = nullptr;
+            }
+        }
+    }
+
+    [[nodiscard]] KThread* GetRunningThread(s32 core) const {
+        return running_threads[core];
+    }
+
+    bool ReleaseUserException(KThread* thread);
+
+    [[nodiscard]] KThread* GetPinnedThread(s32 core_id) const {
+        ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+        return pinned_threads[core_id];
+    }
+
     /// Gets 8 bytes of random data for svcGetInfo RandomEntropy
     u64 GetRandomEntropy(std::size_t index) const {
         return random_entropy.at(index);
@@ -252,17 +292,17 @@ public:
     u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;
 
     /// Gets the list of all threads created with this process as their owner.
-    const std::list<const Thread*>& GetThreadList() const {
+    const std::list<const KThread*>& GetThreadList() const {
         return thread_list;
     }
 
     /// Registers a thread as being created under this process,
     /// adding it to this process' thread list.
-    void RegisterThread(const Thread* thread);
+    void RegisterThread(const KThread* thread);
 
     /// Unregisters a thread from this process, removing it
     /// from this process' thread list.
-    void UnregisterThread(const Thread* thread);
+    void UnregisterThread(const KThread* thread);
 
     /// Clears the signaled state of the process if and only if it's signaled.
     ///
@@ -303,6 +343,15 @@ public:
 
     bool IsSignaled() const override;
 
+    void Finalize() override {}
+
+    void PinCurrentThread();
+    void UnpinCurrentThread();
+
+    KLightLock& GetStateLock() {
+        return state_lock;
+    }
+
     ///////////////////////////////////////////////////////////////////////////////////////////////
     // Thread-local storage management
 
@@ -313,6 +362,20 @@ public:
     void FreeTLSRegion(VAddr tls_address);
 
 private:
+    void PinThread(s32 core_id, KThread* thread) {
+        ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+        ASSERT(thread != nullptr);
+        ASSERT(pinned_threads[core_id] == nullptr);
+        pinned_threads[core_id] = thread;
+    }
+
+    void UnpinThread(s32 core_id, KThread* thread) {
+        ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
+        ASSERT(thread != nullptr);
+        ASSERT(pinned_threads[core_id] == thread);
+        pinned_threads[core_id] = nullptr;
+    }
+
     /// Changes the process status. If the status is different
     /// from the current process status, then this will trigger
     /// a process signal.
@@ -380,7 +443,7 @@ private:
     std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
 
     /// List of threads that are running with this process as their owner.
-    std::list<const Thread*> thread_list;
+    std::list<const KThread*> thread_list;
 
     /// Address of the top of the main thread's stack
     VAddr main_thread_stack_top{};
@@ -401,6 +464,19 @@ private:
     s64 schedule_count{};
 
     bool is_signaled{};
+    bool is_suspended{};
+
+    std::atomic<s32> num_created_threads{};
+    std::atomic<u16> num_threads{};
+    u16 peak_num_threads{};
+
+    std::array<KThread*, Core::Hardware::NUM_CPU_CORES> running_threads{};
+    std::array<u64, Core::Hardware::NUM_CPU_CORES> running_thread_idle_counts{};
+    std::array<KThread*, Core::Hardware::NUM_CPU_CORES> pinned_threads{};
+
+    KThread* exception_thread{};
+
+    KLightLock state_lock;
 
     /// System context
     Core::System& system;
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index 99ed0857e..596d01479 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -7,10 +7,10 @@
 #include "common/logging/log.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/readable_event.h"
-#include "core/hle/kernel/thread.h"
 
 namespace Kernel {
 
diff --git a/src/core/hle/kernel/readable_event.h b/src/core/hle/kernel/readable_event.h
index 34e477274..2195710c2 100644
--- a/src/core/hle/kernel/readable_event.h
+++ b/src/core/hle/kernel/readable_event.h
@@ -47,6 +47,8 @@ public:
 
     bool IsSignaled() const override;
 
+    void Finalize() override {}
+
 private:
     explicit ReadableEvent(KernelCore& kernel);
 
diff --git a/src/core/hle/kernel/resource_limit.h b/src/core/hle/kernel/resource_limit.h
index 936cc4d0f..464d4f2a6 100644
--- a/src/core/hle/kernel/resource_limit.h
+++ b/src/core/hle/kernel/resource_limit.h
@@ -85,6 +85,8 @@ public:
      */
     ResultCode SetLimitValue(ResourceType resource, s64 value);
 
+    void Finalize() override {}
+
 private:
     // TODO(Subv): Increment resource limit current values in their respective Kernel::T::Create
     // functions
diff --git a/src/core/hle/kernel/server_port.cpp b/src/core/hle/kernel/server_port.cpp
index 82857f93b..fe7a483c4 100644
--- a/src/core/hle/kernel/server_port.cpp
+++ b/src/core/hle/kernel/server_port.cpp
@@ -6,10 +6,10 @@
 #include "common/assert.h"
 #include "core/hle/kernel/client_port.h"
 #include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/server_port.h"
 #include "core/hle/kernel/server_session.h"
-#include "core/hle/kernel/thread.h"
 
 namespace Kernel {
 
diff --git a/src/core/hle/kernel/server_port.h b/src/core/hle/kernel/server_port.h
index 6470df993..29b4f2509 100644
--- a/src/core/hle/kernel/server_port.h
+++ b/src/core/hle/kernel/server_port.h
@@ -81,6 +81,8 @@ public:
 
     bool IsSignaled() const override;
 
+    void Finalize() override {}
+
 private:
     /// ServerSessions waiting to be accepted by the port
     std::vector<std::shared_ptr<ServerSession>> pending_sessions;
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index 4f2bb7822..790dbb998 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -15,11 +15,11 @@
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/hle_ipc.h"
 #include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/server_session.h"
 #include "core/hle/kernel/session.h"
-#include "core/hle/kernel/thread.h"
 #include "core/memory.h"
 
 namespace Kernel {
@@ -116,7 +116,7 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con
     return RESULT_SUCCESS;
 }
 
-ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<Thread> thread,
+ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<KThread> thread,
                                            Core::Memory::Memory& memory) {
     u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))};
     auto context =
@@ -154,14 +154,14 @@ ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) {
         KScopedSchedulerLock lock(kernel);
         if (!context.IsThreadWaiting()) {
             context.GetThread().Wakeup();
-            context.GetThread().SetSynchronizationResults(nullptr, result);
+            context.GetThread().SetSyncedObject(nullptr, result);
         }
     }
 
     return result;
 }
 
-ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread,
+ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<KThread> thread,
                                             Core::Memory::Memory& memory,
                                             Core::Timing::CoreTiming& core_timing) {
     return QueueSyncRequest(std::move(thread), memory);
diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h
index 9155cf7f5..c42d5ee59 100644
--- a/src/core/hle/kernel/server_session.h
+++ b/src/core/hle/kernel/server_session.h
@@ -29,7 +29,7 @@ class HLERequestContext;
 class KernelCore;
 class Session;
 class SessionRequestHandler;
-class Thread;
+class KThread;
 
 /**
  * Kernel object representing the server endpoint of an IPC session. Sessions are the basic CTR-OS
@@ -95,7 +95,7 @@ public:
      *
      * @returns ResultCode from the operation.
      */
-    ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory,
+    ResultCode HandleSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory,
                                  Core::Timing::CoreTiming& core_timing);
 
     /// Called when a client disconnection occurs.
@@ -126,9 +126,11 @@ public:
 
     bool IsSignaled() const override;
 
+    void Finalize() override {}
+
 private:
     /// Queues a sync request from the emulated application.
-    ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory);
+    ResultCode QueueSyncRequest(std::shared_ptr<KThread> thread, Core::Memory::Memory& memory);
 
     /// Completes a sync request from the emulated application.
     ResultCode CompleteSyncRequest(HLERequestContext& context);
@@ -149,12 +151,12 @@ private:
     /// List of threads that are pending a response after a sync request. This list is processed in
     /// a LIFO manner, thus, the last request will be dispatched first.
     /// TODO(Subv): Verify if this is indeed processed in LIFO using a hardware test.
-    std::vector<std::shared_ptr<Thread>> pending_requesting_threads;
+    std::vector<std::shared_ptr<KThread>> pending_requesting_threads;
 
     /// Thread whose request is currently being handled. A request is considered "handled" when a
     /// response is sent via svcReplyAndReceive.
     /// TODO(Subv): Find a better name for this.
-    std::shared_ptr<Thread> currently_handling;
+    std::shared_ptr<KThread> currently_handling;
 
     /// When set to True, converts the session to a domain at the end of the command
     bool convert_to_domain{};
diff --git a/src/core/hle/kernel/session.h b/src/core/hle/kernel/session.h
index f6dd2c1d2..fa3c5651a 100644
--- a/src/core/hle/kernel/session.h
+++ b/src/core/hle/kernel/session.h
@@ -39,6 +39,8 @@ public:
 
     bool IsSignaled() const override;
 
+    void Finalize() override {}
+
     std::shared_ptr<ClientSession> Client() {
         if (auto result{client.lock()}) {
             return result;
diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h
index 0ef87235c..623bd8b11 100644
--- a/src/core/hle/kernel/shared_memory.h
+++ b/src/core/hle/kernel/shared_memory.h
@@ -71,6 +71,8 @@ public:
         return device_memory.GetPointer(physical_address + offset);
     }
 
+    void Finalize() override {}
+
 private:
     Core::DeviceMemory& device_memory;
     Process* owner_process{};
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index cc8b661af..7fd514e9d 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -29,6 +29,7 @@
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/k_synchronization_object.h"
+#include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/memory/memory_block.h"
 #include "core/hle/kernel/memory/memory_layout.h"
@@ -42,7 +43,6 @@
 #include "core/hle/kernel/svc_results.h"
 #include "core/hle/kernel/svc_types.h"
 #include "core/hle/kernel/svc_wrap.h"
-#include "core/hle/kernel/thread.h"
 #include "core/hle/kernel/time_manager.h"
 #include "core/hle/kernel/transfer_memory.h"
 #include "core/hle/kernel/writable_event.h"
@@ -351,7 +351,8 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
         session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
     }
 
-    return thread->GetSignalingResult();
+    KSynchronizationObject* dummy{};
+    return thread->GetWaitResult(std::addressof(dummy));
 }
 
 static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
@@ -359,27 +360,26 @@ static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
 }
 
 /// Get the ID for the specified thread.
-static ResultCode GetThreadId(Core::System& system, u64* thread_id, Handle thread_handle) {
+static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
     LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
 
+    // Get the thread from its handle.
     const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
-    const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
-    if (!thread) {
-        LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", thread_handle);
-        return ERR_INVALID_HANDLE;
-    }
+    const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
+    R_UNLESS(thread, Svc::ResultInvalidHandle);
 
-    *thread_id = thread->GetThreadID();
+    // Get the thread's id.
+    *out_thread_id = thread->GetThreadID();
     return RESULT_SUCCESS;
 }
 
-static ResultCode GetThreadId32(Core::System& system, u32* thread_id_low, u32* thread_id_high,
-                                Handle thread_handle) {
-    u64 thread_id{};
-    const ResultCode result{GetThreadId(system, &thread_id, thread_handle)};
+static ResultCode GetThreadId32(Core::System& system, u32* out_thread_id_low,
+                                u32* out_thread_id_high, Handle thread_handle) {
+    u64 out_thread_id{};
+    const ResultCode result{GetThreadId(system, &out_thread_id, thread_handle)};
 
-    *thread_id_low = static_cast<u32>(thread_id >> 32);
-    *thread_id_high = static_cast<u32>(thread_id & std::numeric_limits<u32>::max());
+    *out_thread_id_low = static_cast<u32>(out_thread_id >> 32);
+    *out_thread_id_high = static_cast<u32>(out_thread_id & std::numeric_limits<u32>::max());
 
     return result;
 }
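The rewritten SVCs above replace if/LOG_ERROR/return blocks with R_UNLESS (bail out with a result code when a condition fails) and, elsewhere in this diff, R_TRY (propagate a non-success result). A minimal model of the two macros, under the assumption that result types expose an IsSuccess() check; the real definitions live in the kernel's result headers:

    struct ResultCode {
        int raw;
        constexpr bool IsSuccess() const { return raw == 0; }
    };
    constexpr ResultCode ResultSuccess{0};
    constexpr ResultCode ResultInvalidHandle{0xE401}; // Illustrative value.

    // Sketch of the guard macros used throughout the rewritten SVC handlers.
    #define R_UNLESS(cond, res)                                                \
        do {                                                                   \
            if (!(cond)) {                                                     \
                return res;                                                    \
            }                                                                  \
        } while (0)

    #define R_TRY(expr)                                                        \
        do {                                                                   \
            if (const ResultCode r = (expr); !r.IsSuccess()) {                 \
                return r;                                                      \
            }                                                                  \
        } while (0)

    ResultCode GetThreadIdLike(const void* thread, unsigned long long* out_id) {
        R_UNLESS(thread != nullptr, ResultInvalidHandle);
        *out_id = 42; // Stand-in for thread->GetThreadID().
        return ResultSuccess;
    }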
@@ -395,7 +395,7 @@ static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle han
         return RESULT_SUCCESS;
     }
 
-    const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle);
+    const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle);
     if (thread) {
         const Process* const owner_process = thread->GetOwnerProcess();
         if (!owner_process) {
@@ -473,15 +473,13 @@ static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u
 static ResultCode CancelSynchronization(Core::System& system, Handle thread_handle) {
     LOG_TRACE(Kernel_SVC, "called thread=0x{:X}", thread_handle);
 
+    // Get the thread from its handle.
     const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
-    std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
-    if (!thread) {
-        LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
-                  thread_handle);
-        return ERR_INVALID_HANDLE;
-    }
+    std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
+    R_UNLESS(thread, Svc::ResultInvalidHandle);
 
-    thread->CancelWait();
+    // Cancel the thread's wait.
+    thread->WaitCancel();
     return RESULT_SUCCESS;
 }
 
@@ -630,7 +628,7 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
         handle_debug_buffer(info1, info2);
 
         auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
-        const auto thread_processor_id = current_thread->GetProcessorID();
+        const auto thread_processor_id = current_thread->GetActiveCore();
         system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
     }
 }
@@ -872,7 +870,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
             return ERR_INVALID_COMBINATION;
         }
 
-        const auto thread = system.Kernel().CurrentProcess()->GetHandleTable().Get<Thread>(
+        const auto thread = system.Kernel().CurrentProcess()->GetHandleTable().Get<KThread>(
             static_cast<Handle>(handle));
         if (!thread) {
             LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}",
@@ -888,7 +886,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
         const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks();
         u64 out_ticks = 0;
         if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
-            const u64 thread_ticks = current_thread->GetTotalCPUTimeTicks();
+            const u64 thread_ticks = current_thread->GetCpuTime();
 
             out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
         } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
@@ -1025,129 +1023,109 @@ static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size
     return UnmapPhysicalMemory(system, addr, size);
 }
 
-/// Sets the thread activity
-static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 activity) {
-    LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", handle, activity);
-    if (activity > static_cast<u32>(ThreadActivity::Paused)) {
-        return ERR_INVALID_ENUM_VALUE;
+constexpr bool IsValidThreadActivity(Svc::ThreadActivity thread_activity) {
+    switch (thread_activity) {
+    case Svc::ThreadActivity::Runnable:
+    case Svc::ThreadActivity::Paused:
+        return true;
+    default:
+        return false;
     }
+}
 
-    const auto* current_process = system.Kernel().CurrentProcess();
-    const std::shared_ptr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
-    if (!thread) {
-        LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
-        return ERR_INVALID_HANDLE;
-    }
+/// Sets the thread activity
+static ResultCode SetThreadActivity(Core::System& system, Handle thread_handle,
+                                    Svc::ThreadActivity thread_activity) {
+    LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", thread_handle,
+              thread_activity);
 
-    if (thread->GetOwnerProcess() != current_process) {
-        LOG_ERROR(Kernel_SVC,
-                  "The current process does not own the current thread, thread_handle={:08X} "
-                  "thread_pid={}, "
-                  "current_process_pid={}",
-                  handle, thread->GetOwnerProcess()->GetProcessID(),
-                  current_process->GetProcessID());
-        return ERR_INVALID_HANDLE;
-    }
+    // Validate the activity.
+    R_UNLESS(IsValidThreadActivity(thread_activity), Svc::ResultInvalidEnumValue);
 
-    if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) {
-        LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
-        return ERR_BUSY;
-    }
+    // Get the thread from its handle.
+    auto& kernel = system.Kernel();
+    const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
+    const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
+    R_UNLESS(thread, Svc::ResultInvalidHandle);
+
+    // Check that the activity is being set on a non-current thread for the current process.
+    R_UNLESS(thread->GetOwnerProcess() == kernel.CurrentProcess(), Svc::ResultInvalidHandle);
+    R_UNLESS(thread.get() != GetCurrentThreadPointer(kernel), Svc::ResultBusy);
+
+    // Set the activity.
+    R_TRY(thread->SetActivity(thread_activity));
 
-    return thread->SetActivity(static_cast<ThreadActivity>(activity));
+    return RESULT_SUCCESS;
 }
 
-static ResultCode SetThreadActivity32(Core::System& system, Handle handle, u32 activity) {
-    return SetThreadActivity(system, handle, activity);
+static ResultCode SetThreadActivity32(Core::System& system, Handle thread_handle,
+                                      Svc::ThreadActivity thread_activity) {
+    return SetThreadActivity(system, thread_handle, thread_activity);
 }
 
 /// Gets the thread context
-static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, Handle handle) {
-    LOG_DEBUG(Kernel_SVC, "called, context=0x{:08X}, thread=0x{:X}", thread_context, handle);
+static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Handle thread_handle) {
+    LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context,
+              thread_handle);
 
+    // Get the thread from its handle.
     const auto* current_process = system.Kernel().CurrentProcess();
-    const std::shared_ptr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
-    if (!thread) {
-        LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
-        return ERR_INVALID_HANDLE;
-    }
+    const std::shared_ptr<KThread> thread =
+        current_process->GetHandleTable().Get<KThread>(thread_handle);
+    R_UNLESS(thread, Svc::ResultInvalidHandle);
 
-    if (thread->GetOwnerProcess() != current_process) {
-        LOG_ERROR(Kernel_SVC,
-                  "The current process does not own the current thread, thread_handle={:08X} "
-                  "thread_pid={}, "
-                  "current_process_pid={}",
-                  handle, thread->GetOwnerProcess()->GetProcessID(),
-                  current_process->GetProcessID());
-        return ERR_INVALID_HANDLE;
-    }
+    // Require the handle be to a non-current thread in the current process.
+    R_UNLESS(thread->GetOwnerProcess() == current_process, Svc::ResultInvalidHandle);
+    R_UNLESS(thread.get() != system.Kernel().CurrentScheduler()->GetCurrentThread(),
+             Svc::ResultBusy);
 
-    if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) {
-        LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
-        return ERR_BUSY;
-    }
-
-    Core::ARM_Interface::ThreadContext64 ctx = thread->GetContext64();
-    // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
-    ctx.pstate &= 0xFF0FFE20;
+    // Get the thread context.
+    std::vector<u8> context;
+    R_TRY(thread->GetThreadContext3(context));
 
-    // If 64-bit, we can just write the context registers directly and we're good.
-    // However, if 32-bit, we have to ensure some registers are zeroed out.
-    if (!current_process->Is64BitProcess()) {
-        std::fill(ctx.cpu_registers.begin() + 15, ctx.cpu_registers.end(), 0);
-        std::fill(ctx.vector_registers.begin() + 16, ctx.vector_registers.end(), u128{});
-    }
+    // Copy the thread context to user space.
+    system.Memory().WriteBlock(out_context, context.data(), context.size());
 
-    system.Memory().WriteBlock(thread_context, &ctx, sizeof(ctx));
     return RESULT_SUCCESS;
 }
 
-static ResultCode GetThreadContext32(Core::System& system, u32 thread_context, Handle handle) {
-    return GetThreadContext(system, thread_context, handle);
+static ResultCode GetThreadContext32(Core::System& system, u32 out_context, Handle thread_handle) {
+    return GetThreadContext(system, out_context, thread_handle);
 }
 
 /// Gets the priority for the specified thread
-static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle handle) {
+static ResultCode GetThreadPriority(Core::System& system, u32* out_priority, Handle handle) {
     LOG_TRACE(Kernel_SVC, "called");
 
+    // Get the thread from its handle.
     const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
-    const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle);
-    if (!thread) {
-        *priority = 0;
-        LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
-        return ERR_INVALID_HANDLE;
-    }
+    const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle);
+    R_UNLESS(thread, Svc::ResultInvalidHandle);
 
-    *priority = thread->GetPriority();
+    // Get the thread's priority.
+    *out_priority = thread->GetPriority();
     return RESULT_SUCCESS;
 }
 
-static ResultCode GetThreadPriority32(Core::System& system, u32* priority, Handle handle) {
-    return GetThreadPriority(system, priority, handle);
+static ResultCode GetThreadPriority32(Core::System& system, u32* out_priority, Handle handle) {
+    return GetThreadPriority(system, out_priority, handle);
 }
 
 /// Sets the priority for the specified thread
 static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 priority) {
     LOG_TRACE(Kernel_SVC, "called");
 
-    if (priority > THREADPRIO_LOWEST) {
-        LOG_ERROR(
-            Kernel_SVC,
-            "An invalid priority was specified, expected {} but got {} for thread_handle={:08X}",
-            THREADPRIO_LOWEST, priority, handle);
-        return ERR_INVALID_THREAD_PRIORITY;
-    }
+    // Validate the priority.
+    R_UNLESS(Svc::HighestThreadPriority <= priority && priority <= Svc::LowestThreadPriority,
+             Svc::ResultInvalidPriority);
 
-    const auto* const current_process = system.Kernel().CurrentProcess();
-
-    std::shared_ptr<Thread> thread = current_process->GetHandleTable().Get<Thread>(handle);
-    if (!thread) {
-        LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
-        return ERR_INVALID_HANDLE;
-    }
+    // Get the thread from its handle.
+    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+    const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle);
+    R_UNLESS(thread, Svc::ResultInvalidHandle);
 
+    // Set the thread priority.
     thread->SetBasePriority(priority);
-
     return RESULT_SUCCESS;
 }
 
@@ -1438,62 +1416,50 @@ static void ExitProcess(Core::System& system) {
     current_process->PrepareForTermination();
 
     // Kill the current thread
-    system.Kernel().CurrentScheduler()->GetCurrentThread()->Stop();
+    system.Kernel().CurrentScheduler()->GetCurrentThread()->Exit();
 }
 
 static void ExitProcess32(Core::System& system) {
     ExitProcess(system);
 }
 
+static constexpr bool IsValidCoreId(int32_t core_id) {
+    return (0 <= core_id && core_id < static_cast<int32_t>(Core::Hardware::NUM_CPU_CORES));
+}
+
 /// Creates a new thread
 static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
-                               VAddr stack_top, u32 priority, s32 processor_id) {
+                               VAddr stack_bottom, u32 priority, s32 core_id) {
     LOG_DEBUG(Kernel_SVC,
-              "called entrypoint=0x{:08X}, arg=0x{:08X}, stacktop=0x{:08X}, "
-              "threadpriority=0x{:08X}, processorid=0x{:08X} : created handle=0x{:08X}",
-              entry_point, arg, stack_top, priority, processor_id, *out_handle);
+              "called entry_point=0x{:08X}, arg=0x{:08X}, stack_bottom=0x{:08X}, "
+              "priority=0x{:08X}, core_id=0x{:08X}",
+              entry_point, arg, stack_bottom, priority, core_id);
 
-    auto* const current_process = system.Kernel().CurrentProcess();
-
-    if (processor_id == THREADPROCESSORID_IDEAL) {
-        // Set the target CPU to the one specified by the process.
-        processor_id = current_process->GetIdealCore();
-        ASSERT(processor_id != THREADPROCESSORID_IDEAL);
+    // Adjust core id, if it's the default magic.
+    auto& kernel = system.Kernel();
+    auto& process = *kernel.CurrentProcess();
+    if (core_id == Svc::IdealCoreUseProcessValue) {
+        core_id = process.GetIdealCoreId();
     }
 
-    if (processor_id < THREADPROCESSORID_0 || processor_id > THREADPROCESSORID_3) {
-        LOG_ERROR(Kernel_SVC, "Invalid thread processor ID: {}", processor_id);
-        return ERR_INVALID_PROCESSOR_ID;
-    }
+    // Validate arguments.
+    R_UNLESS(IsValidCoreId(core_id), Svc::ResultInvalidCoreId);
+    R_UNLESS(((1ULL << core_id) & process.GetCoreMask()) != 0, Svc::ResultInvalidCoreId);
 
-    const u64 core_mask = current_process->GetCoreMask();
-    if ((core_mask | (1ULL << processor_id)) != core_mask) {
-        LOG_ERROR(Kernel_SVC, "Invalid thread core specified ({})", processor_id);
-        return ERR_INVALID_PROCESSOR_ID;
-    }
+    R_UNLESS(Svc::HighestThreadPriority <= priority && priority <= Svc::LowestThreadPriority,
+             Svc::ResultInvalidPriority);
+    R_UNLESS(process.CheckThreadPriority(priority), Svc::ResultInvalidPriority);
 
-    if (priority > THREADPRIO_LOWEST) {
-        LOG_ERROR(Kernel_SVC,
-                  "Invalid thread priority specified ({}). Must be within the range 0-64",
-                  priority);
-        return ERR_INVALID_THREAD_PRIORITY;
-    }
+    ASSERT(process.GetResourceLimit()->Reserve(ResourceType::Threads, 1));
 
-    if (((1ULL << priority) & current_process->GetPriorityMask()) == 0) {
-        LOG_ERROR(Kernel_SVC, "Invalid thread priority specified ({})", priority);
-        return ERR_INVALID_THREAD_PRIORITY;
+    std::shared_ptr<KThread> thread;
+    {
+        KScopedLightLock lk{process.GetStateLock()};
+        CASCADE_RESULT(thread, KThread::Create(system, ThreadType::User, "", entry_point, priority,
+                                               arg, core_id, stack_bottom, &process));
     }
 
-    auto& kernel = system.Kernel();
-
-    ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Threads, 1));
-
-    ThreadType type = THREADTYPE_USER;
-    CASCADE_RESULT(std::shared_ptr<Thread> thread,
-                   Thread::Create(system, type, "", entry_point, priority, arg, processor_id,
-                                  stack_top, current_process));
-
-    const auto new_thread_handle = current_process->GetHandleTable().Create(thread);
+    const auto new_thread_handle = process.GetHandleTable().Create(thread);
     if (new_thread_handle.Failed()) {
         LOG_ERROR(Kernel_SVC, "Failed to create handle with error=0x{:X}",
                   new_thread_handle.Code().raw);
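CreateThread's validation now reduces to two core checks once the IdealCoreUseProcessValue magic (-2) has been resolved: the core id must index a real core, and that core's bit must be set in the process core mask. A worked sketch of the same arithmetic with assumed constants:

    #include <cstdint>

    constexpr int NumCpuCores = 4;
    constexpr int IdealCoreUseProcessValue = -2; // Svc magic: defer to the process.

    // Sketch of the CreateThread core checks after resolving the magic value.
    constexpr bool IsValidCoreId(int core_id) {
        return 0 <= core_id && core_id < NumCpuCores;
    }
    constexpr bool IsCoreAllowed(std::uint64_t core_mask, int core_id) {
        return ((1ULL << core_id) & core_mask) != 0;
    }

    static_assert(IsValidCoreId(3) && !IsValidCoreId(4));
    static_assert(IsCoreAllowed(0b1011, 1) && !IsCoreAllowed(0b1011, 2));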
@@ -1517,17 +1483,15 @@ static ResultCode CreateThread32(Core::System& system, Handle* out_handle, u32 p
 static ResultCode StartThread(Core::System& system, Handle thread_handle) {
     LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
 
+    // Get the thread from its handle.
     const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
-    const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
-    if (!thread) {
-        LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
-                  thread_handle);
-        return ERR_INVALID_HANDLE;
-    }
+    const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
+    R_UNLESS(thread, Svc::ResultInvalidHandle);
 
-    ASSERT(thread->GetState() == ThreadState::Initialized);
+    // Try to start the thread.
+    R_TRY(thread->Run());
 
-    return thread->Start();
+    return RESULT_SUCCESS;
 }
 
 static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
@@ -1540,7 +1504,7 @@ static void ExitThread(Core::System& system) {
 
     auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
     system.GlobalSchedulerContext().RemoveThread(SharedFrom(current_thread));
-    current_thread->Stop();
+    current_thread->Exit();
 }
 
 static void ExitThread32(Core::System& system) {
@@ -1549,34 +1513,28 @@ static void ExitThread32(Core::System& system) {
1549 1513
1550/// Sleep the current thread 1514/// Sleep the current thread
1551static void SleepThread(Core::System& system, s64 nanoseconds) { 1515static void SleepThread(Core::System& system, s64 nanoseconds) {
1552 LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds); 1516 auto& kernel = system.Kernel();
1517 const auto yield_type = static_cast<Svc::YieldType>(nanoseconds);
1553 1518
1554 enum class SleepType : s64 { 1519 LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);
1555 YieldWithoutCoreMigration = 0,
1556 YieldWithCoreMigration = -1,
1557 YieldAndWaitForLoadBalancing = -2,
1558 };
1559 1520
1560 auto& scheduler = *system.Kernel().CurrentScheduler(); 1521 // When the input tick is positive, sleep.
1561 if (nanoseconds <= 0) { 1522 if (nanoseconds > 0) {
1562 switch (static_cast<SleepType>(nanoseconds)) { 1523 // Convert the timeout from nanoseconds to ticks.
1563 case SleepType::YieldWithoutCoreMigration: { 1524 // NOTE: Nintendo does not use this conversion logic in WaitSynchronization...
1564 scheduler.YieldWithoutCoreMigration(); 1525
1565 break; 1526 // Sleep.
1566 } 1527 // NOTE: Nintendo does not check the result of this sleep.
1567 case SleepType::YieldWithCoreMigration: { 1528 static_cast<void>(GetCurrentThread(kernel).Sleep(nanoseconds));
1568 scheduler.YieldWithCoreMigration(); 1529 } else if (yield_type == Svc::YieldType::WithoutCoreMigration) {
1569 break; 1530 KScheduler::YieldWithoutCoreMigration(kernel);
1570 } 1531 } else if (yield_type == Svc::YieldType::WithCoreMigration) {
1571 case SleepType::YieldAndWaitForLoadBalancing: { 1532 KScheduler::YieldWithCoreMigration(kernel);
1572 scheduler.YieldToAnyThread(); 1533 } else if (yield_type == Svc::YieldType::ToAnyThread) {
1573 break; 1534 KScheduler::YieldToAnyThread(kernel);
1574 }
1575 default:
1576 UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
1577 }
1578 } else { 1535 } else {
1579 scheduler.GetCurrentThread()->Sleep(nanoseconds); 1536 // Nintendo does nothing at all if an otherwise invalid value is passed.
1537 UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
1580 } 1538 }
1581} 1539}
1582 1540
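
For orientation: the rewritten SleepThread treats a positive nanosecond count as a real sleep and the non-positive values as yield sentinels matching Svc::YieldType. A standalone sketch of that dispatch, with the kernel calls replaced by illustrative stand-ins (this is not the yuzu implementation):

#include <cstdint>
#include <iostream>

// Mirrors Svc::YieldType from svc_types.h; the sentinel values are part of
// the SVC ABI, but the bodies below are stand-ins for the kernel calls.
enum class YieldType : int64_t {
    WithoutCoreMigration = 0,
    WithCoreMigration = -1,
    ToAnyThread = -2,
};

void SleepThreadSketch(int64_t nanoseconds) {
    if (nanoseconds > 0) {
        std::cout << "sleep for " << nanoseconds << " ns\n";
        return;
    }
    switch (static_cast<YieldType>(nanoseconds)) {
    case YieldType::WithoutCoreMigration:
        std::cout << "yield, stay on the current core\n";
        break;
    case YieldType::WithCoreMigration:
        std::cout << "yield, allow migration to another core\n";
        break;
    case YieldType::ToAnyThread:
        std::cout << "yield and wait for load balancing\n";
        break;
    default:
        // Hardware does nothing for other negative values; the emulator asserts.
        std::cout << "unhandled yield type\n";
        break;
    }
}

int main() {
    SleepThreadSketch(1'000'000); // sleep one millisecond
    SleepThreadSketch(-1);        // YieldWithCoreMigration
}
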
@@ -1839,95 +1797,72 @@ static ResultCode CreateTransferMemory32(Core::System& system, Handle* handle, u
1839 return CreateTransferMemory(system, handle, addr, size, permissions); 1797 return CreateTransferMemory(system, handle, addr, size, permissions);
1840} 1798}
1841 1799
1842static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core, 1800static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
1843 u64* mask) { 1801 u64* out_affinity_mask) {
1844 LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle); 1802 LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
1845 1803
1804 // Get the thread from its handle.
1846 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1805 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1847 const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle); 1806 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
1848 if (!thread) { 1807 R_UNLESS(thread, Svc::ResultInvalidHandle);
1849 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
1850 thread_handle);
1851 *core = 0;
1852 *mask = 0;
1853 return ERR_INVALID_HANDLE;
1854 }
1855 1808
1856 *core = thread->GetIdealCore(); 1809 // Get the core mask.
1857 *mask = thread->GetAffinityMask().GetAffinityMask(); 1810 R_TRY(thread->GetCoreMask(out_core_id, out_affinity_mask));
1858 1811
1859 return RESULT_SUCCESS; 1812 return RESULT_SUCCESS;
1860} 1813}
1861 1814
1862static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle, u32* core, 1815static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle, s32* out_core_id,
1863 u32* mask_low, u32* mask_high) { 1816 u32* out_affinity_mask_low, u32* out_affinity_mask_high) {
1864 u64 mask{}; 1817 u64 out_affinity_mask{};
1865 const auto result = GetThreadCoreMask(system, thread_handle, core, &mask); 1818 const auto result = GetThreadCoreMask(system, thread_handle, out_core_id, &out_affinity_mask);
1866 *mask_high = static_cast<u32>(mask >> 32); 1819 *out_affinity_mask_high = static_cast<u32>(out_affinity_mask >> 32);
1867 *mask_low = static_cast<u32>(mask); 1820 *out_affinity_mask_low = static_cast<u32>(out_affinity_mask);
1868 return result; 1821 return result;
1869} 1822}
1870 1823
1871static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, u32 core, 1824static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
1872 u64 affinity_mask) { 1825 u64 affinity_mask) {
1873 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core=0x{:X}, affinity_mask=0x{:016X}", 1826 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core_id=0x{:X}, affinity_mask=0x{:016X}",
1874 thread_handle, core, affinity_mask); 1827 thread_handle, core_id, affinity_mask);
1875
1876 const auto* const current_process = system.Kernel().CurrentProcess();
1877 1828
1878 if (core == static_cast<u32>(THREADPROCESSORID_IDEAL)) { 1829 const auto& current_process = *system.Kernel().CurrentProcess();
1879 const u8 ideal_cpu_core = current_process->GetIdealCore();
1880 1830
1881 ASSERT(ideal_cpu_core != static_cast<u8>(THREADPROCESSORID_IDEAL)); 1831 // Determine the core id/affinity mask.
1882 1832 if (core_id == Svc::IdealCoreUseProcessValue) {
1883 // Set the target CPU to the ideal core specified by the process. 1833 core_id = current_process.GetIdealCoreId();
1884 core = ideal_cpu_core; 1834 affinity_mask = (1ULL << core_id);
1885 affinity_mask = 1ULL << core;
1886 } else { 1835 } else {
1887 const u64 core_mask = current_process->GetCoreMask(); 1836 // Validate the affinity mask.
1888 1837 const u64 process_core_mask = current_process.GetCoreMask();
1889 if ((core_mask | affinity_mask) != core_mask) { 1838 R_UNLESS((affinity_mask | process_core_mask) == process_core_mask,
1890 LOG_ERROR( 1839 Svc::ResultInvalidCoreId);
1891 Kernel_SVC, 1840 R_UNLESS(affinity_mask != 0, Svc::ResultInvalidCombination);
1892 "Invalid processor ID specified (core_mask=0x{:08X}, affinity_mask=0x{:016X})", 1841
1893 core_mask, affinity_mask); 1842 // Validate the core id.
1894 return ERR_INVALID_PROCESSOR_ID; 1843 if (IsValidCoreId(core_id)) {
1895 } 1844 R_UNLESS(((1ULL << core_id) & affinity_mask) != 0, Svc::ResultInvalidCombination);
1896 1845 } else {
1897 if (affinity_mask == 0) { 1846 R_UNLESS(core_id == Svc::IdealCoreNoUpdate || core_id == Svc::IdealCoreDontCare,
1898 LOG_ERROR(Kernel_SVC, "Specified affinity mask is zero."); 1847 Svc::ResultInvalidCoreId);
1899 return ERR_INVALID_COMBINATION;
1900 }
1901
1902 if (core < Core::Hardware::NUM_CPU_CORES) {
1903 if ((affinity_mask & (1ULL << core)) == 0) {
1904 LOG_ERROR(Kernel_SVC,
1905 "Core is not enabled for the current mask, core={}, mask={:016X}", core,
1906 affinity_mask);
1907 return ERR_INVALID_COMBINATION;
1908 }
1909 } else if (core != static_cast<u32>(THREADPROCESSORID_DONT_CARE) &&
1910 core != static_cast<u32>(THREADPROCESSORID_DONT_UPDATE)) {
1911 LOG_ERROR(Kernel_SVC, "Invalid processor ID specified (core={}).", core);
1912 return ERR_INVALID_PROCESSOR_ID;
1913 } 1848 }
1914 } 1849 }
1915 1850
1916 const auto& handle_table = current_process->GetHandleTable(); 1851 // Get the thread from its handle.
1917 const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle); 1852 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1918 if (!thread) { 1853 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
1919 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}", 1854 R_UNLESS(thread, Svc::ResultInvalidHandle);
1920 thread_handle);
1921 return ERR_INVALID_HANDLE;
1922 }
1923 1855
1924 return thread->SetCoreAndAffinityMask(core, affinity_mask); 1856 // Set the core mask.
1857 R_TRY(thread->SetCoreMask(core_id, affinity_mask));
1858
1859 return RESULT_SUCCESS;
1925} 1860}
1926 1861
1927static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, u32 core, 1862static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, s32 core_id,
1928 u32 affinity_mask_low, u32 affinity_mask_high) { 1863 u32 affinity_mask_low, u32 affinity_mask_high) {
1929 const auto affinity_mask = u64{affinity_mask_low} | (u64{affinity_mask_high} << 32); 1864 const auto affinity_mask = u64{affinity_mask_low} | (u64{affinity_mask_high} << 32);
1930 return SetThreadCoreMask(system, thread_handle, core, affinity_mask); 1865 return SetThreadCoreMask(system, thread_handle, core_id, affinity_mask);
1931} 1866}
1932 1867
1933static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle* read_handle) { 1868static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle* read_handle) {
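
The validation rules in the rewritten SetThreadCoreMask above are easy to lose in diff form. A minimal standalone sketch of the same checks, assuming a Switch-like four-core topology; the sentinel constants are copied from svc_types.h, but the function and Result names are illustrative:

#include <cstdint>

// Sentinel core ids, copied from svc_types.h.
constexpr int32_t IdealCoreDontCare = -1;
constexpr int32_t IdealCoreUseProcessValue = -2;
constexpr int32_t IdealCoreNoUpdate = -3;
constexpr int32_t NumCpuCores = 4; // assumption: four hardware cores

enum class Result { Success, InvalidCoreId, InvalidCombination };

Result ValidateCoreMask(int32_t& core_id, uint64_t& affinity_mask,
                        uint64_t process_core_mask, int32_t process_ideal_core) {
    if (core_id == IdealCoreUseProcessValue) {
        // Fall back to the process's ideal core with a single-core mask.
        core_id = process_ideal_core;
        affinity_mask = 1ULL << core_id;
        return Result::Success;
    }
    // The mask must be a non-empty subset of the cores the process may use.
    if ((affinity_mask | process_core_mask) != process_core_mask) {
        return Result::InvalidCoreId;
    }
    if (affinity_mask == 0) {
        return Result::InvalidCombination;
    }
    if (core_id >= 0 && core_id < NumCpuCores) {
        // A concrete ideal core must itself be enabled in the mask.
        if (((1ULL << core_id) & affinity_mask) == 0) {
            return Result::InvalidCombination;
        }
    } else if (core_id != IdealCoreNoUpdate && core_id != IdealCoreDontCare) {
        return Result::InvalidCoreId;
    }
    return Result::Success;
}
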
@@ -2491,7 +2426,7 @@ void Call(Core::System& system, u32 immediate) {
2491 kernel.EnterSVCProfile(); 2426 kernel.EnterSVCProfile();
2492 2427
2493 auto* thread = kernel.CurrentScheduler()->GetCurrentThread(); 2428 auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
2494 thread->SetContinuousOnSVC(true); 2429 thread->SetIsCallingSvc();
2495 2430
2496 const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate) 2431 const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)
2497 : GetSVCInfo32(immediate); 2432 : GetSVCInfo32(immediate);
@@ -2507,7 +2442,7 @@ void Call(Core::System& system, u32 immediate) {
2507 2442
2508 kernel.ExitSVCProfile(); 2443 kernel.ExitSVCProfile();
2509 2444
2510 if (!thread->IsContinuousOnSVC()) { 2445 if (!thread->IsCallingSvc()) {
2511 auto* host_context = thread->GetHostContext().get(); 2446 auto* host_context = thread->GetHostContext().get();
2512 host_context->Rewind(); 2447 host_context->Rewind();
2513 } 2448 }
diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h
index 78282f021..7b897fbce 100644
--- a/src/core/hle/kernel/svc_results.h
+++ b/src/core/hle/kernel/svc_results.h
@@ -8,13 +8,18 @@
8 8
9namespace Kernel::Svc { 9namespace Kernel::Svc {
10 10
11constexpr ResultCode ResultNoSynchronizationObject{ErrorModule::Kernel, 57};
11constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59}; 12constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59};
12constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102}; 13constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102};
13constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106}; 14constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
15constexpr ResultCode ResultInvalidPriority{ErrorModule::Kernel, 112};
16constexpr ResultCode ResultInvalidCoreId{ErrorModule::Kernel, 113};
14constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114}; 17constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114};
18constexpr ResultCode ResultInvalidCombination{ErrorModule::Kernel, 116};
15constexpr ResultCode ResultTimedOut{ErrorModule::Kernel, 117}; 19constexpr ResultCode ResultTimedOut{ErrorModule::Kernel, 117};
16constexpr ResultCode ResultCancelled{ErrorModule::Kernel, 118}; 20constexpr ResultCode ResultCancelled{ErrorModule::Kernel, 118};
17constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120}; 21constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120};
22constexpr ResultCode ResultBusy{ErrorModule::Kernel, 122};
18constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125}; 23constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125};
19 24
20} // namespace Kernel::Svc 25} // namespace Kernel::Svc
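
These result codes are consumed through the R_UNLESS/R_TRY early-return pattern visible throughout the rewritten SVCs above. A hypothetical minimal version of those macros, just to show how such call sites read; the real yuzu definitions differ in detail:

struct ResultCode {
    int raw;
    constexpr bool IsError() const { return raw != 0; }
};
constexpr ResultCode ResultSuccess{0};
constexpr ResultCode ResultInvalidHandle{114};

// Return `res` from the enclosing function unless `expr` holds.
#define R_UNLESS(expr, res) \
    do {                    \
        if (!(expr)) {      \
            return res;     \
        }                   \
    } while (0)

// Propagate a failing ResultCode from a callee.
#define R_TRY(expr)                                                           \
    do {                                                                      \
        if (const ResultCode r_try_rc = (expr); r_try_rc.IsError()) {         \
            return r_try_rc;                                                  \
        }                                                                     \
    } while (0)

ResultCode StartThreadSketch(void* thread) {
    R_UNLESS(thread != nullptr, ResultInvalidHandle); // bail on a bad handle
    R_TRY(ResultSuccess);                             // propagate callee failures
    return ResultSuccess;
}
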
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index d623f7a50..ec463b97c 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -77,4 +77,22 @@ enum class ArbitrationType : u32 {
77 WaitIfEqual = 2, 77 WaitIfEqual = 2,
78}; 78};
79 79
80enum class YieldType : s64 {
81 WithoutCoreMigration = 0,
82 WithCoreMigration = -1,
83 ToAnyThread = -2,
84};
85
86enum class ThreadActivity : u32 {
87 Runnable = 0,
88 Paused = 1,
89};
90
91constexpr inline s32 IdealCoreDontCare = -1;
92constexpr inline s32 IdealCoreUseProcessValue = -2;
93constexpr inline s32 IdealCoreNoUpdate = -3;
94
95constexpr inline s32 LowestThreadPriority = 63;
96constexpr inline s32 HighestThreadPriority = 0;
97
80} // namespace Kernel::Svc 98} // namespace Kernel::Svc
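
The new priority constants bound the 0-63 priority space (lower value means higher priority), and a process further restricts which levels its threads may use via a priority mask with one bit per level; that is the check at the top of the rewritten CreateThread. An illustrative predicate, with the mask parameter standing in for Process::GetPriorityMask():

#include <cstdint>

constexpr int32_t HighestThreadPriority = 0;  // numerically smallest
constexpr int32_t LowestThreadPriority = 63;  // numerically largest

bool IsPriorityAllowed(int32_t priority, uint64_t process_priority_mask) {
    if (priority < HighestThreadPriority || priority > LowestThreadPriority) {
        return false;
    }
    return ((1ULL << priority) & process_priority_mask) != 0;
}
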
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index a32750ed7..96afd544b 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -58,6 +58,14 @@ void SvcWrap64(Core::System& system) {
58 func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw); 58 func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw);
59} 59}
60 60
61// Used by SetThreadActivity
62template <ResultCode func(Core::System&, Handle, Svc::ThreadActivity)>
63void SvcWrap64(Core::System& system) {
64 FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
65 static_cast<Svc::ThreadActivity>(Param(system, 1)))
66 .raw);
67}
68
61template <ResultCode func(Core::System&, u32, u64, u64, u64)> 69template <ResultCode func(Core::System&, u32, u64, u64, u64)>
62void SvcWrap64(Core::System& system) { 70void SvcWrap64(Core::System& system) {
63 FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), 71 FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
@@ -158,9 +166,18 @@ void SvcWrap64(Core::System& system) {
158 .raw); 166 .raw);
159} 167}
160 168
161template <ResultCode func(Core::System&, u32, u32*, u64*)> 169// Used by SetThreadCoreMask
170template <ResultCode func(Core::System&, Handle, s32, u64)>
162void SvcWrap64(Core::System& system) { 171void SvcWrap64(Core::System& system) {
163 u32 param_1 = 0; 172 FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
173 static_cast<s32>(Param(system, 1)), Param(system, 2))
174 .raw);
175}
176
177// Used by GetThreadCoreMask
178template <ResultCode func(Core::System&, Handle, s32*, u64*)>
179void SvcWrap64(Core::System& system) {
180 s32 param_1 = 0;
164 u64 param_2 = 0; 181 u64 param_2 = 0;
165 const ResultCode retval = func(system, static_cast<u32>(Param(system, 2)), &param_1, &param_2); 182 const ResultCode retval = func(system, static_cast<u32>(Param(system, 2)), &param_1, &param_2);
166 183
@@ -473,12 +490,35 @@ void SvcWrap32(Core::System& system) {
473 FuncReturn(system, retval); 490 FuncReturn(system, retval);
474} 491}
475 492
493// Used by GetThreadCoreMask32
494template <ResultCode func(Core::System&, Handle, s32*, u32*, u32*)>
495void SvcWrap32(Core::System& system) {
496 s32 param_1 = 0;
497 u32 param_2 = 0;
498 u32 param_3 = 0;
499
500 const u32 retval = func(system, Param32(system, 2), &param_1, &param_2, &param_3).raw;
501 system.CurrentArmInterface().SetReg(1, param_1);
502 system.CurrentArmInterface().SetReg(2, param_2);
503 system.CurrentArmInterface().SetReg(3, param_3);
504 FuncReturn(system, retval);
505}
506
476// Used by SignalProcessWideKey32 507// Used by SignalProcessWideKey32
477template <void func(Core::System&, u32, s32)> 508template <void func(Core::System&, u32, s32)>
478void SvcWrap32(Core::System& system) { 509void SvcWrap32(Core::System& system) {
479 func(system, static_cast<u32>(Param(system, 0)), static_cast<s32>(Param(system, 1))); 510 func(system, static_cast<u32>(Param(system, 0)), static_cast<s32>(Param(system, 1)));
480} 511}
481 512
513// Used by SetThreadActivity32
514template <ResultCode func(Core::System&, Handle, Svc::ThreadActivity)>
515void SvcWrap32(Core::System& system) {
516 const u32 retval = func(system, static_cast<Handle>(Param(system, 0)),
517 static_cast<Svc::ThreadActivity>(Param(system, 1)))
518 .raw;
519 FuncReturn(system, retval);
520}
521
482// Used by SetThreadPriority32 522// Used by SetThreadPriority32
483template <ResultCode func(Core::System&, Handle, u32)> 523template <ResultCode func(Core::System&, Handle, u32)>
484void SvcWrap32(Core::System& system) { 524void SvcWrap32(Core::System& system) {
@@ -487,7 +527,7 @@ void SvcWrap32(Core::System& system) {
487 FuncReturn(system, retval); 527 FuncReturn(system, retval);
488} 528}
489 529
490// Used by SetThreadCoreMask32 530// Used by SetMemoryAttribute32
491template <ResultCode func(Core::System&, Handle, u32, u32, u32)> 531template <ResultCode func(Core::System&, Handle, u32, u32, u32)>
492void SvcWrap32(Core::System& system) { 532void SvcWrap32(Core::System& system) {
493 const u32 retval = 533 const u32 retval =
@@ -497,6 +537,16 @@ void SvcWrap32(Core::System& system) {
497 FuncReturn(system, retval); 537 FuncReturn(system, retval);
498} 538}
499 539
540// Used by SetThreadCoreMask32
541template <ResultCode func(Core::System&, Handle, s32, u32, u32)>
542void SvcWrap32(Core::System& system) {
543 const u32 retval =
544 func(system, static_cast<Handle>(Param(system, 0)), static_cast<s32>(Param(system, 1)),
545 static_cast<u32>(Param(system, 2)), static_cast<u32>(Param(system, 3)))
546 .raw;
547 FuncReturn(system, retval);
548}
549
500// Used by WaitProcessWideKeyAtomic32 550// Used by WaitProcessWideKeyAtomic32
501template <ResultCode func(Core::System&, u32, u32, Handle, u32, u32)> 551template <ResultCode func(Core::System&, u32, u32, Handle, u32, u32)>
502void SvcWrap32(Core::System& system) { 552void SvcWrap32(Core::System& system) {
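
All of the wrappers added in this file follow one pattern: the SVC implementation is a non-type template parameter, and each overload marshals guest registers into typed arguments and the ResultCode back into a register. A freestanding sketch of that idea, with a toy register file standing in for the ARM interface (names and the stub SVC are illustrative):

#include <array>
#include <cstddef>
#include <cstdint>

struct ResultCode { uint32_t raw; };

// Toy guest register file; Param/FuncReturn mimic the helpers in svc_wrap.h.
std::array<uint64_t, 31> regs{};
uint64_t Param(std::size_t i) { return regs[i]; }
void FuncReturn(uint32_t value) { regs[0] = value; }

// One wrapper per SVC signature: unpack registers, call, store the result.
template <ResultCode func(uint32_t, int32_t, uint64_t)>
void SvcWrap() {
    FuncReturn(func(static_cast<uint32_t>(Param(0)),
                    static_cast<int32_t>(Param(1)), Param(2))
                   .raw);
}

ResultCode SetThreadCoreMaskStub(uint32_t, int32_t, uint64_t) {
    return ResultCode{0};
}

int main() {
    regs[0] = 0x42;                      // thread handle
    regs[1] = static_cast<uint64_t>(-2); // core_id = IdealCoreUseProcessValue
    regs[2] = 0xF;                       // affinity mask: cores 0-3
    SvcWrap<SetThreadCoreMaskStub>();    // the result code lands in regs[0]
    return static_cast<int>(regs[0]);
}
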
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
deleted file mode 100644
index d97323255..000000000
--- a/src/core/hle/kernel/thread.cpp
+++ /dev/null
@@ -1,460 +0,0 @@
1// Copyright 2014 Citra Emulator Project / PPSSPP Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <cinttypes>
7#include <optional>
8#include <vector>
9
10#include "common/assert.h"
11#include "common/common_types.h"
12#include "common/fiber.h"
13#include "common/logging/log.h"
14#include "common/thread_queue_list.h"
15#include "core/core.h"
16#include "core/cpu_manager.h"
17#include "core/hardware_properties.h"
18#include "core/hle/kernel/errors.h"
19#include "core/hle/kernel/handle_table.h"
20#include "core/hle/kernel/k_condition_variable.h"
21#include "core/hle/kernel/k_scheduler.h"
22#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
23#include "core/hle/kernel/kernel.h"
24#include "core/hle/kernel/memory/memory_layout.h"
25#include "core/hle/kernel/object.h"
26#include "core/hle/kernel/process.h"
27#include "core/hle/kernel/thread.h"
28#include "core/hle/kernel/time_manager.h"
29#include "core/hle/result.h"
30#include "core/memory.h"
31
32#ifdef ARCHITECTURE_x86_64
33#include "core/arm/dynarmic/arm_dynarmic_32.h"
34#include "core/arm/dynarmic/arm_dynarmic_64.h"
35#endif
36
37namespace Kernel {
38
39bool Thread::IsSignaled() const {
40 return signaled;
41}
42
43Thread::Thread(KernelCore& kernel) : KSynchronizationObject{kernel} {}
44Thread::~Thread() = default;
45
46void Thread::Stop() {
47 {
48 KScopedSchedulerLock lock(kernel);
49 SetState(ThreadState::Terminated);
50 signaled = true;
51 NotifyAvailable();
52 kernel.GlobalHandleTable().Close(global_handle);
53
54 if (owner_process) {
55 owner_process->UnregisterThread(this);
56
57 // Mark the TLS slot in the thread's page as free.
58 owner_process->FreeTLSRegion(tls_address);
59 }
60 has_exited = true;
61 }
62 global_handle = 0;
63}
64
65void Thread::Wakeup() {
66 KScopedSchedulerLock lock(kernel);
67 SetState(ThreadState::Runnable);
68}
69
70ResultCode Thread::Start() {
71 KScopedSchedulerLock lock(kernel);
72 SetState(ThreadState::Runnable);
73 return RESULT_SUCCESS;
74}
75
76void Thread::CancelWait() {
77 KScopedSchedulerLock lock(kernel);
78 if (GetState() != ThreadState::Waiting || !is_cancellable) {
79 is_sync_cancelled = true;
80 return;
81 }
82 // TODO(Blinkhawk): Implement cancel of server session
83 is_sync_cancelled = false;
84 SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED);
85 SetState(ThreadState::Runnable);
86}
87
88static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
89 u32 entry_point, u32 arg) {
90 context = {};
91 context.cpu_registers[0] = arg;
92 context.cpu_registers[15] = entry_point;
93 context.cpu_registers[13] = stack_top;
94}
95
96static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top,
97 VAddr entry_point, u64 arg) {
98 context = {};
99 context.cpu_registers[0] = arg;
100 context.pc = entry_point;
101 context.sp = stack_top;
102 // TODO(merry): Perform a hardware test to determine the below value.
103 context.fpcr = 0;
104}
105
106std::shared_ptr<Common::Fiber>& Thread::GetHostContext() {
107 return host_context;
108}
109
110ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
111 std::string name, VAddr entry_point, u32 priority,
112 u64 arg, s32 processor_id, VAddr stack_top,
113 Process* owner_process) {
114 std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
115 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
116 return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top,
117 owner_process, std::move(init_func), init_func_parameter);
118}
119
120ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
121 std::string name, VAddr entry_point, u32 priority,
122 u64 arg, s32 processor_id, VAddr stack_top,
123 Process* owner_process,
124 std::function<void(void*)>&& thread_start_func,
125 void* thread_start_parameter) {
126 auto& kernel = system.Kernel();
127 // Check if the priority is in range. Lowest priority -> highest priority id.
128 if (priority > THREADPRIO_LOWEST && ((type_flags & THREADTYPE_IDLE) == 0)) {
129 LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority);
130 return ERR_INVALID_THREAD_PRIORITY;
131 }
132
133 if (processor_id > THREADPROCESSORID_MAX) {
134 LOG_ERROR(Kernel_SVC, "Invalid processor id: {}", processor_id);
135 return ERR_INVALID_PROCESSOR_ID;
136 }
137
138 if (owner_process) {
139 if (!system.Memory().IsValidVirtualAddress(*owner_process, entry_point)) {
140 LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point);
141 // TODO (bunnei): Find the correct error code to use here
142 return RESULT_UNKNOWN;
143 }
144 }
145
146 std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel);
147
148 thread->thread_id = kernel.CreateNewThreadID();
149 thread->thread_state = ThreadState::Initialized;
150 thread->entry_point = entry_point;
151 thread->stack_top = stack_top;
152 thread->disable_count = 1;
153 thread->tpidr_el0 = 0;
154 thread->current_priority = priority;
155 thread->base_priority = priority;
156 thread->lock_owner = nullptr;
157 thread->schedule_count = -1;
158 thread->last_scheduled_tick = 0;
159 thread->processor_id = processor_id;
160 thread->ideal_core = processor_id;
161 thread->affinity_mask.SetAffinity(processor_id, true);
162 thread->name = std::move(name);
163 thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap();
164 thread->owner_process = owner_process;
165 thread->type = type_flags;
166 thread->signaled = false;
167 if ((type_flags & THREADTYPE_IDLE) == 0) {
168 auto& scheduler = kernel.GlobalSchedulerContext();
169 scheduler.AddThread(thread);
170 }
171 if (owner_process) {
172 thread->tls_address = thread->owner_process->CreateTLSRegion();
173 thread->owner_process->RegisterThread(thread.get());
174 } else {
175 thread->tls_address = 0;
176 }
177
178 // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
179 // to initialize the context
180 if ((type_flags & THREADTYPE_HLE) == 0) {
181 ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top),
182 static_cast<u32>(entry_point), static_cast<u32>(arg));
183 ResetThreadContext64(thread->context_64, stack_top, entry_point, arg);
184 }
185 thread->host_context =
186 std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
187
188 return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
189}
190
191void Thread::SetBasePriority(u32 priority) {
192 ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
193 "Invalid priority value.");
194
195 KScopedSchedulerLock lock(kernel);
196
197 // Change our base priority.
198 base_priority = priority;
199
200 // Perform a priority restoration.
201 RestorePriority(kernel, this);
202}
203
204void Thread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) {
205 signaling_object = object;
206 signaling_result = result;
207}
208
209VAddr Thread::GetCommandBufferAddress() const {
210 // Offset from the start of TLS at which the IPC command buffer begins.
211 constexpr u64 command_header_offset = 0x80;
212 return GetTLSAddress() + command_header_offset;
213}
214
215void Thread::SetState(ThreadState state) {
216 KScopedSchedulerLock sl(kernel);
217
218 // Clear debugging state
219 SetMutexWaitAddressForDebugging({});
220 SetWaitReasonForDebugging({});
221
222 const ThreadState old_state = thread_state;
223 thread_state =
224 static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
225 if (thread_state != old_state) {
226 KScheduler::OnThreadStateChanged(kernel, this, old_state);
227 }
228}
229
230void Thread::AddWaiterImpl(Thread* thread) {
231 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
232
233 // Find the right spot to insert the waiter.
234 auto it = waiter_list.begin();
235 while (it != waiter_list.end()) {
236 if (it->GetPriority() > thread->GetPriority()) {
237 break;
238 }
239 it++;
240 }
241
242 // Keep track of how many kernel waiters we have.
243 if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
244 ASSERT((num_kernel_waiters++) >= 0);
245 }
246
247 // Insert the waiter.
248 waiter_list.insert(it, *thread);
249 thread->SetLockOwner(this);
250}
251
252void Thread::RemoveWaiterImpl(Thread* thread) {
253 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
254
255 // Keep track of how many kernel waiters we have.
256 if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
257 ASSERT((num_kernel_waiters--) > 0);
258 }
259
260 // Remove the waiter.
261 waiter_list.erase(waiter_list.iterator_to(*thread));
262 thread->SetLockOwner(nullptr);
263}
264
265void Thread::RestorePriority(KernelCore& kernel, Thread* thread) {
266 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
267
268 while (true) {
269 // We want to inherit priority where possible.
270 s32 new_priority = thread->GetBasePriority();
271 if (thread->HasWaiters()) {
272 new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
273 }
274
275 // If the priority we would inherit is not different from ours, don't do anything.
276 if (new_priority == thread->GetPriority()) {
277 return;
278 }
279
280 // Ensure we don't violate condition variable red black tree invariants.
281 if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
282 BeforeUpdatePriority(kernel, cv_tree, thread);
283 }
284
285 // Change the priority.
286 const s32 old_priority = thread->GetPriority();
287 thread->SetPriority(new_priority);
288
289 // Restore the condition variable, if relevant.
290 if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
291 AfterUpdatePriority(kernel, cv_tree, thread);
292 }
293
294 // Update the scheduler.
295 KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);
296
297 // Keep the lock owner up to date.
298 Thread* lock_owner = thread->GetLockOwner();
299 if (lock_owner == nullptr) {
300 return;
301 }
302
303 // Update the thread in the lock owner's sorted list, and continue inheriting.
304 lock_owner->RemoveWaiterImpl(thread);
305 lock_owner->AddWaiterImpl(thread);
306 thread = lock_owner;
307 }
308}
309
310void Thread::AddWaiter(Thread* thread) {
311 AddWaiterImpl(thread);
312 RestorePriority(kernel, this);
313}
314
315void Thread::RemoveWaiter(Thread* thread) {
316 RemoveWaiterImpl(thread);
317 RestorePriority(kernel, this);
318}
319
320Thread* Thread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
321 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
322
323 s32 num_waiters{};
324 Thread* next_lock_owner{};
325 auto it = waiter_list.begin();
326 while (it != waiter_list.end()) {
327 if (it->GetAddressKey() == key) {
328 Thread* thread = std::addressof(*it);
329
330 // Keep track of how many kernel waiters we have.
331 if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
332 ASSERT((num_kernel_waiters--) > 0);
333 }
334 it = waiter_list.erase(it);
335
336 // Update the next lock owner.
337 if (next_lock_owner == nullptr) {
338 next_lock_owner = thread;
339 next_lock_owner->SetLockOwner(nullptr);
340 } else {
341 next_lock_owner->AddWaiterImpl(thread);
342 }
343 num_waiters++;
344 } else {
345 it++;
346 }
347 }
348
349 // Do priority updates, if we have a next owner.
350 if (next_lock_owner) {
351 RestorePriority(kernel, this);
352 RestorePriority(kernel, next_lock_owner);
353 }
354
355 // Return output.
356 *out_num_waiters = num_waiters;
357 return next_lock_owner;
358}
359
360ResultCode Thread::SetActivity(ThreadActivity value) {
361 KScopedSchedulerLock lock(kernel);
362
363 auto sched_status = GetState();
364
365 if (sched_status != ThreadState::Runnable && sched_status != ThreadState::Waiting) {
366 return ERR_INVALID_STATE;
367 }
368
369 if (IsTerminationRequested()) {
370 return RESULT_SUCCESS;
371 }
372
373 if (value == ThreadActivity::Paused) {
374 if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) != 0) {
375 return ERR_INVALID_STATE;
376 }
377 AddSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
378 } else {
379 if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) == 0) {
380 return ERR_INVALID_STATE;
381 }
382 RemoveSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
383 }
384 return RESULT_SUCCESS;
385}
386
387ResultCode Thread::Sleep(s64 nanoseconds) {
388 Handle event_handle{};
389 {
390 KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
391 SetState(ThreadState::Waiting);
392 SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
393 }
394
395 if (event_handle != InvalidHandle) {
396 auto& time_manager = kernel.TimeManager();
397 time_manager.UnscheduleTimeEvent(event_handle);
398 }
399 return RESULT_SUCCESS;
400}
401
402void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
403 const auto old_state = GetRawState();
404 pausing_state |= static_cast<u32>(flag);
405 const auto base_scheduling = GetState();
406 thread_state = base_scheduling | static_cast<ThreadState>(pausing_state);
407 KScheduler::OnThreadStateChanged(kernel, this, old_state);
408}
409
410void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
411 const auto old_state = GetRawState();
412 pausing_state &= ~static_cast<u32>(flag);
413 const auto base_scheduling = GetState();
414 thread_state = base_scheduling | static_cast<ThreadState>(pausing_state);
415 KScheduler::OnThreadStateChanged(kernel, this, old_state);
416}
417
418ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
419 KScopedSchedulerLock lock(kernel);
420 const auto HighestSetCore = [](u64 mask, u32 max_cores) {
421 for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
422 if (((mask >> core) & 1) != 0) {
423 return core;
424 }
425 }
426 return -1;
427 };
428
429 const bool use_override = affinity_override_count != 0;
430 if (new_core == THREADPROCESSORID_DONT_UPDATE) {
431 new_core = use_override ? ideal_core_override : ideal_core;
432 if ((new_affinity_mask & (1ULL << new_core)) == 0) {
433 LOG_ERROR(Kernel, "New affinity mask is incorrect! new_core={}, new_affinity_mask={}",
434 new_core, new_affinity_mask);
435 return ERR_INVALID_COMBINATION;
436 }
437 }
438 if (use_override) {
439 ideal_core_override = new_core;
440 } else {
441 const auto old_affinity_mask = affinity_mask;
442 affinity_mask.SetAffinityMask(new_affinity_mask);
443 ideal_core = new_core;
444 if (old_affinity_mask.GetAffinityMask() != new_affinity_mask) {
445 const s32 old_core = processor_id;
446 if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) {
447 if (static_cast<s32>(ideal_core) < 0) {
448 processor_id = HighestSetCore(affinity_mask.GetAffinityMask(),
449 Core::Hardware::NUM_CPU_CORES);
450 } else {
451 processor_id = ideal_core;
452 }
453 }
454 KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_affinity_mask, old_core);
455 }
456 }
457 return RESULT_SUCCESS;
458}
459
460} // namespace Kernel
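
The deleted RestorePriority above is the priority-inheritance core that the new KThread carries forward: a thread's effective priority is the minimum of its base priority and its highest-priority waiter, recomputed up the chain of lock owners. A stripped-down sketch of that loop, omitting the condition-variable tree and scheduler bookkeeping:

#include <algorithm>
#include <vector>

// Minimal thread model for illustrating priority inheritance only.
struct SketchThread {
    int base_priority;                    // priority set by the application
    int current_priority;                 // possibly boosted by waiters
    SketchThread* lock_owner = nullptr;   // holder of the lock we wait on
    std::vector<SketchThread*> waiters;   // threads waiting on locks we hold
};

void RestorePriority(SketchThread* thread) {
    while (thread != nullptr) {
        // Inherit the best (numerically smallest) waiter priority, if any.
        int new_priority = thread->base_priority;
        for (const SketchThread* waiter : thread->waiters) {
            new_priority = std::min(new_priority, waiter->current_priority);
        }
        if (new_priority == thread->current_priority) {
            return; // nothing changed, so the chain above is already correct
        }
        thread->current_priority = new_priority;
        thread = thread->lock_owner; // keep inheriting up the lock chain
    }
}
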
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
deleted file mode 100644
index 6b66c9a0e..000000000
--- a/src/core/hle/kernel/thread.h
+++ /dev/null
@@ -1,782 +0,0 @@
1// Copyright 2014 Citra Emulator Project / PPSSPP Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <functional>
9#include <span>
10#include <string>
11#include <utility>
12#include <vector>
13
14#include <boost/intrusive/list.hpp>
15
16#include "common/common_types.h"
17#include "common/intrusive_red_black_tree.h"
18#include "common/spin_lock.h"
19#include "core/arm/arm_interface.h"
20#include "core/hle/kernel/k_affinity_mask.h"
21#include "core/hle/kernel/k_synchronization_object.h"
22#include "core/hle/kernel/object.h"
23#include "core/hle/kernel/svc_common.h"
24#include "core/hle/result.h"
25
26namespace Common {
27class Fiber;
28}
29
30namespace Core {
31class ARM_Interface;
32class System;
33} // namespace Core
34
35namespace Kernel {
36
37class GlobalSchedulerContext;
38class KernelCore;
39class Process;
40class KScheduler;
41
42enum ThreadPriority : u32 {
43 THREADPRIO_HIGHEST = 0, ///< Highest thread priority
44 THREADPRIO_MAX_CORE_MIGRATION = 2, ///< Highest priority for a core migration
45 THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps
46 THREADPRIO_DEFAULT = 44, ///< Default thread priority for userland apps
47 THREADPRIO_LOWEST = 63, ///< Lowest thread priority
48 THREADPRIO_COUNT = 64, ///< Total number of possible thread priorities.
49};
50
51enum ThreadType : u32 {
52 THREADTYPE_USER = 0x1,
53 THREADTYPE_KERNEL = 0x2,
54 THREADTYPE_HLE = 0x4,
55 THREADTYPE_IDLE = 0x8,
56 THREADTYPE_SUSPEND = 0x10,
57};
58
59enum ThreadProcessorId : s32 {
60 /// Indicates that no particular processor core is preferred.
61 THREADPROCESSORID_DONT_CARE = -1,
62
63 /// Run thread on the ideal core specified by the process.
64 THREADPROCESSORID_IDEAL = -2,
65
66 /// Indicates that the preferred processor ID shouldn't be updated in
67 /// a core mask setting operation.
68 THREADPROCESSORID_DONT_UPDATE = -3,
69
70 THREADPROCESSORID_0 = 0, ///< Run thread on core 0
71 THREADPROCESSORID_1 = 1, ///< Run thread on core 1
72 THREADPROCESSORID_2 = 2, ///< Run thread on core 2
73 THREADPROCESSORID_3 = 3, ///< Run thread on core 3
74 THREADPROCESSORID_MAX = 4, ///< Processor ID must be less than this
75
76 /// Allowed CPU mask
77 THREADPROCESSORID_DEFAULT_MASK = (1 << THREADPROCESSORID_0) | (1 << THREADPROCESSORID_1) |
78 (1 << THREADPROCESSORID_2) | (1 << THREADPROCESSORID_3)
79};
80
81enum class ThreadState : u16 {
82 Initialized = 0,
83 Waiting = 1,
84 Runnable = 2,
85 Terminated = 3,
86
87 SuspendShift = 4,
88 Mask = (1 << SuspendShift) - 1,
89
90 ProcessSuspended = (1 << (0 + SuspendShift)),
91 ThreadSuspended = (1 << (1 + SuspendShift)),
92 DebugSuspended = (1 << (2 + SuspendShift)),
93 BacktraceSuspended = (1 << (3 + SuspendShift)),
94 InitSuspended = (1 << (4 + SuspendShift)),
95
96 SuspendFlagMask = ((1 << 5) - 1) << SuspendShift,
97};
98DECLARE_ENUM_FLAG_OPERATORS(ThreadState);
99
100enum class ThreadWakeupReason {
101 Signal, // The thread was woken up by WakeupAllWaitingThreads due to an object signal.
102 Timeout // The thread was woken up due to a wait timeout.
103};
104
105enum class ThreadActivity : u32 {
106 Normal = 0,
107 Paused = 1,
108};
109
110enum class ThreadSchedFlags : u32 {
111 ProcessPauseFlag = 1 << 4,
112 ThreadPauseFlag = 1 << 5,
113 ProcessDebugPauseFlag = 1 << 6,
114 KernelInitPauseFlag = 1 << 8,
115};
116
117enum class ThreadWaitReasonForDebugging : u32 {
118 None, ///< Thread is not waiting
119 Sleep, ///< Thread is waiting due to a SleepThread SVC
120 IPC, ///< Thread is waiting for the reply from an IPC request
121 Synchronization, ///< Thread is waiting due to a WaitSynchronization SVC
122 ConditionVar, ///< Thread is waiting due to a WaitProcessWideKey SVC
123 Arbitration, ///< Thread is waiting due to a SignalToAddress/WaitForAddress SVC
124 Suspended, ///< Thread is waiting due to process suspension
125};
126
127class Thread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> {
128 friend class KScheduler;
129 friend class Process;
130
131public:
132 explicit Thread(KernelCore& kernel);
133 ~Thread() override;
134
135 using MutexWaitingThreads = std::vector<std::shared_ptr<Thread>>;
136
137 using ThreadContext32 = Core::ARM_Interface::ThreadContext32;
138 using ThreadContext64 = Core::ARM_Interface::ThreadContext64;
139
140 /**
141 * Creates and returns a new thread. The new thread is immediately scheduled
142 * @param system The instance of the whole system
143 * @param name The friendly name desired for the thread
144 * @param entry_point The address at which the thread should start execution
145 * @param priority The thread's priority
146 * @param arg User data to pass to the thread
147 * @param processor_id The ID(s) of the processors on which the thread is desired to be run
148 * @param stack_top The address of the thread's stack top
149 * @param owner_process The parent process for the thread; if null, it's a kernel thread
150 * @return A shared pointer to the newly created thread
151 */
152 static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags,
153 std::string name, VAddr entry_point,
154 u32 priority, u64 arg, s32 processor_id,
155 VAddr stack_top, Process* owner_process);
156
157 /**
158 * Creates and returns a new thread. The new thread is immediately scheduled
159 * @param system The instance of the whole system
160 * @param name The friendly name desired for the thread
161 * @param entry_point The address at which the thread should start execution
162 * @param priority The thread's priority
163 * @param arg User data to pass to the thread
164 * @param processor_id The ID(s) of the processors on which the thread is desired to be run
165 * @param stack_top The address of the thread's stack top
166 * @param owner_process The parent process for the thread; if null, it's a kernel thread
167 * @param thread_start_func The function where the host context will start.
168 * @param thread_start_parameter The parameter which will be passed to the host context on init
169 * @return A shared pointer to the newly created thread
170 */
171 static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags,
172 std::string name, VAddr entry_point,
173 u32 priority, u64 arg, s32 processor_id,
174 VAddr stack_top, Process* owner_process,
175 std::function<void(void*)>&& thread_start_func,
176 void* thread_start_parameter);
177
178 std::string GetName() const override {
179 return name;
180 }
181
182 void SetName(std::string new_name) {
183 name = std::move(new_name);
184 }
185
186 std::string GetTypeName() const override {
187 return "Thread";
188 }
189
190 static constexpr HandleType HANDLE_TYPE = HandleType::Thread;
191 HandleType GetHandleType() const override {
192 return HANDLE_TYPE;
193 }
194
195 /**
196 * Gets the thread's current priority
197 * @return The current thread's priority
198 */
199 [[nodiscard]] s32 GetPriority() const {
200 return current_priority;
201 }
202
203 /**
204 * Sets the thread's current priority.
205 * @param priority The new priority.
206 */
207 void SetPriority(s32 priority) {
208 current_priority = priority;
209 }
210
211 /**
212 * Gets the thread's nominal priority.
213 * @return The current thread's nominal priority.
214 */
215 [[nodiscard]] s32 GetBasePriority() const {
216 return base_priority;
217 }
218
219 /**
220 * Sets the thread's nominal priority.
221 * @param priority The new priority.
222 */
223 void SetBasePriority(u32 priority);
224
225 /// Changes the core that the thread is running or scheduled to run on.
226 [[nodiscard]] ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
227
228 /**
229 * Gets the thread's thread ID
230 * @return The thread's ID
231 */
232 [[nodiscard]] u64 GetThreadID() const {
233 return thread_id;
234 }
235
236 /// Resumes a thread from waiting
237 void Wakeup();
238
239 ResultCode Start();
240
241 virtual bool IsSignaled() const override;
242
243 /// Cancels a waiting operation that this thread may or may not be within.
244 ///
245 /// When the thread is within a waiting state, this will set the thread's
246 /// waiting result to signal a canceled wait. The function will then resume
247 /// this thread.
248 ///
249 void CancelWait();
250
251 void SetSynchronizationResults(KSynchronizationObject* object, ResultCode result);
252
253 void SetSyncedObject(KSynchronizationObject* object, ResultCode result) {
254 SetSynchronizationResults(object, result);
255 }
256
257 ResultCode GetWaitResult(KSynchronizationObject** out) const {
258 *out = signaling_object;
259 return signaling_result;
260 }
261
262 ResultCode GetSignalingResult() const {
263 return signaling_result;
264 }
265
266 /**
267 * Stops a thread, invalidating it from further use
268 */
269 void Stop();
270
271 /*
272 * Returns the Thread Local Storage address of the current thread
273 * @returns VAddr of the thread's TLS
274 */
275 VAddr GetTLSAddress() const {
276 return tls_address;
277 }
278
279 /*
280 * Returns the value of the TPIDR_EL0 Read/Write system register for this thread.
281 * @returns The value of the TPIDR_EL0 register.
282 */
283 u64 GetTPIDR_EL0() const {
284 return tpidr_el0;
285 }
286
287 /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread.
288 void SetTPIDR_EL0(u64 value) {
289 tpidr_el0 = value;
290 }
291
292 /*
293 * Returns the address of the current thread's command buffer, located in the TLS.
294 * @returns VAddr of the thread's command buffer.
295 */
296 VAddr GetCommandBufferAddress() const;
297
298 ThreadContext32& GetContext32() {
299 return context_32;
300 }
301
302 const ThreadContext32& GetContext32() const {
303 return context_32;
304 }
305
306 ThreadContext64& GetContext64() {
307 return context_64;
308 }
309
310 const ThreadContext64& GetContext64() const {
311 return context_64;
312 }
313
314 bool IsHLEThread() const {
315 return (type & THREADTYPE_HLE) != 0;
316 }
317
318 bool IsSuspendThread() const {
319 return (type & THREADTYPE_SUSPEND) != 0;
320 }
321
322 bool IsIdleThread() const {
323 return (type & THREADTYPE_IDLE) != 0;
324 }
325
326 bool WasRunning() const {
327 return was_running;
328 }
329
330 void SetWasRunning(bool value) {
331 was_running = value;
332 }
333
334 std::shared_ptr<Common::Fiber>& GetHostContext();
335
336 ThreadState GetState() const {
337 return thread_state & ThreadState::Mask;
338 }
339
340 ThreadState GetRawState() const {
341 return thread_state;
342 }
343
344 void SetState(ThreadState state);
345
346 s64 GetLastScheduledTick() const {
347 return last_scheduled_tick;
348 }
349
350 void SetLastScheduledTick(s64 tick) {
351 last_scheduled_tick = tick;
352 }
353
354 u64 GetTotalCPUTimeTicks() const {
355 return total_cpu_time_ticks;
356 }
357
358 void UpdateCPUTimeTicks(u64 ticks) {
359 total_cpu_time_ticks += ticks;
360 }
361
362 s32 GetProcessorID() const {
363 return processor_id;
364 }
365
366 s32 GetActiveCore() const {
367 return GetProcessorID();
368 }
369
370 void SetProcessorID(s32 new_core) {
371 processor_id = new_core;
372 }
373
374 void SetActiveCore(s32 new_core) {
375 processor_id = new_core;
376 }
377
378 Process* GetOwnerProcess() {
379 return owner_process;
380 }
381
382 const Process* GetOwnerProcess() const {
383 return owner_process;
384 }
385
386 const MutexWaitingThreads& GetMutexWaitingThreads() const {
387 return wait_mutex_threads;
388 }
389
390 Thread* GetLockOwner() const {
391 return lock_owner;
392 }
393
394 void SetLockOwner(Thread* owner) {
395 lock_owner = owner;
396 }
397
398 u32 GetIdealCore() const {
399 return ideal_core;
400 }
401
402 const KAffinityMask& GetAffinityMask() const {
403 return affinity_mask;
404 }
405
406 ResultCode SetActivity(ThreadActivity value);
407
408 /// Sleeps this thread for the given number of nanoseconds.
409 ResultCode Sleep(s64 nanoseconds);
410
411 s64 GetYieldScheduleCount() const {
412 return schedule_count;
413 }
414
415 void SetYieldScheduleCount(s64 count) {
416 schedule_count = count;
417 }
418
419 bool IsRunning() const {
420 return is_running;
421 }
422
423 void SetIsRunning(bool value) {
424 is_running = value;
425 }
426
427 bool IsWaitCancelled() const {
428 return is_sync_cancelled;
429 }
430
431 void ClearWaitCancelled() {
432 is_sync_cancelled = false;
433 }
434
435 Handle GetGlobalHandle() const {
436 return global_handle;
437 }
438
439 bool IsCancellable() const {
440 return is_cancellable;
441 }
442
443 void SetCancellable() {
444 is_cancellable = true;
445 }
446
447 void ClearCancellable() {
448 is_cancellable = false;
449 }
450
451 bool IsTerminationRequested() const {
452 return will_be_terminated || GetRawState() == ThreadState::Terminated;
453 }
454
455 bool IsPaused() const {
456 return pausing_state != 0;
457 }
458
459 bool IsContinuousOnSVC() const {
460 return is_continuous_on_svc;
461 }
462
463 void SetContinuousOnSVC(bool is_continuous) {
464 is_continuous_on_svc = is_continuous;
465 }
466
467 bool IsPhantomMode() const {
468 return is_phantom_mode;
469 }
470
471 void SetPhantomMode(bool phantom) {
472 is_phantom_mode = phantom;
473 }
474
475 bool HasExited() const {
476 return has_exited;
477 }
478
479 class QueueEntry {
480 public:
481 constexpr QueueEntry() = default;
482
483 constexpr void Initialize() {
484 prev = nullptr;
485 next = nullptr;
486 }
487
488 constexpr Thread* GetPrev() const {
489 return prev;
490 }
491 constexpr Thread* GetNext() const {
492 return next;
493 }
494 constexpr void SetPrev(Thread* thread) {
495 prev = thread;
496 }
497 constexpr void SetNext(Thread* thread) {
498 next = thread;
499 }
500
501 private:
502 Thread* prev{};
503 Thread* next{};
504 };
505
506 QueueEntry& GetPriorityQueueEntry(s32 core) {
507 return per_core_priority_queue_entry[core];
508 }
509
510 const QueueEntry& GetPriorityQueueEntry(s32 core) const {
511 return per_core_priority_queue_entry[core];
512 }
513
514 s32 GetDisableDispatchCount() const {
515 return disable_count;
516 }
517
518 void DisableDispatch() {
519 ASSERT(GetDisableDispatchCount() >= 0);
520 disable_count++;
521 }
522
523 void EnableDispatch() {
524 ASSERT(GetDisableDispatchCount() > 0);
525 disable_count--;
526 }
527
528 void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) {
529 wait_reason_for_debugging = reason;
530 }
531
532 [[nodiscard]] ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const {
533 return wait_reason_for_debugging;
534 }
535
536 void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
537 wait_objects_for_debugging.clear();
538 wait_objects_for_debugging.reserve(objects.size());
539 for (const auto& object : objects) {
540 wait_objects_for_debugging.emplace_back(object);
541 }
542 }
543
544 [[nodiscard]] const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const {
545 return wait_objects_for_debugging;
546 }
547
548 void SetMutexWaitAddressForDebugging(VAddr address) {
549 mutex_wait_address_for_debugging = address;
550 }
551
552 [[nodiscard]] VAddr GetMutexWaitAddressForDebugging() const {
553 return mutex_wait_address_for_debugging;
554 }
555
556 void AddWaiter(Thread* thread);
557
558 void RemoveWaiter(Thread* thread);
559
560 [[nodiscard]] Thread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);
561
562 [[nodiscard]] VAddr GetAddressKey() const {
563 return address_key;
564 }
565
566 [[nodiscard]] u32 GetAddressKeyValue() const {
567 return address_key_value;
568 }
569
570 void SetAddressKey(VAddr key) {
571 address_key = key;
572 }
573
574 void SetAddressKey(VAddr key, u32 val) {
575 address_key = key;
576 address_key_value = val;
577 }
578
579private:
580 static constexpr size_t PriorityInheritanceCountMax = 10;
581 union SyncObjectBuffer {
582 std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> sync_objects{};
583 std::array<Handle,
584 Svc::ArgumentHandleCountMax*(sizeof(KSynchronizationObject*) / sizeof(Handle))>
585 handles;
586 constexpr SyncObjectBuffer() {}
587 };
588 static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));
589
590 struct ConditionVariableComparator {
591 struct LightCompareType {
592 u64 cv_key{};
593 s32 priority{};
594
595 [[nodiscard]] constexpr u64 GetConditionVariableKey() const {
596 return cv_key;
597 }
598
599 [[nodiscard]] constexpr s32 GetPriority() const {
600 return priority;
601 }
602 };
603
604 template <typename T>
605 requires(
606 std::same_as<T, Thread> ||
607 std::same_as<T, LightCompareType>) static constexpr int Compare(const T& lhs,
608 const Thread& rhs) {
609 const uintptr_t l_key = lhs.GetConditionVariableKey();
610 const uintptr_t r_key = rhs.GetConditionVariableKey();
611
612 if (l_key < r_key) {
613 // Sort first by key
614 return -1;
615 } else if (l_key == r_key && lhs.GetPriority() < rhs.GetPriority()) {
616 // And then by priority.
617 return -1;
618 } else {
619 return 1;
620 }
621 }
622 };
623
624 Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};
625
626 using ConditionVariableThreadTreeTraits =
627 Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&Thread::condvar_arbiter_tree_node>;
628 using ConditionVariableThreadTree =
629 ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
630
631public:
632 using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
633
634 [[nodiscard]] uintptr_t GetConditionVariableKey() const {
635 return condvar_key;
636 }
637
638 [[nodiscard]] uintptr_t GetAddressArbiterKey() const {
639 return condvar_key;
640 }
641
642 void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, uintptr_t cv_key,
643 u32 value) {
644 condvar_tree = tree;
645 condvar_key = cv_key;
646 address_key = address;
647 address_key_value = value;
648 }
649
650 void ClearConditionVariable() {
651 condvar_tree = nullptr;
652 }
653
654 [[nodiscard]] bool IsWaitingForConditionVariable() const {
655 return condvar_tree != nullptr;
656 }
657
658 void SetAddressArbiter(ConditionVariableThreadTree* tree, uintptr_t address) {
659 condvar_tree = tree;
660 condvar_key = address;
661 }
662
663 void ClearAddressArbiter() {
664 condvar_tree = nullptr;
665 }
666
667 [[nodiscard]] bool IsWaitingForAddressArbiter() const {
668 return condvar_tree != nullptr;
669 }
670
671 [[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const {
672 return condvar_tree;
673 }
674
675 [[nodiscard]] bool HasWaiters() const {
676 return !waiter_list.empty();
677 }
678
679private:
680 void AddSchedulingFlag(ThreadSchedFlags flag);
681 void RemoveSchedulingFlag(ThreadSchedFlags flag);
682 void AddWaiterImpl(Thread* thread);
683 void RemoveWaiterImpl(Thread* thread);
684 static void RestorePriority(KernelCore& kernel, Thread* thread);
685
686 Common::SpinLock context_guard{};
687 ThreadContext32 context_32{};
688 ThreadContext64 context_64{};
689 std::shared_ptr<Common::Fiber> host_context{};
690
691 ThreadState thread_state = ThreadState::Initialized;
692
693 u64 thread_id = 0;
694
695 VAddr entry_point = 0;
696 VAddr stack_top = 0;
697 std::atomic_int disable_count = 0;
698
699 ThreadType type;
700
701 /// Nominal thread priority, as set by the emulated application.
702 /// The nominal priority is the thread priority without priority
703 /// inheritance taken into account.
704 s32 base_priority{};
705
706 /// Current thread priority. This may change over the course of the
707 /// thread's lifetime in order to facilitate priority inheritance.
708 s32 current_priority{};
709
710 u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
711 s64 schedule_count{};
712 s64 last_scheduled_tick{};
713
714 s32 processor_id = 0;
715
716 VAddr tls_address = 0; ///< Virtual address of the Thread Local Storage of the thread
717 u64 tpidr_el0 = 0; ///< TPIDR_EL0 read/write system register.
718
719 /// Process that owns this thread
720 Process* owner_process;
721
722 /// Objects that the thread is waiting on, in the same order as they were
723 /// passed to WaitSynchronization. This is used for debugging only.
724 std::vector<KSynchronizationObject*> wait_objects_for_debugging;
725
726 /// The current mutex wait address. This is used for debugging only.
727 VAddr mutex_wait_address_for_debugging{};
728
729 /// The reason the thread is waiting. This is used for debugging only.
730 ThreadWaitReasonForDebugging wait_reason_for_debugging{};
731
732 KSynchronizationObject* signaling_object;
733 ResultCode signaling_result{RESULT_SUCCESS};
734
735 /// List of threads that are waiting for a mutex that is held by this thread.
736 MutexWaitingThreads wait_mutex_threads;
737
738 /// Thread that owns the lock that this thread is waiting for.
739 Thread* lock_owner{};
740
741 /// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
742 Handle global_handle = 0;
743
744 KScheduler* scheduler = nullptr;
745
746 std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
747
748 u32 ideal_core{0xFFFFFFFF};
749 KAffinityMask affinity_mask{};
750
751 s32 ideal_core_override = -1;
752 u32 affinity_override_count = 0;
753
754 u32 pausing_state = 0;
755 bool is_running = false;
756 bool is_cancellable = false;
757 bool is_sync_cancelled = false;
758
759 bool is_continuous_on_svc = false;
760
761 bool will_be_terminated = false;
762 bool is_phantom_mode = false;
763 bool has_exited = false;
764
765 bool was_running = false;
766
767 bool signaled{};
768
769 ConditionVariableThreadTree* condvar_tree{};
770 uintptr_t condvar_key{};
771 VAddr address_key{};
772 u32 address_key_value{};
773 s32 num_kernel_waiters{};
774
775 using WaiterList = boost::intrusive::list<Thread>;
776 WaiterList waiter_list{};
777 WaiterList pinned_waiter_list{};
778
779 std::string name;
780};
781
782} // namespace Kernel
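
One detail worth keeping in mind from the deleted header: ThreadState packs the run state into the low four bits and suspend flags above them, so GetState() masks the suspend bits off while GetRawState() exposes them. A compilable sketch of that layout (enumerator subset only, names shortened):

#include <cstdint>

// Low bits: run state; bits 4 and up: suspend flags, as in the deleted enum.
enum ThreadStateBits : uint16_t {
    Initialized = 0,
    Waiting = 1,
    Runnable = 2,
    Terminated = 3,

    SuspendShift = 4,
    Mask = (1 << SuspendShift) - 1,
    ThreadSuspended = 1 << (1 + SuspendShift),
};

constexpr uint16_t GetState(uint16_t raw) {
    return raw & Mask; // suspend flags do not change the reported run state
}

// A suspended thread still reports Runnable; the raw state carries the flag.
static_assert(GetState(Runnable | ThreadSuspended) == Runnable);
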
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index 832edd629..fd0630019 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -8,8 +8,8 @@
8#include "core/core_timing_util.h" 8#include "core/core_timing_util.h"
9#include "core/hle/kernel/handle_table.h" 9#include "core/hle/kernel/handle_table.h"
10#include "core/hle/kernel/k_scheduler.h" 10#include "core/hle/kernel/k_scheduler.h"
11#include "core/hle/kernel/k_thread.h"
11#include "core/hle/kernel/kernel.h" 12#include "core/hle/kernel/kernel.h"
12#include "core/hle/kernel/thread.h"
13#include "core/hle/kernel/time_manager.h" 13#include "core/hle/kernel/time_manager.h"
14 14
15namespace Kernel { 15namespace Kernel {
@@ -18,50 +18,30 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
18 time_manager_event_type = Core::Timing::CreateEvent( 18 time_manager_event_type = Core::Timing::CreateEvent(
19 "Kernel::TimeManagerCallback", 19 "Kernel::TimeManagerCallback",
20 [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { 20 [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
21 std::shared_ptr<Thread> thread; 21 std::shared_ptr<KThread> thread;
22 { 22 {
23 std::lock_guard lock{mutex}; 23 std::lock_guard lock{mutex};
24 const auto proper_handle = static_cast<Handle>(thread_handle); 24 thread = SharedFrom<KThread>(reinterpret_cast<KThread*>(thread_handle));
25 if (cancelled_events[proper_handle]) {
26 return;
27 }
28 thread = system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
29 }
30
31 if (thread) {
32 // Thread can be null if process has exited
33 thread->Wakeup();
34 } 25 }
26 thread->Wakeup();
35 }); 27 });
36} 28}
37 29
38void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds) { 30void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
39 std::lock_guard lock{mutex}; 31 std::lock_guard lock{mutex};
40 event_handle = timetask->GetGlobalHandle();
41 if (nanoseconds > 0) { 32 if (nanoseconds > 0) {
42 ASSERT(timetask); 33 ASSERT(thread);
43 ASSERT(timetask->GetState() != ThreadState::Runnable); 34 ASSERT(thread->GetState() != ThreadState::Runnable);
44 system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{nanoseconds}, 35 system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{nanoseconds},
45 time_manager_event_type, event_handle); 36 time_manager_event_type,
46 } else { 37 reinterpret_cast<uintptr_t>(thread));
47 event_handle = InvalidHandle;
48 }
49 cancelled_events[event_handle] = false;
50}
51
52void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
53 std::lock_guard lock{mutex};
54 if (event_handle == InvalidHandle) {
55 return;
56 } 38 }
57 system.CoreTiming().UnscheduleEvent(time_manager_event_type, event_handle);
58 cancelled_events[event_handle] = true;
59} 39}
60 40
61void TimeManager::CancelTimeEvent(Thread* time_task) { 41void TimeManager::UnscheduleTimeEvent(KThread* thread) {
62 std::lock_guard lock{mutex}; 42 std::lock_guard lock{mutex};
63 const Handle event_handle = time_task->GetGlobalHandle(); 43 system.CoreTiming().UnscheduleEvent(time_manager_event_type,
64 UnscheduleTimeEvent(event_handle); 44 reinterpret_cast<uintptr_t>(thread));
65} 45}
66 46
67} // namespace Kernel 47} // namespace Kernel
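
The rewritten TimeManager drops both the global-handle lookup and the cancelled_events map: the KThread pointer itself travels through CoreTiming as the event's uintptr_t userdata and is recovered in the callback, and cancellation becomes a plain unschedule keyed on the same pointer value. One consequence, visible in the removed "Thread can be null if process has exited" branch, is that the thread object must now outlive any event scheduled on it. A minimal standalone sketch of this pointer-as-userdata pattern (the TimerQueue type here is illustrative, not yuzu's CoreTiming API):

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <utility>
    #include <vector>

    struct Thread {
        int id;
        void Wakeup() { std::cout << "wake thread " << id << '\n'; }
    };

    // Simplified stand-in for the CoreTiming queue: events carry opaque userdata.
    struct TimerQueue {
        using Callback = std::function<void(std::uintptr_t)>;
        std::vector<std::pair<std::uintptr_t, Callback>> pending;

        void Schedule(std::uintptr_t userdata, Callback cb) {
            pending.emplace_back(userdata, std::move(cb));
        }
        // Cancelling is unscheduling by the same pointer value; no handle
        // table or cancelled_events bookkeeping is needed.
        void Unschedule(std::uintptr_t userdata) {
            std::erase_if(pending, [&](const auto& e) { return e.first == userdata; });
        }
        void FireAll() {
            for (auto& [userdata, cb] : pending) {
                cb(userdata);
            }
            pending.clear();
        }
    };

    int main() {
        Thread thread{42};
        TimerQueue queue;
        // Schedule: the pointer is the userdata.
        queue.Schedule(reinterpret_cast<std::uintptr_t>(&thread),
                       [](std::uintptr_t userdata) {
                           reinterpret_cast<Thread*>(userdata)->Wakeup();
                       });
        queue.FireAll(); // prints "wake thread 42"
    }
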
diff --git a/src/core/hle/kernel/time_manager.h b/src/core/hle/kernel/time_manager.h
index f39df39a0..0d7f05f30 100644
--- a/src/core/hle/kernel/time_manager.h
+++ b/src/core/hle/kernel/time_manager.h
@@ -20,7 +20,7 @@ struct EventType;
20 20
21namespace Kernel { 21namespace Kernel {
22 22
23class Thread; 23class KThread;
24 24
25/** 25/**
26 * The `TimeManager` takes care of scheduling time events on threads and executes their TimeUp 26 * The `TimeManager` takes care of scheduling time events on threads and executes their TimeUp
@@ -31,18 +31,14 @@ public:
31 explicit TimeManager(Core::System& system); 31 explicit TimeManager(Core::System& system);
32 32
33 /// Schedule a time event on `timetask` thread that will expire in 'nanoseconds' 33 /// Schedule a time event on `timetask` thread that will expire in 'nanoseconds'
34 /// returns a non-invalid handle in `event_handle` if correctly scheduled 34 void ScheduleTimeEvent(KThread* time_task, s64 nanoseconds);
35 void ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds);
36 35
37 /// Unschedule an existing time event 36 /// Unschedule an existing time event
38 void UnscheduleTimeEvent(Handle event_handle); 37 void UnscheduleTimeEvent(KThread* thread);
39
40 void CancelTimeEvent(Thread* time_task);
41 38
42private: 39private:
43 Core::System& system; 40 Core::System& system;
44 std::shared_ptr<Core::Timing::EventType> time_manager_event_type; 41 std::shared_ptr<Core::Timing::EventType> time_manager_event_type;
45 std::unordered_map<Handle, bool> cancelled_events;
46 std::mutex mutex; 42 std::mutex mutex;
47}; 43};
48 44
diff --git a/src/core/hle/kernel/transfer_memory.h b/src/core/hle/kernel/transfer_memory.h
index 05e9f7464..777799d12 100644
--- a/src/core/hle/kernel/transfer_memory.h
+++ b/src/core/hle/kernel/transfer_memory.h
@@ -72,6 +72,8 @@ public:
72 /// is closed. 72 /// is closed.
73 ResultCode Reset(); 73 ResultCode Reset();
74 74
75 void Finalize() override {}
76
75private: 77private:
76 /// The base address for the memory managed by this instance. 78 /// The base address for the memory managed by this instance.
77 VAddr base_address{}; 79 VAddr base_address{};
diff --git a/src/core/hle/kernel/writable_event.cpp b/src/core/hle/kernel/writable_event.cpp
index fc2f7c424..142212ee4 100644
--- a/src/core/hle/kernel/writable_event.cpp
+++ b/src/core/hle/kernel/writable_event.cpp
@@ -4,10 +4,10 @@
4 4
5#include <algorithm> 5#include <algorithm>
6#include "common/assert.h" 6#include "common/assert.h"
7#include "core/hle/kernel/k_thread.h"
7#include "core/hle/kernel/kernel.h" 8#include "core/hle/kernel/kernel.h"
8#include "core/hle/kernel/object.h" 9#include "core/hle/kernel/object.h"
9#include "core/hle/kernel/readable_event.h" 10#include "core/hle/kernel/readable_event.h"
10#include "core/hle/kernel/thread.h"
11#include "core/hle/kernel/writable_event.h" 11#include "core/hle/kernel/writable_event.h"
12 12
13namespace Kernel { 13namespace Kernel {
@@ -38,8 +38,4 @@ void WritableEvent::Clear() {
38 readable->Clear(); 38 readable->Clear();
39} 39}
40 40
41bool WritableEvent::IsSignaled() const {
42 return readable->IsSignaled();
43}
44
45} // namespace Kernel 41} // namespace Kernel
diff --git a/src/core/hle/kernel/writable_event.h b/src/core/hle/kernel/writable_event.h
index 6189cf65c..467eb2c21 100644
--- a/src/core/hle/kernel/writable_event.h
+++ b/src/core/hle/kernel/writable_event.h
@@ -46,7 +46,8 @@ public:
46 46
47 void Signal(); 47 void Signal();
48 void Clear(); 48 void Clear();
49 bool IsSignaled() const; 49
50 void Finalize() override {}
50 51
51private: 52private:
52 explicit WritableEvent(KernelCore& kernel); 53 explicit WritableEvent(KernelCore& kernel);
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index 41bd1497c..52b034fae 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -1213,7 +1213,7 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_)
1213 {124, nullptr, "EnableApplicationAllThreadDumpOnCrash"}, 1213 {124, nullptr, "EnableApplicationAllThreadDumpOnCrash"},
1214 {130, &IApplicationFunctions::GetGpuErrorDetectedSystemEvent, "GetGpuErrorDetectedSystemEvent"}, 1214 {130, &IApplicationFunctions::GetGpuErrorDetectedSystemEvent, "GetGpuErrorDetectedSystemEvent"},
1215 {140, &IApplicationFunctions::GetFriendInvitationStorageChannelEvent, "GetFriendInvitationStorageChannelEvent"}, 1215 {140, &IApplicationFunctions::GetFriendInvitationStorageChannelEvent, "GetFriendInvitationStorageChannelEvent"},
1216 {141, nullptr, "TryPopFromFriendInvitationStorageChannel"}, 1216 {141, &IApplicationFunctions::TryPopFromFriendInvitationStorageChannel, "TryPopFromFriendInvitationStorageChannel"},
1217 {150, nullptr, "GetNotificationStorageChannelEvent"}, 1217 {150, nullptr, "GetNotificationStorageChannelEvent"},
1218 {151, nullptr, "TryPopFromNotificationStorageChannel"}, 1218 {151, nullptr, "TryPopFromNotificationStorageChannel"},
1219 {160, nullptr, "GetHealthWarningDisappearedSystemEvent"}, 1219 {160, nullptr, "GetHealthWarningDisappearedSystemEvent"},
@@ -1631,6 +1631,14 @@ void IApplicationFunctions::GetFriendInvitationStorageChannelEvent(Kernel::HLERe
1631 rb.PushCopyObjects(friend_invitation_storage_channel_event.readable); 1631 rb.PushCopyObjects(friend_invitation_storage_channel_event.readable);
1632} 1632}
1633 1633
1634void IApplicationFunctions::TryPopFromFriendInvitationStorageChannel(
1635 Kernel::HLERequestContext& ctx) {
1636 LOG_WARNING(Service_AM, "(STUBBED) called");
1637
1638 IPC::ResponseBuilder rb{ctx, 2};
1639 rb.Push(ERR_NO_DATA_IN_CHANNEL);
1640}
1641
1634void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger, 1642void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger,
1635 Core::System& system) { 1643 Core::System& system) {
1636 auto message_queue = std::make_shared<AppletMessageQueue>(system.Kernel()); 1644 auto message_queue = std::make_shared<AppletMessageQueue>(system.Kernel());
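
TryPopFromFriendInvitationStorageChannel is implemented as a pure error stub: the response carries only a result and no payload, so a game polling the channel sees "no data" rather than fabricated invitation data. A toy model of that response shape, assuming a simplified writer in place of yuzu's IPC::ResponseBuilder; as the word counts used elsewhere in this diff suggest, a result code accounts for two of the u32 words (the raw error value below is illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Toy stand-in for IPC::ResponseBuilder; the size is counted in u32 words.
    struct ToyResponse {
        std::vector<std::uint32_t> words;
        explicit ToyResponse(std::size_t size_in_words) { words.reserve(size_in_words); }
        // Result codes appear to occupy two words in the response buffer.
        void PushResult(std::uint32_t raw) {
            words.push_back(raw);
            words.push_back(0);
        }
    };

    int main() {
        constexpr std::uint32_t err_no_data_in_channel = 0x2; // illustrative raw value
        ToyResponse rb(2); // mirrors IPC::ResponseBuilder rb{ctx, 2}
        rb.PushResult(err_no_data_in_channel);
        return rb.words.size() == 2 ? 0 : 1;
    }
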
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h
index 50737432c..a5401a4d2 100644
--- a/src/core/hle/service/am/am.h
+++ b/src/core/hle/service/am/am.h
@@ -288,6 +288,7 @@ private:
288 void GetPreviousProgramIndex(Kernel::HLERequestContext& ctx); 288 void GetPreviousProgramIndex(Kernel::HLERequestContext& ctx);
289 void GetGpuErrorDetectedSystemEvent(Kernel::HLERequestContext& ctx); 289 void GetGpuErrorDetectedSystemEvent(Kernel::HLERequestContext& ctx);
290 void GetFriendInvitationStorageChannelEvent(Kernel::HLERequestContext& ctx); 290 void GetFriendInvitationStorageChannelEvent(Kernel::HLERequestContext& ctx);
291 void TryPopFromFriendInvitationStorageChannel(Kernel::HLERequestContext& ctx);
291 292
292 bool launch_popped_application_specific = false; 293 bool launch_popped_application_specific = false;
293 bool launch_popped_account_preselect = false; 294 bool launch_popped_account_preselect = false;
diff --git a/src/core/hle/service/nfp/nfp.cpp b/src/core/hle/service/nfp/nfp.cpp
index 641bcadea..a515fdc60 100644
--- a/src/core/hle/service/nfp/nfp.cpp
+++ b/src/core/hle/service/nfp/nfp.cpp
@@ -8,9 +8,9 @@
8#include "common/logging/log.h" 8#include "common/logging/log.h"
9#include "core/core.h" 9#include "core/core.h"
10#include "core/hle/ipc_helpers.h" 10#include "core/hle/ipc_helpers.h"
11#include "core/hle/kernel/k_thread.h"
11#include "core/hle/kernel/kernel.h" 12#include "core/hle/kernel/kernel.h"
12#include "core/hle/kernel/readable_event.h" 13#include "core/hle/kernel/readable_event.h"
13#include "core/hle/kernel/thread.h"
14#include "core/hle/kernel/writable_event.h" 14#include "core/hle/kernel/writable_event.h"
15#include "core/hle/lock.h" 15#include "core/hle/lock.h"
16#include "core/hle/service/nfp/nfp.h" 16#include "core/hle/service/nfp/nfp.h"
diff --git a/src/core/hle/service/nvdrv/interface.cpp b/src/core/hle/service/nvdrv/interface.cpp
index cc23b001c..1328b64d0 100644
--- a/src/core/hle/service/nvdrv/interface.cpp
+++ b/src/core/hle/service/nvdrv/interface.cpp
@@ -6,9 +6,9 @@
6#include "common/logging/log.h" 6#include "common/logging/log.h"
7#include "core/core.h" 7#include "core/core.h"
8#include "core/hle/ipc_helpers.h" 8#include "core/hle/ipc_helpers.h"
9#include "core/hle/kernel/k_thread.h"
9#include "core/hle/kernel/kernel.h" 10#include "core/hle/kernel/kernel.h"
10#include "core/hle/kernel/readable_event.h" 11#include "core/hle/kernel/readable_event.h"
11#include "core/hle/kernel/thread.h"
12#include "core/hle/kernel/writable_event.h" 12#include "core/hle/kernel/writable_event.h"
13#include "core/hle/service/nvdrv/interface.h" 13#include "core/hle/service/nvdrv/interface.h"
14#include "core/hle/service/nvdrv/nvdata.h" 14#include "core/hle/service/nvdrv/nvdata.h"
diff --git a/src/core/hle/service/prepo/prepo.cpp b/src/core/hle/service/prepo/prepo.cpp
index 6edd45455..86ecc5b97 100644
--- a/src/core/hle/service/prepo/prepo.cpp
+++ b/src/core/hle/service/prepo/prepo.cpp
@@ -27,7 +27,7 @@ public:
27 {10105, &PlayReport::SaveReportWithUser<Core::Reporter::PlayReportType::New>, "SaveReportWithUser"}, 27 {10105, &PlayReport::SaveReportWithUser<Core::Reporter::PlayReportType::New>, "SaveReportWithUser"},
28 {10200, nullptr, "RequestImmediateTransmission"}, 28 {10200, nullptr, "RequestImmediateTransmission"},
29 {10300, nullptr, "GetTransmissionStatus"}, 29 {10300, nullptr, "GetTransmissionStatus"},
30 {10400, nullptr, "GetSystemSessionId"}, 30 {10400, &PlayReport::GetSystemSessionId, "GetSystemSessionId"},
31 {20100, &PlayReport::SaveSystemReport, "SaveSystemReport"}, 31 {20100, &PlayReport::SaveSystemReport, "SaveSystemReport"},
32 {20101, &PlayReport::SaveSystemReportWithUser, "SaveSystemReportWithUser"}, 32 {20101, &PlayReport::SaveSystemReportWithUser, "SaveSystemReportWithUser"},
33 {20200, nullptr, "SetOperationMode"}, 33 {20200, nullptr, "SetOperationMode"},
@@ -108,6 +108,15 @@ private:
108 rb.Push(RESULT_SUCCESS); 108 rb.Push(RESULT_SUCCESS);
109 } 109 }
110 110
111 void GetSystemSessionId(Kernel::HLERequestContext& ctx) {
112 LOG_WARNING(Service_PREPO, "(STUBBED) called");
113
114 constexpr u64 system_session_id = 0;
115 IPC::ResponseBuilder rb{ctx, 4};
116 rb.Push(RESULT_SUCCESS);
117 rb.Push(system_session_id);
118 }
119
111 void SaveSystemReport(Kernel::HLERequestContext& ctx) { 120 void SaveSystemReport(Kernel::HLERequestContext& ctx) {
112 IPC::RequestParser rp{ctx}; 121 IPC::RequestParser rp{ctx};
113 const auto title_id = rp.PopRaw<u64>(); 122 const auto title_id = rp.PopRaw<u64>();
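
The size passed to IPC::ResponseBuilder is counted in u32 words, which is where the 4 comes from. Judging by the word counts used across this diff:

    rb{ctx, 4}  =  2 (result code)  +  2 (u64 system_session_id)

Analogously, the 5-word GetSockOpt response below breaks down as 2 (result) + 1 (s32 return value) + 1 (errno enum) + 1 (u32 option length).
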
diff --git a/src/core/hle/service/service.cpp b/src/core/hle/service/service.cpp
index ff2a5b1db..1da56bc27 100644
--- a/src/core/hle/service/service.cpp
+++ b/src/core/hle/service/service.cpp
@@ -11,10 +11,10 @@
11#include "core/hle/ipc.h" 11#include "core/hle/ipc.h"
12#include "core/hle/ipc_helpers.h" 12#include "core/hle/ipc_helpers.h"
13#include "core/hle/kernel/client_port.h" 13#include "core/hle/kernel/client_port.h"
14#include "core/hle/kernel/k_thread.h"
14#include "core/hle/kernel/kernel.h" 15#include "core/hle/kernel/kernel.h"
15#include "core/hle/kernel/process.h" 16#include "core/hle/kernel/process.h"
16#include "core/hle/kernel/server_port.h" 17#include "core/hle/kernel/server_port.h"
17#include "core/hle/kernel/thread.h"
18#include "core/hle/service/acc/acc.h" 18#include "core/hle/service/acc/acc.h"
19#include "core/hle/service/am/am.h" 19#include "core/hle/service/am/am.h"
20#include "core/hle/service/aoc/aoc_u.h" 20#include "core/hle/service/aoc/aoc_u.h"
diff --git a/src/core/hle/service/sockets/bsd.cpp b/src/core/hle/service/sockets/bsd.cpp
index d85df6af1..0b306b87a 100644
--- a/src/core/hle/service/sockets/bsd.cpp
+++ b/src/core/hle/service/sockets/bsd.cpp
@@ -13,7 +13,7 @@
13#include "common/microprofile.h" 13#include "common/microprofile.h"
14#include "common/thread.h" 14#include "common/thread.h"
15#include "core/hle/ipc_helpers.h" 15#include "core/hle/ipc_helpers.h"
16#include "core/hle/kernel/thread.h" 16#include "core/hle/kernel/k_thread.h"
17#include "core/hle/service/sockets/bsd.h" 17#include "core/hle/service/sockets/bsd.h"
18#include "core/hle/service/sockets/sockets_translate.h" 18#include "core/hle/service/sockets/sockets_translate.h"
19#include "core/network/network.h" 19#include "core/network/network.h"
@@ -263,11 +263,15 @@ void BSD::GetSockOpt(Kernel::HLERequestContext& ctx) {
263 263
264 LOG_WARNING(Service, "(STUBBED) called. fd={} level={} optname=0x{:x}", fd, level, optname); 264 LOG_WARNING(Service, "(STUBBED) called. fd={} level={} optname=0x{:x}", fd, level, optname);
265 265
266 std::vector<u8> optval(ctx.GetWriteBufferSize());
267
268 ctx.WriteBuffer(optval);
269
266 IPC::ResponseBuilder rb{ctx, 5}; 270 IPC::ResponseBuilder rb{ctx, 5};
267 rb.Push(RESULT_SUCCESS); 271 rb.Push(RESULT_SUCCESS);
268 rb.Push<s32>(-1); 272 rb.Push<s32>(-1);
269 rb.PushEnum(Errno::NOTCONN); 273 rb.PushEnum(Errno::NOTCONN);
270 rb.Push<u32>(0); 274 rb.Push<u32>(static_cast<u32>(optval.size()));
271} 275}
272 276
273void BSD::Listen(Kernel::HLERequestContext& ctx) { 277void BSD::Listen(Kernel::HLERequestContext& ctx) {
@@ -416,6 +420,16 @@ void BSD::Close(Kernel::HLERequestContext& ctx) {
416 BuildErrnoResponse(ctx, CloseImpl(fd)); 420 BuildErrnoResponse(ctx, CloseImpl(fd));
417} 421}
418 422
423void BSD::EventFd(Kernel::HLERequestContext& ctx) {
424 IPC::RequestParser rp{ctx};
425 const u64 initval = rp.Pop<u64>();
426 const u32 flags = rp.Pop<u32>();
427
428 LOG_WARNING(Service, "(STUBBED) called. initval={}, flags={}", initval, flags);
429
430 BuildErrnoResponse(ctx, Errno::SUCCESS);
431}
432
419template <typename Work> 433template <typename Work>
420void BSD::ExecuteWork(Kernel::HLERequestContext& ctx, Work work) { 434void BSD::ExecuteWork(Kernel::HLERequestContext& ctx, Work work) {
421 work.Execute(this); 435 work.Execute(this);
@@ -841,7 +855,7 @@ BSD::BSD(Core::System& system_, const char* name) : ServiceFramework{system_, na
841 {28, nullptr, "GetResourceStatistics"}, 855 {28, nullptr, "GetResourceStatistics"},
842 {29, nullptr, "RecvMMsg"}, 856 {29, nullptr, "RecvMMsg"},
843 {30, nullptr, "SendMMsg"}, 857 {30, nullptr, "SendMMsg"},
844 {31, nullptr, "EventFd"}, 858 {31, &BSD::EventFd, "EventFd"},
845 {32, nullptr, "RegisterResourceStatisticsName"}, 859 {32, nullptr, "RegisterResourceStatisticsName"},
846 {33, nullptr, "Initialize2"}, 860 {33, nullptr, "Initialize2"},
847 }; 861 };
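
EventFd is acknowledged with Errno::SUCCESS but creates no backing object; the guest-side semantics being stubbed are those of a 64-bit counter that becomes readable when nonzero. On a Linux host this corresponds to eventfd(2), so a full implementation could lean on the native primitive roughly as follows (a hedged sketch of the primitive, not what the service actually does):

    #include <sys/eventfd.h> // Linux-only
    #include <unistd.h>

    #include <cstdint>
    #include <cstdio>

    int main() {
        // initval seeds the counter; the guest's u32 flags would need translating.
        const int efd = eventfd(/*initval=*/1, /*flags=*/0);
        if (efd < 0) {
            std::perror("eventfd");
            return 1;
        }
        std::uint64_t value = 0;
        // A read drains the counter; with the seed above this yields 1.
        if (read(efd, &value, sizeof(value)) == static_cast<ssize_t>(sizeof(value))) {
            std::printf("counter was %llu\n", static_cast<unsigned long long>(value));
        }
        close(efd);
        return 0;
    }

The GetSockOpt change in the same file is related hardening: the output buffer is now zero-filled and its size reported in the response, so callers that trust the returned option length no longer read an unwritten buffer.
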
diff --git a/src/core/hle/service/sockets/bsd.h b/src/core/hle/service/sockets/bsd.h
index f5831dd48..1d2df9c61 100644
--- a/src/core/hle/service/sockets/bsd.h
+++ b/src/core/hle/service/sockets/bsd.h
@@ -136,6 +136,7 @@ private:
136 void SendTo(Kernel::HLERequestContext& ctx); 136 void SendTo(Kernel::HLERequestContext& ctx);
137 void Write(Kernel::HLERequestContext& ctx); 137 void Write(Kernel::HLERequestContext& ctx);
138 void Close(Kernel::HLERequestContext& ctx); 138 void Close(Kernel::HLERequestContext& ctx);
139 void EventFd(Kernel::HLERequestContext& ctx);
139 140
140 template <typename Work> 141 template <typename Work>
141 void ExecuteWork(Kernel::HLERequestContext& ctx, Work work); 142 void ExecuteWork(Kernel::HLERequestContext& ctx, Work work);
diff --git a/src/core/hle/service/time/time.cpp b/src/core/hle/service/time/time.cpp
index abc753d5d..18629dd7e 100644
--- a/src/core/hle/service/time/time.cpp
+++ b/src/core/hle/service/time/time.cpp
@@ -121,7 +121,7 @@ private:
121}; 121};
122 122
123ResultCode Module::Interface::GetClockSnapshotFromSystemClockContextInternal( 123ResultCode Module::Interface::GetClockSnapshotFromSystemClockContextInternal(
124 Kernel::Thread* thread, Clock::SystemClockContext user_context, 124 Kernel::KThread* thread, Clock::SystemClockContext user_context,
125 Clock::SystemClockContext network_context, u8 type, Clock::ClockSnapshot& clock_snapshot) { 125 Clock::SystemClockContext network_context, u8 type, Clock::ClockSnapshot& clock_snapshot) {
126 126
127 auto& time_manager{system.GetTimeManager()}; 127 auto& time_manager{system.GetTimeManager()};
diff --git a/src/core/hle/service/time/time.h b/src/core/hle/service/time/time.h
index 975a8ae5b..4154c7ee9 100644
--- a/src/core/hle/service/time/time.h
+++ b/src/core/hle/service/time/time.h
@@ -39,7 +39,7 @@ public:
39 39
40 private: 40 private:
41 ResultCode GetClockSnapshotFromSystemClockContextInternal( 41 ResultCode GetClockSnapshotFromSystemClockContextInternal(
42 Kernel::Thread* thread, Clock::SystemClockContext user_context, 42 Kernel::KThread* thread, Clock::SystemClockContext user_context,
43 Clock::SystemClockContext network_context, u8 type, 43 Clock::SystemClockContext network_context, u8 type,
44 Clock::ClockSnapshot& clock_snapshot); 44 Clock::ClockSnapshot& clock_snapshot);
45 45
diff --git a/src/core/hle/service/time/time_sharedmemory.h b/src/core/hle/service/time/time_sharedmemory.h
index 5976b2046..e0c3e63da 100644
--- a/src/core/hle/service/time/time_sharedmemory.h
+++ b/src/core/hle/service/time/time_sharedmemory.h
@@ -6,8 +6,8 @@
6 6
7#include "common/common_types.h" 7#include "common/common_types.h"
8#include "common/uuid.h" 8#include "common/uuid.h"
9#include "core/hle/kernel/k_thread.h"
9#include "core/hle/kernel/shared_memory.h" 10#include "core/hle/kernel/shared_memory.h"
10#include "core/hle/kernel/thread.h"
11#include "core/hle/service/time/clock_types.h" 11#include "core/hle/service/time/clock_types.h"
12 12
13namespace Service::Time { 13namespace Service::Time {
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index 968cd16b6..f3de2c428 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -18,8 +18,8 @@
18#include "common/swap.h" 18#include "common/swap.h"
19#include "core/core_timing.h" 19#include "core/core_timing.h"
20#include "core/hle/ipc_helpers.h" 20#include "core/hle/ipc_helpers.h"
21#include "core/hle/kernel/k_thread.h"
21#include "core/hle/kernel/readable_event.h" 22#include "core/hle/kernel/readable_event.h"
22#include "core/hle/kernel/thread.h"
23#include "core/hle/kernel/writable_event.h" 23#include "core/hle/kernel/writable_event.h"
24#include "core/hle/service/nvdrv/nvdata.h" 24#include "core/hle/service/nvdrv/nvdata.h"
25#include "core/hle/service/nvdrv/nvdrv.h" 25#include "core/hle/service/nvdrv/nvdrv.h"
diff --git a/src/core/loader/nro.cpp b/src/core/loader/nro.cpp
index ccf8cc153..f976d0a9c 100644
--- a/src/core/loader/nro.cpp
+++ b/src/core/loader/nro.cpp
@@ -15,9 +15,9 @@
15#include "core/file_sys/romfs_factory.h" 15#include "core/file_sys/romfs_factory.h"
16#include "core/file_sys/vfs_offset.h" 16#include "core/file_sys/vfs_offset.h"
17#include "core/hle/kernel/code_set.h" 17#include "core/hle/kernel/code_set.h"
18#include "core/hle/kernel/k_thread.h"
18#include "core/hle/kernel/memory/page_table.h" 19#include "core/hle/kernel/memory/page_table.h"
19#include "core/hle/kernel/process.h" 20#include "core/hle/kernel/process.h"
20#include "core/hle/kernel/thread.h"
21#include "core/hle/service/filesystem/filesystem.h" 21#include "core/hle/service/filesystem/filesystem.h"
22#include "core/loader/nro.h" 22#include "core/loader/nro.h"
23#include "core/loader/nso.h" 23#include "core/loader/nso.h"
@@ -219,8 +219,8 @@ AppLoader_NRO::LoadResult AppLoader_NRO::Load(Kernel::Process& process, Core::Sy
219 } 219 }
220 220
221 is_loaded = true; 221 is_loaded = true;
222 return {ResultStatus::Success, 222 return {ResultStatus::Success, LoadParameters{Kernel::KThread::DefaultThreadPriority,
223 LoadParameters{Kernel::THREADPRIO_DEFAULT, Core::Memory::DEFAULT_STACK_SIZE}}; 223 Core::Memory::DEFAULT_STACK_SIZE}};
224} 224}
225 225
226ResultStatus AppLoader_NRO::ReadIcon(std::vector<u8>& buffer) { 226ResultStatus AppLoader_NRO::ReadIcon(std::vector<u8>& buffer) {
diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp
index 95b6f339a..ea347ea83 100644
--- a/src/core/loader/nso.cpp
+++ b/src/core/loader/nso.cpp
@@ -15,9 +15,9 @@
15#include "core/core.h" 15#include "core/core.h"
16#include "core/file_sys/patch_manager.h" 16#include "core/file_sys/patch_manager.h"
17#include "core/hle/kernel/code_set.h" 17#include "core/hle/kernel/code_set.h"
18#include "core/hle/kernel/k_thread.h"
18#include "core/hle/kernel/memory/page_table.h" 19#include "core/hle/kernel/memory/page_table.h"
19#include "core/hle/kernel/process.h" 20#include "core/hle/kernel/process.h"
20#include "core/hle/kernel/thread.h"
21#include "core/loader/nso.h" 21#include "core/loader/nso.h"
22#include "core/memory.h" 22#include "core/memory.h"
23#include "core/settings.h" 23#include "core/settings.h"
@@ -179,8 +179,8 @@ AppLoader_NSO::LoadResult AppLoader_NSO::Load(Kernel::Process& process, Core::Sy
179 LOG_DEBUG(Loader, "loaded module {} @ 0x{:X}", file->GetName(), base_address); 179 LOG_DEBUG(Loader, "loaded module {} @ 0x{:X}", file->GetName(), base_address);
180 180
181 is_loaded = true; 181 is_loaded = true;
182 return {ResultStatus::Success, 182 return {ResultStatus::Success, LoadParameters{Kernel::KThread::DefaultThreadPriority,
183 LoadParameters{Kernel::THREADPRIO_DEFAULT, Core::Memory::DEFAULT_STACK_SIZE}}; 183 Core::Memory::DEFAULT_STACK_SIZE}};
184} 184}
185 185
186ResultStatus AppLoader_NSO::ReadNSOModules(Modules& modules) { 186ResultStatus AppLoader_NSO::ReadNSOModules(Modules& modules) {
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index f5cdf548e..c841f3cd7 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -4,6 +4,7 @@
4 4
5#include "common/alignment.h" 5#include "common/alignment.h"
6#include "common/assert.h" 6#include "common/assert.h"
7#include "common/logging/log.h"
7#include "core/core.h" 8#include "core/core.h"
8#include "core/hle/kernel/memory/page_table.h" 9#include "core/hle/kernel/memory/page_table.h"
9#include "core/hle/kernel/process.h" 10#include "core/hle/kernel/process.h"
@@ -38,6 +39,12 @@ GPUVAddr MemoryManager::UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std
38} 39}
39 40
40GPUVAddr MemoryManager::Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size) { 41GPUVAddr MemoryManager::Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size) {
42 const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first);
43 if (it != map_ranges.end() && it->first == gpu_addr) {
44 it->second = size;
45 } else {
46 map_ranges.insert(it, MapRange{gpu_addr, size});
47 }
41 return UpdateRange(gpu_addr, cpu_addr, size); 48 return UpdateRange(gpu_addr, cpu_addr, size);
42} 49}
43 50
@@ -52,10 +59,16 @@ GPUVAddr MemoryManager::MapAllocate32(VAddr cpu_addr, std::size_t size) {
52} 59}
53 60
54void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { 61void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
55 if (!size) { 62 if (size == 0) {
56 return; 63 return;
57 } 64 }
58 65 const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first);
66 if (it != map_ranges.end()) {
67 ASSERT(it->first == gpu_addr);
68 map_ranges.erase(it);
69 } else {
70 UNREACHABLE_MSG("Unmapping non-existent GPU address=0x{:x}", gpu_addr);
71 }
59 // Flush and invalidate through the GPU interface, to be asynchronous if possible. 72 // Flush and invalidate through the GPU interface, to be asynchronous if possible.
60 const std::optional<VAddr> cpu_addr = GpuToCpuAddress(gpu_addr); 73 const std::optional<VAddr> cpu_addr = GpuToCpuAddress(gpu_addr);
61 ASSERT(cpu_addr); 74 ASSERT(cpu_addr);
@@ -218,6 +231,12 @@ const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
218 return system.Memory().GetPointer(*address); 231 return system.Memory().GetPointer(*address);
219} 232}
220 233
234size_t MemoryManager::BytesToMapEnd(GPUVAddr gpu_addr) const noexcept {
235 auto it = std::ranges::upper_bound(map_ranges, gpu_addr, {}, &MapRange::first);
236 --it;
237 return it->second - (gpu_addr - it->first);
238}
239
221void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const { 240void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const {
222 std::size_t remaining_size{size}; 241 std::size_t remaining_size{size};
223 std::size_t page_index{gpu_src_addr >> page_bits}; 242 std::size_t page_index{gpu_src_addr >> page_bits};
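
Map and Unmap now maintain map_ranges, a vector of (base address, size) pairs kept ordered by base, so BytesToMapEnd can binary-search for the containing mapping instead of walking the page table. A condensed standalone sketch of the same bookkeeping (C++20; the types and the main driver are illustrative):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <utility>
    #include <vector>

    using GPUVAddr = std::uint64_t;
    using MapRange = std::pair<GPUVAddr, std::size_t>;

    std::vector<MapRange> map_ranges; // sorted by .first (base address)

    void Map(GPUVAddr gpu_addr, std::size_t size) {
        const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first);
        if (it != map_ranges.end() && it->first == gpu_addr) {
            it->second = size; // remap in place
        } else {
            map_ranges.insert(it, MapRange{gpu_addr, size}); // keep the vector sorted
        }
    }

    void Unmap(GPUVAddr gpu_addr) {
        const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first);
        assert(it != map_ranges.end() && it->first == gpu_addr);
        map_ranges.erase(it);
    }

    // Distance from gpu_addr to the end of the mapping that contains it.
    std::size_t BytesToMapEnd(GPUVAddr gpu_addr) {
        // upper_bound finds the first range starting *after* gpu_addr,
        // so the previous entry is the containing range.
        auto it = std::ranges::upper_bound(map_ranges, gpu_addr, {}, &MapRange::first);
        assert(it != map_ranges.begin());
        --it;
        return it->second - (gpu_addr - it->first);
    }

    int main() {
        Map(0x1000, 0x2000);
        Map(0x4000, 0x1000);
        assert(BytesToMapEnd(0x1800) == 0x1800); // 0x2000 - 0x800 into the range
        Unmap(0x1000);
        assert(BytesToMapEnd(0x4800) == 0x800);
    }

Note that BytesToMapEnd, like the version in the diff, steps back unconditionally from upper_bound: it assumes the queried address lies inside some existing mapping.
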
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index a52fbbd8c..b468a67de 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -85,6 +85,9 @@ public:
85 [[nodiscard]] u8* GetPointer(GPUVAddr addr); 85 [[nodiscard]] u8* GetPointer(GPUVAddr addr);
86 [[nodiscard]] const u8* GetPointer(GPUVAddr addr) const; 86 [[nodiscard]] const u8* GetPointer(GPUVAddr addr) const;
87 87
88 /// Returns the number of bytes until the end of the memory map containing the given GPU address
89 [[nodiscard]] size_t BytesToMapEnd(GPUVAddr gpu_addr) const noexcept;
90
88 /** 91 /**
89 * ReadBlock and WriteBlock are full read and write operations over virtual 92 * ReadBlock and WriteBlock are full read and write operations over virtual
90 * GPU Memory. It's important to use these when GPU memory may not be continuous 93 * GPU Memory. It's important to use these when GPU memory may not be continuous
@@ -151,6 +154,9 @@ private:
151 VideoCore::RasterizerInterface* rasterizer = nullptr; 154 VideoCore::RasterizerInterface* rasterizer = nullptr;
152 155
153 std::vector<PageEntry> page_table; 156 std::vector<PageEntry> page_table;
157
158 using MapRange = std::pair<GPUVAddr, size_t>;
159 std::vector<MapRange> map_ranges;
154}; 160};
155 161
156} // namespace Tegra 162} // namespace Tegra
diff --git a/src/yuzu/configuration/configure_input.cpp b/src/yuzu/configuration/configure_input.cpp
index 567a36d9b..422022d02 100644
--- a/src/yuzu/configuration/configure_input.cpp
+++ b/src/yuzu/configuration/configure_input.cpp
@@ -190,12 +190,16 @@ void ConfigureInput::ApplyConfiguration() {
190 // This emulates a delay between disconnecting and reconnecting controllers as some games 190 // This emulates a delay between disconnecting and reconnecting controllers as some games
191 // do not respond to a change in controller type if it was instantaneous. 191 // do not respond to a change in controller type if it was instantaneous.
192 using namespace std::chrono_literals; 192 using namespace std::chrono_literals;
193 std::this_thread::sleep_for(60ms); 193 std::this_thread::sleep_for(150ms);
194 194
195 for (auto* controller : player_controllers) { 195 for (auto* controller : player_controllers) {
196 controller->TryConnectSelectedController(); 196 controller->TryConnectSelectedController();
197 } 197 }
198 198
199 // Wait again after reconnecting so that the emulated game has time to register
200 // the newly connected controllers before further configuration is applied.
201 std::this_thread::sleep_for(150ms);
202
199 advanced->ApplyConfiguration(); 203 advanced->ApplyConfiguration();
200 204
201 const bool pre_docked_mode = Settings::values.use_docked_mode.GetValue(); 205 const bool pre_docked_mode = Settings::values.use_docked_mode.GetValue();
diff --git a/src/yuzu/configuration/configure_input_player.cpp b/src/yuzu/configuration/configure_input_player.cpp
index 13f0351d4..fbe36046b 100644
--- a/src/yuzu/configuration/configure_input_player.cpp
+++ b/src/yuzu/configuration/configure_input_player.cpp
@@ -579,11 +579,11 @@ void ConfigureInputPlayer::ApplyConfiguration() {
579 // Apply configuration for handheld 579 // Apply configuration for handheld
580 if (player_index == 0) { 580 if (player_index == 0) {
581 auto& handheld = Settings::values.players.GetValue()[HANDHELD_INDEX]; 581 auto& handheld = Settings::values.players.GetValue()[HANDHELD_INDEX];
582 const auto handheld_connected = handheld.connected;
582 if (player.controller_type == Settings::ControllerType::Handheld) { 583 if (player.controller_type == Settings::ControllerType::Handheld) {
583 handheld = player; 584 handheld = player;
584 } 585 }
585 handheld.connected = ui->groupConnectedController->isChecked() && 586 handheld.connected = handheld_connected;
586 player.controller_type == Settings::ControllerType::Handheld;
587 } 587 }
588} 588}
589 589
@@ -595,6 +595,18 @@ void ConfigureInputPlayer::TryConnectSelectedController() {
595 const auto player_connected = ui->groupConnectedController->isChecked() && 595 const auto player_connected = ui->groupConnectedController->isChecked() &&
596 controller_type != Settings::ControllerType::Handheld; 596 controller_type != Settings::ControllerType::Handheld;
597 597
598 // Connect Handheld depending on Player 1's controller configuration.
599 if (player_index == 0 && controller_type == Settings::ControllerType::Handheld) {
600 auto& handheld = Settings::values.players.GetValue()[HANDHELD_INDEX];
601 const auto handheld_connected = ui->groupConnectedController->isChecked() &&
602 controller_type == Settings::ControllerType::Handheld;
603 // Connect only if handheld is going from disconnected to connected
604 if (!handheld.connected && handheld_connected) {
605 UpdateController(controller_type, HANDHELD_INDEX, true);
606 }
607 handheld.connected = handheld_connected;
608 }
609
598 if (player.controller_type == controller_type && player.connected == player_connected) { 610 if (player.controller_type == controller_type && player.connected == player_connected) {
599 // Set vibration devices in the event that the input device has changed. 611 // Set vibration devices in the event that the input device has changed.
600 ConfigureVibration::SetVibrationDevices(player_index); 612 ConfigureVibration::SetVibrationDevices(player_index);
@@ -606,22 +618,11 @@ void ConfigureInputPlayer::TryConnectSelectedController() {
606 618
607 ConfigureVibration::SetVibrationDevices(player_index); 619 ConfigureVibration::SetVibrationDevices(player_index);
608 620
609 // Connect/Disconnect Handheld depending on Player 1's controller configuration.
610 if (player_index == 0) {
611 auto& handheld = Settings::values.players.GetValue()[HANDHELD_INDEX];
612 if (controller_type == Settings::ControllerType::Handheld) {
613 handheld = player;
614 }
615 handheld.connected = ui->groupConnectedController->isChecked() &&
616 controller_type == Settings::ControllerType::Handheld;
617 UpdateController(Settings::ControllerType::Handheld, HANDHELD_INDEX, handheld.connected);
618 }
619
620 if (!player.connected) { 621 if (!player.connected) {
621 return; 622 return;
622 } 623 }
623 624
624 UpdateController(controller_type, player_index, player_connected); 625 UpdateController(controller_type, player_index, true);
625} 626}
626 627
627void ConfigureInputPlayer::TryDisconnectSelectedController() { 628void ConfigureInputPlayer::TryDisconnectSelectedController() {
@@ -632,11 +633,28 @@ void ConfigureInputPlayer::TryDisconnectSelectedController() {
632 const auto player_connected = ui->groupConnectedController->isChecked() && 633 const auto player_connected = ui->groupConnectedController->isChecked() &&
633 controller_type != Settings::ControllerType::Handheld; 634 controller_type != Settings::ControllerType::Handheld;
634 635
636 // Disconnect Handheld depending on Player 1's controller configuration.
637 if (player_index == 0 && player.controller_type == Settings::ControllerType::Handheld) {
638 const auto& handheld = Settings::values.players.GetValue()[HANDHELD_INDEX];
639 const auto handheld_connected = ui->groupConnectedController->isChecked() &&
640 controller_type == Settings::ControllerType::Handheld;
641 // Disconnect only if handheld is going from connected to disconnected
642 if (handheld.connected && !handheld_connected) {
643 UpdateController(controller_type, HANDHELD_INDEX, false);
644 }
645 return;
646 }
647
635 // Do not do anything if the controller configuration has not changed. 648 // Do not do anything if the controller configuration has not changed.
636 if (player.controller_type == controller_type && player.connected == player_connected) { 649 if (player.controller_type == controller_type && player.connected == player_connected) {
637 return; 650 return;
638 } 651 }
639 652
653 // Do not disconnect if the controller is already disconnected
654 if (!player.connected) {
655 return;
656 }
657
640 // Disconnect the controller first. 658 // Disconnect the controller first.
641 UpdateController(controller_type, player_index, false); 659 UpdateController(controller_type, player_index, false);
642} 660}
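
Both handheld paths now fire UpdateController only on an actual state transition: disconnected to connected in TryConnectSelectedController, and the reverse in TryDisconnectSelectedController. That avoids redundant connect events for a controller already in the requested state. The underlying pattern, reduced to a hypothetical minimal form:

    #include <iostream>

    // Hypothetical stand-ins for the settings entry and the connect hook.
    struct ControllerState {
        bool connected = false;
    };

    void UpdateController(bool connect) {
        std::cout << (connect ? "connect\n" : "disconnect\n");
    }

    // Fire the hook only on a transition, then record the new state.
    void ApplyConnected(ControllerState& state, bool want_connected) {
        if (state.connected != want_connected) {
            UpdateController(want_connected);
        }
        state.connected = want_connected;
    }

    int main() {
        ControllerState handheld;
        ApplyConnected(handheld, true);  // disconnected -> connected: fires
        ApplyConnected(handheld, true);  // already connected: no-op
        ApplyConnected(handheld, false); // connected -> disconnected: fires
    }
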
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index a93b5d3c2..0e5156dcc 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -15,10 +15,11 @@
15#include "core/hle/kernel/handle_table.h" 15#include "core/hle/kernel/handle_table.h"
16#include "core/hle/kernel/k_scheduler.h" 16#include "core/hle/kernel/k_scheduler.h"
17#include "core/hle/kernel/k_synchronization_object.h" 17#include "core/hle/kernel/k_synchronization_object.h"
18#include "core/hle/kernel/k_thread.h"
18#include "core/hle/kernel/process.h" 19#include "core/hle/kernel/process.h"
19#include "core/hle/kernel/readable_event.h" 20#include "core/hle/kernel/readable_event.h"
20#include "core/hle/kernel/svc_common.h" 21#include "core/hle/kernel/svc_common.h"
21#include "core/hle/kernel/thread.h" 22#include "core/hle/kernel/svc_types.h"
22#include "core/memory.h" 23#include "core/memory.h"
23 24
24namespace { 25namespace {
@@ -90,9 +91,9 @@ std::size_t WaitTreeItem::Row() const {
90std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList() { 91std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList() {
91 std::vector<std::unique_ptr<WaitTreeThread>> item_list; 92 std::vector<std::unique_ptr<WaitTreeThread>> item_list;
92 std::size_t row = 0; 93 std::size_t row = 0;
93 auto add_threads = [&](const std::vector<std::shared_ptr<Kernel::Thread>>& threads) { 94 auto add_threads = [&](const std::vector<std::shared_ptr<Kernel::KThread>>& threads) {
94 for (std::size_t i = 0; i < threads.size(); ++i) { 95 for (std::size_t i = 0; i < threads.size(); ++i) {
95 if (!threads[i]->IsHLEThread()) { 96 if (threads[i]->GetThreadTypeForDebugging() == Kernel::ThreadType::User) {
96 item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i])); 97 item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i]));
97 item_list.back()->row = row; 98 item_list.back()->row = row;
98 } 99 }
@@ -117,7 +118,7 @@ WaitTreeMutexInfo::WaitTreeMutexInfo(VAddr mutex_address, const Kernel::HandleTa
117 : mutex_address(mutex_address) { 118 : mutex_address(mutex_address) {
118 mutex_value = Core::System::GetInstance().Memory().Read32(mutex_address); 119 mutex_value = Core::System::GetInstance().Memory().Read32(mutex_address);
119 owner_handle = static_cast<Kernel::Handle>(mutex_value & Kernel::Svc::HandleWaitMask); 120 owner_handle = static_cast<Kernel::Handle>(mutex_value & Kernel::Svc::HandleWaitMask);
120 owner = handle_table.Get<Kernel::Thread>(owner_handle); 121 owner = handle_table.Get<Kernel::KThread>(owner_handle);
121} 122}
122 123
123WaitTreeMutexInfo::~WaitTreeMutexInfo() = default; 124WaitTreeMutexInfo::~WaitTreeMutexInfo() = default;
@@ -139,7 +140,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutexInfo::GetChildren() cons
139 return list; 140 return list;
140} 141}
141 142
142WaitTreeCallstack::WaitTreeCallstack(const Kernel::Thread& thread) : thread(thread) {} 143WaitTreeCallstack::WaitTreeCallstack(const Kernel::KThread& thread) : thread(thread) {}
143WaitTreeCallstack::~WaitTreeCallstack() = default; 144WaitTreeCallstack::~WaitTreeCallstack() = default;
144 145
145QString WaitTreeCallstack::GetText() const { 146QString WaitTreeCallstack::GetText() const {
@@ -149,7 +150,7 @@ QString WaitTreeCallstack::GetText() const {
149std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() const { 150std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() const {
150 std::vector<std::unique_ptr<WaitTreeItem>> list; 151 std::vector<std::unique_ptr<WaitTreeItem>> list;
151 152
152 if (thread.IsHLEThread()) { 153 if (thread.GetThreadTypeForDebugging() != Kernel::ThreadType::User) {
153 return list; 154 return list;
154 } 155 }
155 156
@@ -194,7 +195,7 @@ std::unique_ptr<WaitTreeSynchronizationObject> WaitTreeSynchronizationObject::ma
194 case Kernel::HandleType::ReadableEvent: 195 case Kernel::HandleType::ReadableEvent:
195 return std::make_unique<WaitTreeEvent>(static_cast<const Kernel::ReadableEvent&>(object)); 196 return std::make_unique<WaitTreeEvent>(static_cast<const Kernel::ReadableEvent&>(object));
196 case Kernel::HandleType::Thread: 197 case Kernel::HandleType::Thread:
197 return std::make_unique<WaitTreeThread>(static_cast<const Kernel::Thread&>(object)); 198 return std::make_unique<WaitTreeThread>(static_cast<const Kernel::KThread&>(object));
198 default: 199 default:
199 return std::make_unique<WaitTreeSynchronizationObject>(object); 200 return std::make_unique<WaitTreeSynchronizationObject>(object);
200 } 201 }
@@ -231,21 +232,17 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeObjectList::GetChildren() con
231 return list; 232 return list;
232} 233}
233 234
234WaitTreeThread::WaitTreeThread(const Kernel::Thread& thread) 235WaitTreeThread::WaitTreeThread(const Kernel::KThread& thread)
235 : WaitTreeSynchronizationObject(thread) {} 236 : WaitTreeSynchronizationObject(thread) {}
236WaitTreeThread::~WaitTreeThread() = default; 237WaitTreeThread::~WaitTreeThread() = default;
237 238
238QString WaitTreeThread::GetText() const { 239QString WaitTreeThread::GetText() const {
239 const auto& thread = static_cast<const Kernel::Thread&>(object); 240 const auto& thread = static_cast<const Kernel::KThread&>(object);
240 QString status; 241 QString status;
241 switch (thread.GetState()) { 242 switch (thread.GetState()) {
242 case Kernel::ThreadState::Runnable: 243 case Kernel::ThreadState::Runnable:
243 if (!thread.IsPaused()) { 244 if (!thread.IsSuspended()) {
244 if (thread.WasRunning()) { 245 status = tr("runnable");
245 status = tr("running");
246 } else {
247 status = tr("ready");
248 }
249 } else { 246 } else {
250 status = tr("paused"); 247 status = tr("paused");
251 } 248 }
@@ -297,15 +294,11 @@ QString WaitTreeThread::GetText() const {
297QColor WaitTreeThread::GetColor() const { 294QColor WaitTreeThread::GetColor() const {
298 const std::size_t color_index = IsDarkTheme() ? 1 : 0; 295 const std::size_t color_index = IsDarkTheme() ? 1 : 0;
299 296
300 const auto& thread = static_cast<const Kernel::Thread&>(object); 297 const auto& thread = static_cast<const Kernel::KThread&>(object);
301 switch (thread.GetState()) { 298 switch (thread.GetState()) {
302 case Kernel::ThreadState::Runnable: 299 case Kernel::ThreadState::Runnable:
303 if (!thread.IsPaused()) { 300 if (!thread.IsSuspended()) {
304 if (thread.WasRunning()) { 301 return QColor(WaitTreeColors[0][color_index]);
305 return QColor(WaitTreeColors[0][color_index]);
306 } else {
307 return QColor(WaitTreeColors[1][color_index]);
308 }
309 } else { 302 } else {
310 return QColor(WaitTreeColors[2][color_index]); 303 return QColor(WaitTreeColors[2][color_index]);
311 } 304 }
@@ -336,27 +329,21 @@ QColor WaitTreeThread::GetColor() const {
336std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const { 329std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
337 std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeSynchronizationObject::GetChildren()); 330 std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeSynchronizationObject::GetChildren());
338 331
339 const auto& thread = static_cast<const Kernel::Thread&>(object); 332 const auto& thread = static_cast<const Kernel::KThread&>(object);
340 333
341 QString processor; 334 QString processor;
342 switch (thread.GetProcessorID()) { 335 switch (thread.GetActiveCore()) {
343 case Kernel::ThreadProcessorId::THREADPROCESSORID_IDEAL: 336 case Kernel::Svc::IdealCoreUseProcessValue:
344 processor = tr("ideal"); 337 processor = tr("ideal");
345 break; 338 break;
346 case Kernel::ThreadProcessorId::THREADPROCESSORID_0:
347 case Kernel::ThreadProcessorId::THREADPROCESSORID_1:
348 case Kernel::ThreadProcessorId::THREADPROCESSORID_2:
349 case Kernel::ThreadProcessorId::THREADPROCESSORID_3:
350 processor = tr("core %1").arg(thread.GetProcessorID());
351 break;
352 default: 339 default:
353 processor = tr("Unknown processor %1").arg(thread.GetProcessorID()); 340 processor = tr("core %1").arg(thread.GetActiveCore());
354 break; 341 break;
355 } 342 }
356 343
357 list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor))); 344 list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor)));
358 list.push_back( 345 list.push_back(std::make_unique<WaitTreeText>(
359 std::make_unique<WaitTreeText>(tr("ideal core = %1").arg(thread.GetIdealCore()))); 346 tr("ideal core = %1").arg(thread.GetIdealCoreForDebugging())));
360 list.push_back(std::make_unique<WaitTreeText>( 347 list.push_back(std::make_unique<WaitTreeText>(
361 tr("affinity mask = %1").arg(thread.GetAffinityMask().GetAffinityMask()))); 348 tr("affinity mask = %1").arg(thread.GetAffinityMask().GetAffinityMask())));
362 list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID()))); 349 list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID())));
@@ -390,7 +377,7 @@ WaitTreeEvent::WaitTreeEvent(const Kernel::ReadableEvent& object)
390 : WaitTreeSynchronizationObject(object) {} 377 : WaitTreeSynchronizationObject(object) {}
391WaitTreeEvent::~WaitTreeEvent() = default; 378WaitTreeEvent::~WaitTreeEvent() = default;
392 379
393WaitTreeThreadList::WaitTreeThreadList(const std::vector<Kernel::Thread*>& list) 380WaitTreeThreadList::WaitTreeThreadList(const std::vector<Kernel::KThread*>& list)
394 : thread_list(list) {} 381 : thread_list(list) {}
395WaitTreeThreadList::~WaitTreeThreadList() = default; 382WaitTreeThreadList::~WaitTreeThreadList() = default;
396 383
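
The debugger now reads the core through GetActiveCore and collapses the per-core cases into a default branch, leaving Kernel::Svc::IdealCoreUseProcessValue as the one sentinel that still needs special wording. Reduced to a sketch (the sentinel's numeric value here is illustrative; yuzu takes the real constant from Kernel::Svc):

    #include <iostream>
    #include <string>

    // Illustrative sentinel; the real constant lives in Kernel::Svc.
    constexpr int ideal_core_use_process_value = -2;

    std::string DescribeCore(int core) {
        if (core == ideal_core_use_process_value) {
            return "ideal";
        }
        return "core " + std::to_string(core);
    }

    int main() {
        std::cout << DescribeCore(ideal_core_use_process_value) << '\n'; // "ideal"
        std::cout << DescribeCore(3) << '\n';                            // "core 3"
    }
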
diff --git a/src/yuzu/debugger/wait_tree.h b/src/yuzu/debugger/wait_tree.h
index cf96911ea..b202c5567 100644
--- a/src/yuzu/debugger/wait_tree.h
+++ b/src/yuzu/debugger/wait_tree.h
@@ -19,8 +19,8 @@ class EmuThread;
19namespace Kernel { 19namespace Kernel {
20class HandleTable; 20class HandleTable;
21class KSynchronizationObject; 21class KSynchronizationObject;
22class KThread;
22class ReadableEvent; 23class ReadableEvent;
23class Thread;
24} // namespace Kernel 24} // namespace Kernel
25 25
26class WaitTreeThread; 26class WaitTreeThread;
@@ -83,20 +83,20 @@ private:
83 VAddr mutex_address; 83 VAddr mutex_address;
84 u32 mutex_value; 84 u32 mutex_value;
85 Kernel::Handle owner_handle; 85 Kernel::Handle owner_handle;
86 std::shared_ptr<Kernel::Thread> owner; 86 std::shared_ptr<Kernel::KThread> owner;
87}; 87};
88 88
89class WaitTreeCallstack : public WaitTreeExpandableItem { 89class WaitTreeCallstack : public WaitTreeExpandableItem {
90 Q_OBJECT 90 Q_OBJECT
91public: 91public:
92 explicit WaitTreeCallstack(const Kernel::Thread& thread); 92 explicit WaitTreeCallstack(const Kernel::KThread& thread);
93 ~WaitTreeCallstack() override; 93 ~WaitTreeCallstack() override;
94 94
95 QString GetText() const override; 95 QString GetText() const override;
96 std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override; 96 std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
97 97
98private: 98private:
99 const Kernel::Thread& thread; 99 const Kernel::KThread& thread;
100}; 100};
101 101
102class WaitTreeSynchronizationObject : public WaitTreeExpandableItem { 102class WaitTreeSynchronizationObject : public WaitTreeExpandableItem {
@@ -131,7 +131,7 @@ private:
131class WaitTreeThread : public WaitTreeSynchronizationObject { 131class WaitTreeThread : public WaitTreeSynchronizationObject {
132 Q_OBJECT 132 Q_OBJECT
133public: 133public:
134 explicit WaitTreeThread(const Kernel::Thread& thread); 134 explicit WaitTreeThread(const Kernel::KThread& thread);
135 ~WaitTreeThread() override; 135 ~WaitTreeThread() override;
136 136
137 QString GetText() const override; 137 QString GetText() const override;
@@ -149,14 +149,14 @@ public:
149class WaitTreeThreadList : public WaitTreeExpandableItem { 149class WaitTreeThreadList : public WaitTreeExpandableItem {
150 Q_OBJECT 150 Q_OBJECT
151public: 151public:
152 explicit WaitTreeThreadList(const std::vector<Kernel::Thread*>& list); 152 explicit WaitTreeThreadList(const std::vector<Kernel::KThread*>& list);
153 ~WaitTreeThreadList() override; 153 ~WaitTreeThreadList() override;
154 154
155 QString GetText() const override; 155 QString GetText() const override;
156 std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override; 156 std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
157 157
158private: 158private:
159 const std::vector<Kernel::Thread*>& thread_list; 159 const std::vector<Kernel::KThread*>& thread_list;
160}; 160};
161 161
162class WaitTreeModel : public QAbstractItemModel { 162class WaitTreeModel : public QAbstractItemModel {
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index e76141125..886e6e9d2 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -1039,8 +1039,6 @@ bool GMainWindow::LoadROM(const QString& filename, std::size_t program_index) {
1039 std::make_unique<QtWebBrowser>(*this), // Web Browser 1039 std::make_unique<QtWebBrowser>(*this), // Web Browser
1040 }); 1040 });
1041 1041
1042 system.RegisterHostThread();
1043
1044 const Core::System::ResultStatus result{ 1042 const Core::System::ResultStatus result{
1045 system.Load(*render_window, filename.toStdString(), program_index)}; 1043 system.Load(*render_window, filename.toStdString(), program_index)};
1046 1044