-rw-r--r--  CMakeLists.txt                                            |   2
m---------  externals/cubeb                                           |   0
-rw-r--r--  src/core/CMakeLists.txt                                   |   2
-rw-r--r--  src/core/core.cpp                                         |   6
-rw-r--r--  src/core/core.h                                           |   3
-rw-r--r--  src/core/cpu_manager.cpp                                  |  23
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.cpp                 |  92
-rw-r--r--  src/core/hle/kernel/k_auto_object.h                       |   4
-rw-r--r--  src/core/hle/kernel/k_condition_variable.cpp              | 245
-rw-r--r--  src/core/hle/kernel/k_condition_variable.h                |   2
-rw-r--r--  src/core/hle/kernel/k_handle_table.cpp                    |   6
-rw-r--r--  src/core/hle/kernel/k_handle_table.h                      |   2
-rw-r--r--  src/core/hle/kernel/k_light_condition_variable.cpp        |  80
-rw-r--r--  src/core/hle/kernel/k_light_condition_variable.h          |  58
-rw-r--r--  src/core/hle/kernel/k_light_lock.cpp                      |  72
-rw-r--r--  src/core/hle/kernel/k_light_lock.h                        |   2
-rw-r--r--  src/core/hle/kernel/k_process.cpp                         |  28
-rw-r--r--  src/core/hle/kernel/k_process.h                           |   1
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp                       | 101
-rw-r--r--  src/core/hle/kernel/k_scheduler.h                         |   2
-rw-r--r--  src/core/hle/kernel/k_scheduler_lock.h                    |  10
-rw-r--r--  src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h   |   1
-rw-r--r--  src/core/hle/kernel/k_server_session.cpp                  |   3
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.cpp          | 151
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.h            |  32
-rw-r--r--  src/core/hle/kernel/k_thread.cpp                          | 246
-rw-r--r--  src/core/hle/kernel/k_thread.h                            |  72
-rw-r--r--  src/core/hle/kernel/k_thread_queue.cpp                    |  51
-rw-r--r--  src/core/hle/kernel/k_thread_queue.h                      |  74
-rw-r--r--  src/core/hle/kernel/kernel.cpp                            |  81
-rw-r--r--  src/core/hle/kernel/kernel.h                              |   5
-rw-r--r--  src/core/hle/kernel/service_thread.cpp                    |  33
-rw-r--r--  src/core/hle/kernel/svc.cpp                               |  35
-rw-r--r--  src/core/hle/kernel/time_manager.cpp                      |   6
34 files changed, 896 insertions(+), 635 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c834e9396..a810e11c2 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -131,7 +131,7 @@ add_definitions(-DBOOST_ASIO_DISABLE_CONCEPTS)
 if (MSVC)
     add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/std:c++latest>)
 
-    # cubeb and boost still make use of deprecated result_of.
+    # boost still makes use of deprecated result_of.
     add_definitions(-D_HAS_DEPRECATED_RESULT_OF)
 else()
     set(CMAKE_CXX_STANDARD 20)
diff --git a/externals/cubeb b/externals/cubeb
-Subproject 1d66483ad2b93f0e00e175f9480c771af90003a
+Subproject 75d9d125ee655ef80f3bfcd97ae5a805931042b
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 506885659..49bed614a 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -187,6 +187,7 @@ add_library(core STATIC
     hle/kernel/k_event.h
     hle/kernel/k_handle_table.cpp
     hle/kernel/k_handle_table.h
+    hle/kernel/k_light_condition_variable.cpp
     hle/kernel/k_light_condition_variable.h
     hle/kernel/k_light_lock.cpp
     hle/kernel/k_light_lock.h
@@ -239,6 +240,7 @@ add_library(core STATIC
     hle/kernel/k_system_control.h
     hle/kernel/k_thread.cpp
     hle/kernel/k_thread.h
+    hle/kernel/k_thread_queue.cpp
     hle/kernel/k_thread_queue.h
     hle/kernel/k_trace.h
     hle/kernel/k_transfer_memory.cpp
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 473ab9f81..aa96f709b 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -521,12 +521,6 @@ const ARM_Interface& System::CurrentArmInterface() const {
     return impl->kernel.CurrentPhysicalCore().ArmInterface();
 }
 
-std::size_t System::CurrentCoreIndex() const {
-    std::size_t core = impl->kernel.GetCurrentHostThreadID();
-    ASSERT(core < Core::Hardware::NUM_CPU_CORES);
-    return core;
-}
-
 Kernel::PhysicalCore& System::CurrentPhysicalCore() {
     return impl->kernel.CurrentPhysicalCore();
 }
diff --git a/src/core/core.h b/src/core/core.h
index 645e5c241..52ff90359 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -208,9 +208,6 @@ public:
     /// Gets an ARM interface to the CPU core that is currently running
     [[nodiscard]] const ARM_Interface& CurrentArmInterface() const;
 
-    /// Gets the index of the currently running CPU core
-    [[nodiscard]] std::size_t CurrentCoreIndex() const;
-
     /// Gets the physical core for the CPU core that is currently running
     [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore();
 
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 5d43c6e5d..cbcc54891 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -117,17 +117,18 @@ void CpuManager::MultiCoreRunGuestLoop() {
             physical_core = &kernel.CurrentPhysicalCore();
         }
         system.ExitDynarmicProfile();
-        physical_core->ArmInterface().ClearExclusiveState();
-        kernel.CurrentScheduler()->RescheduleCurrentCore();
+        {
+            Kernel::KScopedDisableDispatch dd(kernel);
+            physical_core->ArmInterface().ClearExclusiveState();
+        }
     }
 }
 
 void CpuManager::MultiCoreRunIdleThread() {
     auto& kernel = system.Kernel();
     while (true) {
-        auto& physical_core = kernel.CurrentPhysicalCore();
-        physical_core.Idle();
-        kernel.CurrentScheduler()->RescheduleCurrentCore();
+        Kernel::KScopedDisableDispatch dd(kernel);
+        kernel.CurrentPhysicalCore().Idle();
     }
 }
 
@@ -135,12 +136,12 @@ void CpuManager::MultiCoreRunSuspendThread() {
     auto& kernel = system.Kernel();
     kernel.CurrentScheduler()->OnThreadStart();
     while (true) {
-        auto core = kernel.GetCurrentHostThreadID();
+        auto core = kernel.CurrentPhysicalCoreIndex();
         auto& scheduler = *kernel.CurrentScheduler();
         Kernel::KThread* current_thread = scheduler.GetCurrentThread();
         Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
         ASSERT(scheduler.ContextSwitchPending());
-        ASSERT(core == kernel.GetCurrentHostThreadID());
+        ASSERT(core == kernel.CurrentPhysicalCoreIndex());
         scheduler.RescheduleCurrentCore();
     }
 }
@@ -346,13 +347,9 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
         sc_sync_first_use = false;
     }
 
-    // Abort if emulation was killed before the session really starts
-    if (!system.IsPoweredOn()) {
-        return;
-    }
-
+    // Emulation was stopped
     if (stop_token.stop_requested()) {
-        break;
+        return;
     }
 
     auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 1b429bc1e..783c69858 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -8,6 +8,7 @@
8#include "core/hle/kernel/k_scheduler.h" 8#include "core/hle/kernel/k_scheduler.h"
9#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 9#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
10#include "core/hle/kernel/k_thread.h" 10#include "core/hle/kernel/k_thread.h"
11#include "core/hle/kernel/k_thread_queue.h"
11#include "core/hle/kernel/kernel.h" 12#include "core/hle/kernel/kernel.h"
12#include "core/hle/kernel/svc_results.h" 13#include "core/hle/kernel/svc_results.h"
13#include "core/hle/kernel/time_manager.h" 14#include "core/hle/kernel/time_manager.h"
@@ -28,7 +29,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
28 29
29bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) { 30bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
30 auto& monitor = system.Monitor(); 31 auto& monitor = system.Monitor();
31 const auto current_core = system.CurrentCoreIndex(); 32 const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
32 33
33 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. 34 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
34 // TODO(bunnei): We should call CanAccessAtomic(..) here. 35 // TODO(bunnei): We should call CanAccessAtomic(..) here.
@@ -58,7 +59,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
58 59
59bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) { 60bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
60 auto& monitor = system.Monitor(); 61 auto& monitor = system.Monitor();
61 const auto current_core = system.CurrentCoreIndex(); 62 const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
62 63
63 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. 64 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
64 // TODO(bunnei): We should call CanAccessAtomic(..) here. 65 // TODO(bunnei): We should call CanAccessAtomic(..) here.
@@ -85,6 +86,27 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32
85 return true; 86 return true;
86} 87}
87 88
89class ThreadQueueImplForKAddressArbiter final : public KThreadQueue {
90public:
91 explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t)
92 : KThreadQueue(kernel_), m_tree(t) {}
93
94 void CancelWait(KThread* waiting_thread, ResultCode wait_result,
95 bool cancel_timer_task) override {
96 // If the thread is waiting on an address arbiter, remove it from the tree.
97 if (waiting_thread->IsWaitingForAddressArbiter()) {
98 m_tree->erase(m_tree->iterator_to(*waiting_thread));
99 waiting_thread->ClearAddressArbiter();
100 }
101
102 // Invoke the base cancel wait handler.
103 KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
104 }
105
106private:
107 KAddressArbiter::ThreadTree* m_tree;
108};
109
88} // namespace 110} // namespace
89 111
90ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) { 112ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
@@ -96,14 +118,14 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
96 auto it = thread_tree.nfind_light({addr, -1}); 118 auto it = thread_tree.nfind_light({addr, -1});
97 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && 119 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
98 (it->GetAddressArbiterKey() == addr)) { 120 (it->GetAddressArbiterKey() == addr)) {
121 // End the thread's wait.
99 KThread* target_thread = std::addressof(*it); 122 KThread* target_thread = std::addressof(*it);
100 target_thread->SetSyncedObject(nullptr, ResultSuccess); 123 target_thread->EndWait(ResultSuccess);
101 124
102 ASSERT(target_thread->IsWaitingForAddressArbiter()); 125 ASSERT(target_thread->IsWaitingForAddressArbiter());
103 target_thread->Wakeup(); 126 target_thread->ClearAddressArbiter();
104 127
105 it = thread_tree.erase(it); 128 it = thread_tree.erase(it);
106 target_thread->ClearAddressArbiter();
107 ++num_waiters; 129 ++num_waiters;
108 } 130 }
109 } 131 }
@@ -129,14 +151,14 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
129 auto it = thread_tree.nfind_light({addr, -1}); 151 auto it = thread_tree.nfind_light({addr, -1});
130 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && 152 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
131 (it->GetAddressArbiterKey() == addr)) { 153 (it->GetAddressArbiterKey() == addr)) {
154 // End the thread's wait.
132 KThread* target_thread = std::addressof(*it); 155 KThread* target_thread = std::addressof(*it);
133 target_thread->SetSyncedObject(nullptr, ResultSuccess); 156 target_thread->EndWait(ResultSuccess);
134 157
135 ASSERT(target_thread->IsWaitingForAddressArbiter()); 158 ASSERT(target_thread->IsWaitingForAddressArbiter());
136 target_thread->Wakeup(); 159 target_thread->ClearAddressArbiter();
137 160
138 it = thread_tree.erase(it); 161 it = thread_tree.erase(it);
139 target_thread->ClearAddressArbiter();
140 ++num_waiters; 162 ++num_waiters;
141 } 163 }
142 } 164 }
@@ -197,14 +219,14 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
197 219
198 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && 220 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
199 (it->GetAddressArbiterKey() == addr)) { 221 (it->GetAddressArbiterKey() == addr)) {
222 // End the thread's wait.
200 KThread* target_thread = std::addressof(*it); 223 KThread* target_thread = std::addressof(*it);
201 target_thread->SetSyncedObject(nullptr, ResultSuccess); 224 target_thread->EndWait(ResultSuccess);
202 225
203 ASSERT(target_thread->IsWaitingForAddressArbiter()); 226 ASSERT(target_thread->IsWaitingForAddressArbiter());
204 target_thread->Wakeup(); 227 target_thread->ClearAddressArbiter();
205 228
206 it = thread_tree.erase(it); 229 it = thread_tree.erase(it);
207 target_thread->ClearAddressArbiter();
208 ++num_waiters; 230 ++num_waiters;
209 } 231 }
210 } 232 }
@@ -214,6 +236,7 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
214ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) { 236ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
215 // Prepare to wait. 237 // Prepare to wait.
216 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); 238 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
239 ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
217 240
218 { 241 {
219 KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout}; 242 KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
@@ -224,9 +247,6 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
224 return ResultTerminationRequested; 247 return ResultTerminationRequested;
225 } 248 }
226 249
227 // Set the synced object.
228 cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
229
230 // Read the value from userspace. 250 // Read the value from userspace.
231 s32 user_value{}; 251 s32 user_value{};
232 bool succeeded{}; 252 bool succeeded{};
@@ -256,31 +276,20 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
256 // Set the arbiter. 276 // Set the arbiter.
257 cur_thread->SetAddressArbiter(&thread_tree, addr); 277 cur_thread->SetAddressArbiter(&thread_tree, addr);
258 thread_tree.insert(*cur_thread); 278 thread_tree.insert(*cur_thread);
259 cur_thread->SetState(ThreadState::Waiting);
260 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
261 }
262
263 // Cancel the timer wait.
264 kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
265 279
266 // Remove from the address arbiter. 280 // Wait for the thread to finish.
267 { 281 cur_thread->BeginWait(std::addressof(wait_queue));
268 KScopedSchedulerLock sl(kernel); 282 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
269
270 if (cur_thread->IsWaitingForAddressArbiter()) {
271 thread_tree.erase(thread_tree.iterator_to(*cur_thread));
272 cur_thread->ClearAddressArbiter();
273 }
274 } 283 }
275 284
276 // Get the result. 285 // Get the result.
277 KSynchronizationObject* dummy{}; 286 return cur_thread->GetWaitResult();
278 return cur_thread->GetWaitResult(&dummy);
279} 287}
280 288
281ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) { 289ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
282 // Prepare to wait. 290 // Prepare to wait.
283 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); 291 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
292 ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
284 293
285 { 294 {
286 KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout}; 295 KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
@@ -291,9 +300,6 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
291 return ResultTerminationRequested; 300 return ResultTerminationRequested;
292 } 301 }
293 302
294 // Set the synced object.
295 cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
296
297 // Read the value from userspace. 303 // Read the value from userspace.
298 s32 user_value{}; 304 s32 user_value{};
299 if (!ReadFromUser(system, &user_value, addr)) { 305 if (!ReadFromUser(system, &user_value, addr)) {
@@ -316,26 +322,14 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
316 // Set the arbiter. 322 // Set the arbiter.
317 cur_thread->SetAddressArbiter(&thread_tree, addr); 323 cur_thread->SetAddressArbiter(&thread_tree, addr);
318 thread_tree.insert(*cur_thread); 324 thread_tree.insert(*cur_thread);
319 cur_thread->SetState(ThreadState::Waiting);
320 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
321 }
322
323 // Cancel the timer wait.
324 kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
325 325
326 // Remove from the address arbiter. 326 // Wait for the thread to finish.
327 { 327 cur_thread->BeginWait(std::addressof(wait_queue));
328 KScopedSchedulerLock sl(kernel); 328 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
329
330 if (cur_thread->IsWaitingForAddressArbiter()) {
331 thread_tree.erase(thread_tree.iterator_to(*cur_thread));
332 cur_thread->ClearAddressArbiter();
333 }
334 } 329 }
335 330
336 // Get the result. 331 // Get the result.
337 KSynchronizationObject* dummy{}; 332 return cur_thread->GetWaitResult();
338 return cur_thread->GetWaitResult(&dummy);
339} 333}
340 334
341} // namespace Kernel 335} // namespace Kernel
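The arbiter changes above are one instance of the commit-wide pattern: each waitable primitive now owns a small KThreadQueue subclass whose CancelWait unhooks the thread from the primitive's own bookkeeping and then defers to the base handler, so timeout, explicit signal, and termination all converge on one wakeup path. A minimal self-contained sketch of that shape (the Thread/ThreadQueue types below are illustrative stand-ins, not yuzu's real API):

```cpp
#include <vector>

struct Thread;

struct ThreadQueue {
    virtual ~ThreadQueue() = default;
    // Base handler: deliver the result and make the thread schedulable again.
    virtual void CancelWait(Thread* t, int wait_result, bool cancel_timer_task);
};

struct Thread {
    ThreadQueue* wait_queue = nullptr; // set by BeginWait
    int result = 0;
    bool asleep = false;
};

void ThreadQueue::CancelWait(Thread* t, int wait_result, bool /*cancel_timer_task*/) {
    t->result = wait_result; // record why the wait ended
    t->asleep = false;       // wake the thread
    t->wait_queue = nullptr;
}

// Primitive-specific queue: on cancellation, also unlink the thread from
// the primitive's waiter container, so no second pass is needed later.
struct ArbiterQueue final : ThreadQueue {
    std::vector<Thread*>* waiters;
    explicit ArbiterQueue(std::vector<Thread*>* w) : waiters(w) {}

    void CancelWait(Thread* t, int wait_result, bool cancel_timer_task) override {
        std::erase(*waiters, t);                                    // primitive bookkeeping
        ThreadQueue::CancelWait(t, wait_result, cancel_timer_task); // common wakeup
    }
};
```

The payoff is visible in WaitIfLessThan/WaitIfEqual above: the waiter no longer needs a second scheduler-locked block after waking just to unlink itself from the tree.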
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index e4fcdbc67..165b76747 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -170,6 +170,10 @@ public:
         }
     }
 
+    const std::string& GetName() const {
+        return name;
+    }
+
 private:
     void RegisterWithKernel();
     void UnregisterWithKernel();
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 7fa9b8cc3..aadcc297a 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -11,6 +11,7 @@
11#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 11#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
12#include "core/hle/kernel/k_synchronization_object.h" 12#include "core/hle/kernel/k_synchronization_object.h"
13#include "core/hle/kernel/k_thread.h" 13#include "core/hle/kernel/k_thread.h"
14#include "core/hle/kernel/k_thread_queue.h"
14#include "core/hle/kernel/kernel.h" 15#include "core/hle/kernel/kernel.h"
15#include "core/hle/kernel/svc_common.h" 16#include "core/hle/kernel/svc_common.h"
16#include "core/hle/kernel/svc_results.h" 17#include "core/hle/kernel/svc_results.h"
@@ -33,7 +34,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
33bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero, 34bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
34 u32 new_orr_mask) { 35 u32 new_orr_mask) {
35 auto& monitor = system.Monitor(); 36 auto& monitor = system.Monitor();
36 const auto current_core = system.CurrentCoreIndex(); 37 const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
37 38
38 // Load the value from the address. 39 // Load the value from the address.
39 const auto expected = monitor.ExclusiveRead32(current_core, address); 40 const auto expected = monitor.ExclusiveRead32(current_core, address);
@@ -57,6 +58,48 @@ bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero
57 return true; 58 return true;
58} 59}
59 60
61class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue {
62public:
63 explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_)
64 : KThreadQueue(kernel_) {}
65
66 void CancelWait(KThread* waiting_thread, ResultCode wait_result,
67 bool cancel_timer_task) override {
68 // Remove the thread as a waiter from its owner.
69 waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);
70
71 // Invoke the base cancel wait handler.
72 KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
73 }
74};
75
76class ThreadQueueImplForKConditionVariableWaitConditionVariable final : public KThreadQueue {
77private:
78 KConditionVariable::ThreadTree* m_tree;
79
80public:
81 explicit ThreadQueueImplForKConditionVariableWaitConditionVariable(
82 KernelCore& kernel_, KConditionVariable::ThreadTree* t)
83 : KThreadQueue(kernel_), m_tree(t) {}
84
85 void CancelWait(KThread* waiting_thread, ResultCode wait_result,
86 bool cancel_timer_task) override {
87 // Remove the thread as a waiter from its owner.
88 if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
89 owner->RemoveWaiter(waiting_thread);
90 }
91
92 // If the thread is waiting on a condvar, remove it from the tree.
93 if (waiting_thread->IsWaitingForConditionVariable()) {
94 m_tree->erase(m_tree->iterator_to(*waiting_thread));
95 waiting_thread->ClearConditionVariable();
96 }
97
98 // Invoke the base cancel wait handler.
99 KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
100 }
101};
102
60} // namespace 103} // namespace
61 104
62KConditionVariable::KConditionVariable(Core::System& system_) 105KConditionVariable::KConditionVariable(Core::System& system_)
@@ -78,84 +121,77 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
78 121
79 // Determine the next tag. 122 // Determine the next tag.
80 u32 next_value{}; 123 u32 next_value{};
81 if (next_owner_thread) { 124 if (next_owner_thread != nullptr) {
82 next_value = next_owner_thread->GetAddressKeyValue(); 125 next_value = next_owner_thread->GetAddressKeyValue();
83 if (num_waiters > 1) { 126 if (num_waiters > 1) {
84 next_value |= Svc::HandleWaitMask; 127 next_value |= Svc::HandleWaitMask;
85 } 128 }
86 129
87 next_owner_thread->SetSyncedObject(nullptr, ResultSuccess); 130 // Write the value to userspace.
88 next_owner_thread->Wakeup(); 131 ResultCode result{ResultSuccess};
89 } 132 if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
90 133 result = ResultSuccess;
91 // Write the value to userspace. 134 } else {
92 if (!WriteToUser(system, addr, std::addressof(next_value))) { 135 result = ResultInvalidCurrentMemory;
93 if (next_owner_thread) {
94 next_owner_thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory);
95 } 136 }
96 137
97 return ResultInvalidCurrentMemory; 138 // Signal the next owner thread.
139 next_owner_thread->EndWait(result);
140 return result;
141 } else {
142 // Just write the value to userspace.
143 R_UNLESS(WriteToUser(system, addr, std::addressof(next_value)),
144 ResultInvalidCurrentMemory);
145
146 return ResultSuccess;
98 } 147 }
99 } 148 }
100
101 return ResultSuccess;
102} 149}
103 150
104ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) { 151ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
105 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); 152 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
153 ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
106 154
107 // Wait for the address. 155 // Wait for the address.
156 KThread* owner_thread{};
108 { 157 {
109 KScopedAutoObject<KThread> owner_thread; 158 KScopedSchedulerLock sl(kernel);
110 ASSERT(owner_thread.IsNull());
111 {
112 KScopedSchedulerLock sl(kernel);
113 cur_thread->SetSyncedObject(nullptr, ResultSuccess);
114 159
115 // Check if the thread should terminate. 160 // Check if the thread should terminate.
116 R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested); 161 R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
117 162
118 { 163 // Read the tag from userspace.
119 // Read the tag from userspace. 164 u32 test_tag{};
120 u32 test_tag{}; 165 R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);
121 R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr),
122 ResultInvalidCurrentMemory);
123
124 // If the tag isn't the handle (with wait mask), we're done.
125 R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), ResultSuccess);
126
127 // Get the lock owner thread.
128 owner_thread =
129 kernel.CurrentProcess()->GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(
130 handle);
131 R_UNLESS(owner_thread.IsNotNull(), ResultInvalidHandle);
132
133 // Update the lock.
134 cur_thread->SetAddressKey(addr, value);
135 owner_thread->AddWaiter(cur_thread);
136 cur_thread->SetState(ThreadState::Waiting);
137 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
138 cur_thread->SetMutexWaitAddressForDebugging(addr);
139 }
140 }
141 ASSERT(owner_thread.IsNotNull());
142 }
143 166
144 // Remove the thread as a waiter from the lock owner. 167 // If the tag isn't the handle (with wait mask), we're done.
145 { 168 R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));
146 KScopedSchedulerLock sl(kernel); 169
147 KThread* owner_thread = cur_thread->GetLockOwner(); 170 // Get the lock owner thread.
148 if (owner_thread != nullptr) { 171 owner_thread = kernel.CurrentProcess()
149 owner_thread->RemoveWaiter(cur_thread); 172 ->GetHandleTable()
150 } 173 .GetObjectWithoutPseudoHandle<KThread>(handle)
174 .ReleasePointerUnsafe();
175 R_UNLESS(owner_thread != nullptr, ResultInvalidHandle);
176
177 // Update the lock.
178 cur_thread->SetAddressKey(addr, value);
179 owner_thread->AddWaiter(cur_thread);
180
181 // Begin waiting.
182 cur_thread->BeginWait(std::addressof(wait_queue));
183 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
184 cur_thread->SetMutexWaitAddressForDebugging(addr);
151 } 185 }
152 186
187 // Close our reference to the owner thread, now that the wait is over.
188 owner_thread->Close();
189
153 // Get the wait result. 190 // Get the wait result.
154 KSynchronizationObject* dummy{}; 191 return cur_thread->GetWaitResult();
155 return cur_thread->GetWaitResult(std::addressof(dummy));
156} 192}
157 193
158KThread* KConditionVariable::SignalImpl(KThread* thread) { 194void KConditionVariable::SignalImpl(KThread* thread) {
159 // Check pre-conditions. 195 // Check pre-conditions.
160 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 196 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
161 197
@@ -169,18 +205,16 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
169 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. 205 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
170 // TODO(bunnei): We should call CanAccessAtomic(..) here. 206 // TODO(bunnei): We should call CanAccessAtomic(..) here.
171 can_access = true; 207 can_access = true;
172 if (can_access) { 208 if (can_access) [[likely]] {
173 UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag, 209 UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
174 Svc::HandleWaitMask); 210 Svc::HandleWaitMask);
175 } 211 }
176 } 212 }
177 213
178 KThread* thread_to_close = nullptr; 214 if (can_access) [[likely]] {
179 if (can_access) {
180 if (prev_tag == Svc::InvalidHandle) { 215 if (prev_tag == Svc::InvalidHandle) {
181 // If nobody held the lock previously, we're all good. 216 // If nobody held the lock previously, we're all good.
182 thread->SetSyncedObject(nullptr, ResultSuccess); 217 thread->EndWait(ResultSuccess);
183 thread->Wakeup();
184 } else { 218 } else {
185 // Get the previous owner. 219 // Get the previous owner.
186 KThread* owner_thread = kernel.CurrentProcess() 220 KThread* owner_thread = kernel.CurrentProcess()
@@ -189,33 +223,22 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
189 static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask)) 223 static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
190 .ReleasePointerUnsafe(); 224 .ReleasePointerUnsafe();
191 225
192 if (owner_thread) { 226 if (owner_thread) [[likely]] {
193 // Add the thread as a waiter on the owner. 227 // Add the thread as a waiter on the owner.
194 owner_thread->AddWaiter(thread); 228 owner_thread->AddWaiter(thread);
195 thread_to_close = owner_thread; 229 owner_thread->Close();
196 } else { 230 } else {
197 // The lock was tagged with a thread that doesn't exist. 231 // The lock was tagged with a thread that doesn't exist.
198 thread->SetSyncedObject(nullptr, ResultInvalidState); 232 thread->EndWait(ResultInvalidState);
199 thread->Wakeup();
200 } 233 }
201 } 234 }
202 } else { 235 } else {
203 // If the address wasn't accessible, note so. 236 // If the address wasn't accessible, note so.
204 thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory); 237 thread->EndWait(ResultInvalidCurrentMemory);
205 thread->Wakeup();
206 } 238 }
207
208 return thread_to_close;
209} 239}
210 240
211void KConditionVariable::Signal(u64 cv_key, s32 count) { 241void KConditionVariable::Signal(u64 cv_key, s32 count) {
212 // Prepare for signaling.
213 constexpr int MaxThreads = 16;
214
215 KLinkedList<KThread> thread_list{kernel};
216 std::array<KThread*, MaxThreads> thread_array;
217 s32 num_to_close{};
218
219 // Perform signaling. 242 // Perform signaling.
220 s32 num_waiters{}; 243 s32 num_waiters{};
221 { 244 {
@@ -226,14 +249,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
226 (it->GetConditionVariableKey() == cv_key)) { 249 (it->GetConditionVariableKey() == cv_key)) {
227 KThread* target_thread = std::addressof(*it); 250 KThread* target_thread = std::addressof(*it);
228 251
229 if (KThread* thread = SignalImpl(target_thread); thread != nullptr) { 252 this->SignalImpl(target_thread);
230 if (num_to_close < MaxThreads) {
231 thread_array[num_to_close++] = thread;
232 } else {
233 thread_list.push_back(*thread);
234 }
235 }
236
237 it = thread_tree.erase(it); 253 it = thread_tree.erase(it);
238 target_thread->ClearConditionVariable(); 254 target_thread->ClearConditionVariable();
239 ++num_waiters; 255 ++num_waiters;
@@ -245,27 +261,16 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
245 WriteToUser(system, cv_key, std::addressof(has_waiter_flag)); 261 WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
246 } 262 }
247 } 263 }
248
249 // Close threads in the array.
250 for (auto i = 0; i < num_to_close; ++i) {
251 thread_array[i]->Close();
252 }
253
254 // Close threads in the list.
255 for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) {
256 (*it).Close();
257 }
258} 264}
259 265
260ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) { 266ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
261 // Prepare to wait. 267 // Prepare to wait.
262 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); 268 KThread* cur_thread = GetCurrentThreadPointer(kernel);
269 ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(
270 kernel, std::addressof(thread_tree));
263 271
264 { 272 {
265 KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout}; 273 KScopedSchedulerLockAndSleep slp(kernel, cur_thread, timeout);
266
267 // Set the synced object.
268 cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
269 274
270 // Check that the thread isn't terminating. 275 // Check that the thread isn't terminating.
271 if (cur_thread->IsTerminationRequested()) { 276 if (cur_thread->IsTerminationRequested()) {
@@ -290,8 +295,7 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
290 } 295 }
291 296
292 // Wake up the next owner. 297 // Wake up the next owner.
293 next_owner_thread->SetSyncedObject(nullptr, ResultSuccess); 298 next_owner_thread->EndWait(ResultSuccess);
294 next_owner_thread->Wakeup();
295 } 299 }
296 300
297 // Write to the cv key. 301 // Write to the cv key.
@@ -308,40 +312,21 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
308 } 312 }
309 } 313 }
310 314
311 // Update condition variable tracking. 315 // If timeout is zero, time out.
312 { 316 R_UNLESS(timeout != 0, ResultTimedOut);
313 cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
314 thread_tree.insert(*cur_thread);
315 }
316 317
317 // If the timeout is non-zero, set the thread as waiting. 318 // Update condition variable tracking.
318 if (timeout != 0) { 319 cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
319 cur_thread->SetState(ThreadState::Waiting); 320 thread_tree.insert(*cur_thread);
320 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
321 cur_thread->SetMutexWaitAddressForDebugging(addr);
322 }
323 }
324
325 // Cancel the timer wait.
326 kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
327
328 // Remove from the condition variable.
329 {
330 KScopedSchedulerLock sl(kernel);
331
332 if (KThread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
333 owner->RemoveWaiter(cur_thread);
334 }
335 321
336 if (cur_thread->IsWaitingForConditionVariable()) { 322 // Begin waiting.
337 thread_tree.erase(thread_tree.iterator_to(*cur_thread)); 323 cur_thread->BeginWait(std::addressof(wait_queue));
338 cur_thread->ClearConditionVariable(); 324 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
339 } 325 cur_thread->SetMutexWaitAddressForDebugging(addr);
340 } 326 }
341 327
342 // Get the result. 328 // Get the wait result.
343 KSynchronizationObject* dummy{}; 329 return cur_thread->GetWaitResult();
344 return cur_thread->GetWaitResult(std::addressof(dummy));
345} 330}
346 331
347} // namespace Kernel 332} // namespace Kernel
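SignalToAddress and WaitForAddress communicate through a single userspace word: it holds the owner thread's handle, with Svc::HandleWaitMask ORed in once more than one waiter remains, so the next unlocker knows it must enter the kernel rather than release in userspace. A small worked sketch of that tag layout (the mask value here is an assumption for illustration; the real constant lives in the SVC definitions):

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for Svc::HandleWaitMask (assumed to be a high bit
// that can never collide with a valid handle value).
constexpr uint32_t kHandleWaitMask = 1u << 30;

uint32_t MakeTag(uint32_t owner_handle, int num_waiters) {
    uint32_t tag = owner_handle;
    if (num_waiters > 1) {
        tag |= kHandleWaitMask; // more waiters remain: unlock must go through the kernel
    }
    return tag;
}

int main() {
    // Single waiter handed the lock: plain handle, uncontended unlock can stay in userspace.
    std::printf("%08x\n", MakeTag(0x0000ABCDu, 1));
    // Several waiters: the wait bit forces the next unlock through svcSignalToAddress.
    std::printf("%08x\n", MakeTag(0x0000ABCDu, 3));
}
```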
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
index 861dbd420..5e4815d08 100644
--- a/src/core/hle/kernel/k_condition_variable.h
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -34,7 +34,7 @@ public:
     [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);
 
 private:
-    [[nodiscard]] KThread* SignalImpl(KThread* thread);
+    void SignalImpl(KThread* thread);
 
     ThreadTree thread_tree;
 
diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp
index e90fc0628..cf95f0852 100644
--- a/src/core/hle/kernel/k_handle_table.cpp
+++ b/src/core/hle/kernel/k_handle_table.cpp
@@ -13,6 +13,7 @@ ResultCode KHandleTable::Finalize() {
     // Get the table and clear our record of it.
     u16 saved_table_size = 0;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         std::swap(m_table_size, saved_table_size);
@@ -43,6 +44,7 @@ bool KHandleTable::Remove(Handle handle) {
     // Find the object and free the entry.
     KAutoObject* obj = nullptr;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if (this->IsValidHandle(handle)) {
@@ -62,6 +64,7 @@ bool KHandleTable::Remove(Handle handle) {
 }
 
 ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.
@@ -84,6 +87,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
 }
 
 ResultCode KHandleTable::Reserve(Handle* out_handle) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.
@@ -94,6 +98,7 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) {
 }
 
 void KHandleTable::Unreserve(Handle handle) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.
@@ -112,6 +117,7 @@ void KHandleTable::Unreserve(Handle handle) {
 }
 
 void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index 95ec905ae..4b114ec2f 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
@@ -68,6 +68,7 @@ public:
     template <typename T = KAutoObject>
     KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
         // Lock and look up in table.
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if constexpr (std::is_same_v<T, KAutoObject>) {
@@ -122,6 +123,7 @@ public:
         size_t num_opened;
         {
             // Lock the table.
+            KScopedDisableDispatch dd(kernel);
             KScopedSpinLock lk(m_lock);
             for (num_opened = 0; num_opened < num_handles; num_opened++) {
                 // Get the current handle.
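Every handle-table hunk adds a KScopedDisableDispatch ahead of the KScopedSpinLock, guaranteeing the holder cannot be rescheduled off its core while other cores spin on the table lock. A minimal sketch of that RAII pairing, with stand-in types rather than yuzu's real classes:

```cpp
// Hypothetical kernel with a per-thread dispatch-disable counter.
class Kernel {
public:
    void DisableDispatch() { ++disable_count_; }
    void EnableDispatch() { --disable_count_; } // real code may reschedule when this hits 0
private:
    int disable_count_ = 0;
};

// Scoped guard: dispatch stays disabled exactly as long as the object lives.
class ScopedDisableDispatch {
public:
    explicit ScopedDisableDispatch(Kernel& k) : kernel_(k) { kernel_.DisableDispatch(); }
    ~ScopedDisableDispatch() { kernel_.EnableDispatch(); }
    ScopedDisableDispatch(const ScopedDisableDispatch&) = delete;
    ScopedDisableDispatch& operator=(const ScopedDisableDispatch&) = delete;
private:
    Kernel& kernel_;
};

// Usage mirrors the diff: dispatch is disabled *before* the spinlock is taken
// and re-enabled only *after* it is released (destructors run in reverse order):
//     ScopedDisableDispatch dd(kernel);
//     ScopedSpinLock lk(m_lock);
```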
diff --git a/src/core/hle/kernel/k_light_condition_variable.cpp b/src/core/hle/kernel/k_light_condition_variable.cpp
new file mode 100644
index 000000000..a8001fffc
--- /dev/null
+++ b/src/core/hle/kernel/k_light_condition_variable.cpp
@@ -0,0 +1,80 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/k_light_condition_variable.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread_queue.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+namespace {
+
+class ThreadQueueImplForKLightConditionVariable final : public KThreadQueue {
+public:
+    ThreadQueueImplForKLightConditionVariable(KernelCore& kernel_, KThread::WaiterList* wl,
+                                              bool term)
+        : KThreadQueue(kernel_), m_wait_list(wl), m_allow_terminating_thread(term) {}
+
+    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
+                    bool cancel_timer_task) override {
+        // Only process waits if we're allowed to.
+        if (ResultTerminationRequested == wait_result && m_allow_terminating_thread) {
+            return;
+        }
+
+        // Remove the waiting thread from the light condition variable.
+        m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
+
+        // Invoke the base cancel wait handler.
+        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
+    }
+
+private:
+    KThread::WaiterList* m_wait_list;
+    bool m_allow_terminating_thread;
+};
+
+} // namespace
+
+void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_terminating_thread) {
+    // Create thread queue.
+    KThread* owner = GetCurrentThreadPointer(kernel);
+
+    ThreadQueueImplForKLightConditionVariable wait_queue(kernel, std::addressof(wait_list),
+                                                         allow_terminating_thread);
+
+    // Sleep the thread.
+    {
+        KScopedSchedulerLockAndSleep lk(kernel, owner, timeout);
+
+        if (!allow_terminating_thread && owner->IsTerminationRequested()) {
+            lk.CancelSleep();
+            return;
+        }
+
+        lock->Unlock();
+
+        // Add the thread to the queue.
+        wait_list.push_back(*owner);
+
+        // Begin waiting.
+        owner->BeginWait(std::addressof(wait_queue));
+    }
+
+    // Re-acquire the lock.
+    lock->Lock();
+}
+
+void KLightConditionVariable::Broadcast() {
+    KScopedSchedulerLock lk(kernel);
+
+    // Signal all threads.
+    for (auto it = wait_list.begin(); it != wait_list.end(); it = wait_list.erase(it)) {
+        it->EndWait(ResultSuccess);
+    }
+}
+
+} // namespace Kernel
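The new Wait unlocks, sleeps, and re-acquires the lock before returning, so callers use the usual predicate loop around it. A hypothetical usage sketch (the queue type and its wiring are assumptions for illustration, though KScopedLightLock itself is declared in k_light_lock.h):

```cpp
#include <deque>

#include "core/hle/kernel/k_light_condition_variable.h"
#include "core/hle/kernel/k_light_lock.h"

// Hypothetical in-kernel consumer of the light lock/condvar pair.
struct WorkQueue {
    explicit WorkQueue(Kernel::KernelCore& kernel) : lock{kernel}, cv{kernel} {}

    void Push(int v) {
        Kernel::KScopedLightLock lk(lock);
        items.push_back(v);
        cv.Broadcast(); // wake every waiter; each re-checks the predicate
    }

    int Pop() {
        Kernel::KScopedLightLock lk(lock);
        while (items.empty()) {
            cv.Wait(std::addressof(lock)); // unlocks, sleeps, relocks
        }
        const int v = items.front();
        items.pop_front();
        return v;
    }

    Kernel::KLightLock lock;
    Kernel::KLightConditionVariable cv;
    std::deque<int> items;
};
```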
diff --git a/src/core/hle/kernel/k_light_condition_variable.h b/src/core/hle/kernel/k_light_condition_variable.h
index fb0ad783a..5d6d7f128 100644
--- a/src/core/hle/kernel/k_light_condition_variable.h
+++ b/src/core/hle/kernel/k_light_condition_variable.h
@@ -2,72 +2,24 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-// This file references various implementation details from Atmosphere, an open-source firmware for
-// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
-
 #pragma once
 
 #include "common/common_types.h"
-#include "core/hle/kernel/k_scheduler.h"
-#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
-#include "core/hle/kernel/time_manager.h"
+#include "core/hle/kernel/k_thread.h"
 
 namespace Kernel {
+
 class KernelCore;
+class KLightLock;
 
 class KLightConditionVariable {
 public:
     explicit KLightConditionVariable(KernelCore& kernel_) : kernel{kernel_} {}
 
-    void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true) {
-        WaitImpl(lock, timeout, allow_terminating_thread);
-    }
-
-    void Broadcast() {
-        KScopedSchedulerLock lk{kernel};
-
-        // Signal all threads.
-        for (auto& thread : wait_list) {
-            thread.SetState(ThreadState::Runnable);
-        }
-    }
+    void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true);
+    void Broadcast();
 
 private:
-    void WaitImpl(KLightLock* lock, s64 timeout, bool allow_terminating_thread) {
-        KThread* owner = GetCurrentThreadPointer(kernel);
-
-        // Sleep the thread.
-        {
-            KScopedSchedulerLockAndSleep lk{kernel, owner, timeout};
-
-            if (!allow_terminating_thread && owner->IsTerminationRequested()) {
-                lk.CancelSleep();
-                return;
-            }
-
-            lock->Unlock();
-
-            // Set the thread as waiting.
-            GetCurrentThread(kernel).SetState(ThreadState::Waiting);
-
-            // Add the thread to the queue.
-            wait_list.push_back(GetCurrentThread(kernel));
-        }
-
-        // Remove the thread from the wait list.
-        {
-            KScopedSchedulerLock sl{kernel};
-
-            wait_list.erase(wait_list.iterator_to(GetCurrentThread(kernel)));
-        }
-
-        // Cancel the task that the sleep setup.
-        kernel.TimeManager().UnscheduleTimeEvent(owner);
-
-        // Re-acquire the lock.
-        lock->Lock();
-    }
-
     KernelCore& kernel;
     KThread::WaiterList wait_list{};
 };
diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp
index 0896e705f..4620342eb 100644
--- a/src/core/hle/kernel/k_light_lock.cpp
+++ b/src/core/hle/kernel/k_light_lock.cpp
@@ -5,44 +5,59 @@
5#include "core/hle/kernel/k_light_lock.h" 5#include "core/hle/kernel/k_light_lock.h"
6#include "core/hle/kernel/k_scheduler.h" 6#include "core/hle/kernel/k_scheduler.h"
7#include "core/hle/kernel/k_thread.h" 7#include "core/hle/kernel/k_thread.h"
8#include "core/hle/kernel/k_thread_queue.h"
8#include "core/hle/kernel/kernel.h" 9#include "core/hle/kernel/kernel.h"
9 10
10namespace Kernel { 11namespace Kernel {
11 12
13namespace {
14
15class ThreadQueueImplForKLightLock final : public KThreadQueue {
16public:
17 explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {}
18
19 void CancelWait(KThread* waiting_thread, ResultCode wait_result,
20 bool cancel_timer_task) override {
21 // Remove the thread as a waiter from its owner.
22 if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
23 owner->RemoveWaiter(waiting_thread);
24 }
25
26 // Invoke the base cancel wait handler.
27 KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
28 }
29};
30
31} // namespace
32
12void KLightLock::Lock() { 33void KLightLock::Lock() {
13 const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)); 34 const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
14 const uintptr_t cur_thread_tag = (cur_thread | 1);
15 35
16 while (true) { 36 while (true) {
17 uintptr_t old_tag = tag.load(std::memory_order_relaxed); 37 uintptr_t old_tag = tag.load(std::memory_order_relaxed);
18 38
19 while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1, 39 while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1),
20 std::memory_order_acquire)) { 40 std::memory_order_acquire)) {
21 if ((old_tag | 1) == cur_thread_tag) {
22 return;
23 }
24 } 41 }
25 42
26 if ((old_tag == 0) || ((old_tag | 1) == cur_thread_tag)) { 43 if (old_tag == 0 || this->LockSlowPath(old_tag | 1, cur_thread)) {
27 break; 44 break;
28 } 45 }
29
30 LockSlowPath(old_tag | 1, cur_thread);
31 } 46 }
32} 47}
33 48
34void KLightLock::Unlock() { 49void KLightLock::Unlock() {
35 const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)); 50 const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
51
36 uintptr_t expected = cur_thread; 52 uintptr_t expected = cur_thread;
37 do { 53 if (!tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
38 if (expected != cur_thread) { 54 this->UnlockSlowPath(cur_thread);
39 return UnlockSlowPath(cur_thread); 55 }
40 }
41 } while (!tag.compare_exchange_weak(expected, 0, std::memory_order_release));
42} 56}
43 57
44void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) { 58bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
45 KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread); 59 KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread);
60 ThreadQueueImplForKLightLock wait_queue(kernel);
46 61
47 // Pend the current thread waiting on the owner thread. 62 // Pend the current thread waiting on the owner thread.
48 { 63 {
@@ -50,7 +65,7 @@ void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
50 65
51 // Ensure we actually have locking to do. 66 // Ensure we actually have locking to do.
52 if (tag.load(std::memory_order_relaxed) != _owner) { 67 if (tag.load(std::memory_order_relaxed) != _owner) {
53 return; 68 return false;
54 } 69 }
55 70
56 // Add the current thread as a waiter on the owner. 71 // Add the current thread as a waiter on the owner.
@@ -58,22 +73,15 @@ void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
58 cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag))); 73 cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
59 owner_thread->AddWaiter(cur_thread); 74 owner_thread->AddWaiter(cur_thread);
60 75
61 // Set thread states. 76 // Begin waiting to hold the lock.
62 cur_thread->SetState(ThreadState::Waiting); 77 cur_thread->BeginWait(std::addressof(wait_queue));
63 78
64 if (owner_thread->IsSuspended()) { 79 if (owner_thread->IsSuspended()) {
65 owner_thread->ContinueIfHasKernelWaiters(); 80 owner_thread->ContinueIfHasKernelWaiters();
66 } 81 }
67 } 82 }
68 83
69 // We're no longer waiting on the lock owner. 84 return true;
70 {
71 KScopedSchedulerLock sl{kernel};
72
73 if (KThread* owner_thread = cur_thread->GetLockOwner(); owner_thread != nullptr) {
74 owner_thread->RemoveWaiter(cur_thread);
75 }
76 }
77} 85}
78 86
79void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) { 87void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
@@ -81,22 +89,20 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
81 89
82 // Unlock. 90 // Unlock.
83 { 91 {
84 KScopedSchedulerLock sl{kernel}; 92 KScopedSchedulerLock sl(kernel);
85 93
86 // Get the next owner. 94 // Get the next owner.
87 s32 num_waiters = 0; 95 s32 num_waiters;
88 KThread* next_owner = owner_thread->RemoveWaiterByKey( 96 KThread* next_owner = owner_thread->RemoveWaiterByKey(
89 std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag))); 97 std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));
90 98
91 // Pass the lock to the next owner. 99 // Pass the lock to the next owner.
92 uintptr_t next_tag = 0; 100 uintptr_t next_tag = 0;
93 if (next_owner != nullptr) { 101 if (next_owner != nullptr) {
94 next_tag = reinterpret_cast<uintptr_t>(next_owner); 102 next_tag =
95 if (num_waiters > 1) { 103 reinterpret_cast<uintptr_t>(next_owner) | static_cast<uintptr_t>(num_waiters > 1);
96 next_tag |= 0x1;
97 }
98 104
99 next_owner->SetState(ThreadState::Runnable); 105 next_owner->EndWait(ResultSuccess);
100 106
101 if (next_owner->IsSuspended()) { 107 if (next_owner->IsSuspended()) {
102 next_owner->ContinueIfHasKernelWaiters(); 108 next_owner->ContinueIfHasKernelWaiters();
@@ -110,7 +116,7 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
110 } 116 }
111 117
112 // Write the new tag value. 118 // Write the new tag value.
113 tag.store(next_tag); 119 tag.store(next_tag, std::memory_order_release);
114 } 120 }
115} 121}
116 122
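The rewritten fast paths rest on one invariant: tag is 0 when the lock is free, the owner KThread* when held uncontended, and owner|1 once any waiter exists (thread objects are sufficiently aligned that bit 0 is spare). A self-contained sketch of just that atomic protocol, with the slow paths stubbed out (stand-in types, not yuzu code):

```cpp
#include <atomic>
#include <cstdint>

struct alignas(2) Thread {}; // alignment guarantees bit 0 of the pointer is free

class TagLock {
public:
    void Lock(Thread* self) {
        const auto me = reinterpret_cast<uintptr_t>(self);
        while (true) {
            uintptr_t old_tag = tag_.load(std::memory_order_relaxed);
            // Free -> take ownership; owned -> set the contention bit. Retry on CAS failure.
            while (!tag_.compare_exchange_weak(old_tag, old_tag == 0 ? me : (old_tag | 1),
                                               std::memory_order_acquire)) {
            }
            if (old_tag == 0 || LockSlowPath(old_tag | 1, self)) {
                break; // acquired, either directly or after sleeping as a waiter
            }
        }
    }

    void Unlock(Thread* self) {
        uintptr_t expected = reinterpret_cast<uintptr_t>(self);
        // An exact match means no waiters: release with a single CAS.
        if (!tag_.compare_exchange_strong(expected, 0, std::memory_order_release)) {
            UnlockSlowPath(self); // contention bit was set: hand off to a waiter
        }
    }

private:
    // Stubs: the real slow paths register the caller as a waiter, sleep it,
    // and hand the lock to the next owner on unlock.
    bool LockSlowPath(uintptr_t /*owner_tag*/, Thread* /*self*/) { return true; }
    void UnlockSlowPath(Thread* /*self*/) {}

    std::atomic<uintptr_t> tag_{0};
};
```

Note how UnlockSlowPath's handoff in the diff rebuilds the same encoding in one expression: `reinterpret_cast<uintptr_t>(next_owner) | static_cast<uintptr_t>(num_waiters > 1)`.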
diff --git a/src/core/hle/kernel/k_light_lock.h b/src/core/hle/kernel/k_light_lock.h
index ad853661d..4163b8a85 100644
--- a/src/core/hle/kernel/k_light_lock.h
+++ b/src/core/hle/kernel/k_light_lock.h
@@ -20,7 +20,7 @@ public:
 
     void Unlock();
 
-    void LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
+    bool LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
 
     void UnlockSlowPath(uintptr_t cur_thread);
 
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 1aad061e1..90dda40dc 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -60,6 +60,7 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
     thread->GetContext64().cpu_registers[0] = 0;
     thread->GetContext32().cpu_registers[1] = thread_handle;
     thread->GetContext64().cpu_registers[1] = thread_handle;
+    thread->DisableDispatch();
 
     auto& kernel = system.Kernel();
     // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
@@ -227,12 +228,15 @@ void KProcess::PinCurrentThread() {
     const s32 core_id = GetCurrentCoreId(kernel);
     KThread* cur_thread = GetCurrentThreadPointer(kernel);
 
-    // Pin it.
-    PinThread(core_id, cur_thread);
-    cur_thread->Pin();
+    // If the thread isn't terminated, pin it.
+    if (!cur_thread->IsTerminationRequested()) {
+        // Pin it.
+        PinThread(core_id, cur_thread);
+        cur_thread->Pin();
 
-    // An update is needed.
-    KScheduler::SetSchedulerUpdateNeeded(kernel);
+        // An update is needed.
+        KScheduler::SetSchedulerUpdateNeeded(kernel);
+    }
 }
 
 void KProcess::UnpinCurrentThread() {
@@ -250,6 +254,20 @@ void KProcess::UnpinCurrentThread() {
     KScheduler::SetSchedulerUpdateNeeded(kernel);
 }
 
+void KProcess::UnpinThread(KThread* thread) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+    // Get the thread's core id.
+    const auto core_id = thread->GetActiveCore();
+
+    // Unpin it.
+    UnpinThread(core_id, thread);
+    thread->Unpin();
+
+    // An update is needed.
+    KScheduler::SetSchedulerUpdateNeeded(kernel);
+}
+
 ResultCode KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
                                      [[maybe_unused]] size_t size) {
     // Lock ourselves, to prevent concurrent access.
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 8a8c1fcbb..cb93c7e24 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -347,6 +347,7 @@ public:
 
     void PinCurrentThread();
     void UnpinCurrentThread();
+    void UnpinThread(KThread* thread);
 
     KLightLock& GetStateLock() {
         return state_lock;
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 6a7d80d03..277201de4 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -240,8 +240,8 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s3
240 240
241 // If the thread is runnable, we want to change its priority in the queue. 241 // If the thread is runnable, we want to change its priority in the queue.
242 if (thread->GetRawState() == ThreadState::Runnable) { 242 if (thread->GetRawState() == ThreadState::Runnable) {
243 GetPriorityQueue(kernel).ChangePriority( 243 GetPriorityQueue(kernel).ChangePriority(old_priority,
244 old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread); 244 thread == kernel.GetCurrentEmuThread(), thread);
245 IncrementScheduledCount(thread); 245 IncrementScheduledCount(thread);
246 SetSchedulerUpdateNeeded(kernel); 246 SetSchedulerUpdateNeeded(kernel);
247 } 247 }
@@ -360,7 +360,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
360} 360}
361 361
362bool KScheduler::CanSchedule(KernelCore& kernel) { 362bool KScheduler::CanSchedule(KernelCore& kernel) {
363 return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1; 363 return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() <= 1;
364} 364}
365 365
366bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) { 366bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
@@ -376,20 +376,30 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
376} 376}
377 377
378void KScheduler::DisableScheduling(KernelCore& kernel) { 378void KScheduler::DisableScheduling(KernelCore& kernel) {
379 if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { 379 // If we are shutting down the kernel, none of this is relevant anymore.
380 ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0); 380 if (kernel.IsShuttingDown()) {
381 scheduler->GetCurrentThread()->DisableDispatch(); 381 return;
382 } 382 }
383
384 ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
385 GetCurrentThreadPointer(kernel)->DisableDispatch();
383} 386}
384 387
385void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) { 388void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
386 if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { 389 // If we are shutting down the kernel, none of this is relevant anymore.
387 ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1); 390 if (kernel.IsShuttingDown()) {
388 if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) { 391 return;
389 scheduler->GetCurrentThread()->EnableDispatch(); 392 }
390 } 393
394 auto* current_thread = GetCurrentThreadPointer(kernel);
395
396 ASSERT(current_thread->GetDisableDispatchCount() >= 1);
397
398 if (current_thread->GetDisableDispatchCount() > 1) {
399 current_thread->EnableDispatch();
400 } else {
401 RescheduleCores(kernel, cores_needing_scheduling);
391 } 402 }
392 RescheduleCores(kernel, cores_needing_scheduling);
393} 403}
394 404
395u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { 405u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
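The reworked EnableScheduling() above decrements the dispatch-disable count only while it is nested above one; at exactly one it hands the final decrement to the reschedule path. A compilable sketch of that counter discipline, with a global std::atomic standing in for the per-thread stack parameter (an assumed simplification):

    #include <atomic>
    #include <cassert>
    #include <cstdio>

    std::atomic<int> disable_count{0};

    void DisableScheduling() {
        assert(disable_count.load() >= 0);
        ++disable_count;
    }

    void RescheduleCores() {
        // In the real code the final decrement happens inside the
        // reschedule/dispatch machinery; modeled here as a direct decrement.
        std::puts("reschedule cores");
        --disable_count;
    }

    void EnableScheduling() {
        assert(disable_count.load() >= 1);
        if (disable_count.load() > 1) {
            --disable_count;   // still nested: just drop one level
        } else {
            RescheduleCores(); // outermost level: actually reschedule
        }
    }

    int main() {
        DisableScheduling();
        DisableScheduling();
        EnableScheduling(); // nested: decrement only
        EnableScheduling(); // outermost: triggers the reschedule
        assert(disable_count.load() == 0);
    }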
@@ -617,13 +627,17 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
617 state.highest_priority_thread = nullptr; 627 state.highest_priority_thread = nullptr;
618} 628}
619 629
620KScheduler::~KScheduler() { 630void KScheduler::Finalize() {
621 if (idle_thread) { 631 if (idle_thread) {
622 idle_thread->Close(); 632 idle_thread->Close();
623 idle_thread = nullptr; 633 idle_thread = nullptr;
624 } 634 }
625} 635}
626 636
637KScheduler::~KScheduler() {
638 ASSERT(!idle_thread);
639}
640
627KThread* KScheduler::GetCurrentThread() const { 641KThread* KScheduler::GetCurrentThread() const {
628 if (auto result = current_thread.load(); result) { 642 if (auto result = current_thread.load(); result) {
629 return result; 643 return result;
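Moving the idle-thread cleanup out of ~KScheduler() into an explicit Finalize() is a two-phase teardown: release at a controlled point during kernel shutdown, then let the destructor merely assert that this already happened. A generic sketch of the pattern (standard C++; the real code Close()es a reference-counted thread rather than deleting):

    #include <cassert>

    struct Thread {};

    class Scheduler {
    public:
        void Finalize() {
            // Explicit teardown at a well-defined point during shutdown.
            delete idle_thread;
            idle_thread = nullptr;
        }

        ~Scheduler() {
            // By destruction time, Finalize() must already have run.
            assert(idle_thread == nullptr);
        }

    private:
        Thread* idle_thread = new Thread{};
    };

    int main() {
        Scheduler s;
        s.Finalize(); // skipping this would trip the destructor's assert
    }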
@@ -642,10 +656,12 @@ void KScheduler::RescheduleCurrentCore() {
642 if (phys_core.IsInterrupted()) { 656 if (phys_core.IsInterrupted()) {
643 phys_core.ClearInterrupt(); 657 phys_core.ClearInterrupt();
644 } 658 }
659
645 guard.Lock(); 660 guard.Lock();
646 if (state.needs_scheduling.load()) { 661 if (state.needs_scheduling.load()) {
647 Schedule(); 662 Schedule();
648 } else { 663 } else {
664 GetCurrentThread()->EnableDispatch();
649 guard.Unlock(); 665 guard.Unlock();
650 } 666 }
651} 667}
@@ -655,26 +671,33 @@ void KScheduler::OnThreadStart() {
655} 671}
656 672
657void KScheduler::Unload(KThread* thread) { 673void KScheduler::Unload(KThread* thread) {
674 ASSERT(thread);
675
658 LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr"); 676 LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
659 677
660 if (thread) { 678 if (thread->IsCallingSvc()) {
661 if (thread->IsCallingSvc()) { 679 thread->ClearIsCallingSvc();
662 thread->ClearIsCallingSvc(); 680 }
663 } 681
664 if (!thread->IsTerminationRequested()) { 682 auto& physical_core = system.Kernel().PhysicalCore(core_id);
665 prev_thread = thread; 683 if (!physical_core.IsInitialized()) {
666 684 return;
667 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); 685 }
668 cpu_core.SaveContext(thread->GetContext32()); 686
669 cpu_core.SaveContext(thread->GetContext64()); 687 Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
670 // Save the TPIDR_EL0 system register in case it was modified. 688 cpu_core.SaveContext(thread->GetContext32());
671 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); 689 cpu_core.SaveContext(thread->GetContext64());
672 cpu_core.ClearExclusiveState(); 690 // Save the TPIDR_EL0 system register in case it was modified.
673 } else { 691 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
674 prev_thread = nullptr; 692 cpu_core.ClearExclusiveState();
675 } 693
676 thread->context_guard.Unlock(); 694 if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
695 prev_thread = thread;
696 } else {
697 prev_thread = nullptr;
677 } 698 }
699
700 thread->context_guard.Unlock();
678} 701}
679 702
680void KScheduler::Reload(KThread* thread) { 703void KScheduler::Reload(KThread* thread) {
@@ -683,11 +706,6 @@ void KScheduler::Reload(KThread* thread) {
683 if (thread) { 706 if (thread) {
684 ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable."); 707 ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
685 708
686 auto* const thread_owner_process = thread->GetOwnerProcess();
687 if (thread_owner_process != nullptr) {
688 system.Kernel().MakeCurrentProcess(thread_owner_process);
689 }
690
691 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); 709 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
692 cpu_core.LoadContext(thread->GetContext32()); 710 cpu_core.LoadContext(thread->GetContext32());
693 cpu_core.LoadContext(thread->GetContext64()); 711 cpu_core.LoadContext(thread->GetContext64());
@@ -705,7 +723,7 @@ void KScheduler::SwitchContextStep2() {
705} 723}
706 724
707void KScheduler::ScheduleImpl() { 725void KScheduler::ScheduleImpl() {
708 KThread* previous_thread = current_thread.load(); 726 KThread* previous_thread = GetCurrentThread();
709 KThread* next_thread = state.highest_priority_thread; 727 KThread* next_thread = state.highest_priority_thread;
710 728
711 state.needs_scheduling = false; 729 state.needs_scheduling = false;
@@ -717,10 +735,15 @@ void KScheduler::ScheduleImpl() {
717 735
718 // If we're not actually switching thread, there's nothing to do. 736 // If we're not actually switching thread, there's nothing to do.
719 if (next_thread == current_thread.load()) { 737 if (next_thread == current_thread.load()) {
738 previous_thread->EnableDispatch();
720 guard.Unlock(); 739 guard.Unlock();
721 return; 740 return;
722 } 741 }
723 742
743 if (next_thread->GetCurrentCore() != core_id) {
744 next_thread->SetCurrentCore(core_id);
745 }
746
724 current_thread.store(next_thread); 747 current_thread.store(next_thread);
725 748
726 KProcess* const previous_process = system.Kernel().CurrentProcess(); 749 KProcess* const previous_process = system.Kernel().CurrentProcess();
@@ -731,11 +754,7 @@ void KScheduler::ScheduleImpl() {
731 Unload(previous_thread); 754 Unload(previous_thread);
732 755
733 std::shared_ptr<Common::Fiber>* old_context; 756 std::shared_ptr<Common::Fiber>* old_context;
734 if (previous_thread != nullptr) { 757 old_context = &previous_thread->GetHostContext();
735 old_context = &previous_thread->GetHostContext();
736 } else {
737 old_context = &idle_thread->GetHostContext();
738 }
739 guard.Unlock(); 758 guard.Unlock();
740 759
741 Common::Fiber::YieldTo(*old_context, *switch_fiber); 760 Common::Fiber::YieldTo(*old_context, *switch_fiber);
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 7df288438..82fcd99e7 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -33,6 +33,8 @@ public:
33 explicit KScheduler(Core::System& system_, s32 core_id_); 33 explicit KScheduler(Core::System& system_, s32 core_id_);
34 ~KScheduler(); 34 ~KScheduler();
35 35
36 void Finalize();
37
36 /// Reschedules to the next available thread (call after current thread is suspended) 38 /// Reschedules to the next available thread (call after current thread is suspended)
37 void RescheduleCurrentCore(); 39 void RescheduleCurrentCore();
38 40
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index c571f2992..93c47f1b1 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -23,6 +23,11 @@ public:
23 } 23 }
24 24
25 void Lock() { 25 void Lock() {
26 // If we are shutting down the kernel, none of this is relevant anymore.
27 if (kernel.IsShuttingDown()) {
28 return;
29 }
30
26 if (IsLockedByCurrentThread()) { 31 if (IsLockedByCurrentThread()) {
27 // If we already own the lock, we can just increment the count. 32 // If we already own the lock, we can just increment the count.
28 ASSERT(lock_count > 0); 33 ASSERT(lock_count > 0);
@@ -43,6 +48,11 @@ public:
43 } 48 }
44 49
45 void Unlock() { 50 void Unlock() {
51 // If we are shutting down the kernel, none of this is relevant anymore.
52 if (kernel.IsShuttingDown()) {
53 return;
54 }
55
46 ASSERT(IsLockedByCurrentThread()); 56 ASSERT(IsLockedByCurrentThread());
47 ASSERT(lock_count > 0); 57 ASSERT(lock_count > 0);
48 58
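KSchedulerLock stays re-entrant: the owning thread bumps lock_count instead of re-acquiring, and only the final Unlock releases. A self-contained model of just the owner/count discipline, with std::mutex standing in for the scheduling machinery and the shutdown early-outs omitted (illustrative, not the yuzu implementation):

    #include <atomic>
    #include <cassert>
    #include <mutex>
    #include <thread>

    class SchedulerLockModel {
    public:
        void Lock() {
            if (owner.load() == std::this_thread::get_id()) {
                // Re-entrant acquire: the owner only bumps the count.
                assert(count > 0);
                ++count;
                return;
            }
            mutex.lock();
            owner.store(std::this_thread::get_id());
            count = 1;
        }

        void Unlock() {
            assert(owner.load() == std::this_thread::get_id());
            assert(count > 0);
            if (--count == 0) {
                owner.store(std::thread::id{});
                mutex.unlock();
            }
        }

    private:
        std::mutex mutex;
        std::atomic<std::thread::id> owner{};
        int count = 0;
    };

    int main() {
        SchedulerLockModel lock;
        lock.Lock();
        lock.Lock();   // nested acquire by the same thread
        lock.Unlock();
        lock.Unlock(); // final unlock actually releases
    }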
diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
index 61dc2858f..2995c492d 100644
--- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
+++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
@@ -8,6 +8,7 @@
8#pragma once 8#pragma once
9 9
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "core/hle/kernel/global_scheduler_context.h"
11#include "core/hle/kernel/k_thread.h" 12#include "core/hle/kernel/k_thread.h"
12#include "core/hle/kernel/kernel.h" 13#include "core/hle/kernel/kernel.h"
13#include "core/hle/kernel/time_manager.h" 14#include "core/hle/kernel/time_manager.h"
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index 2bd53ccbd..d4e4a6b06 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -175,8 +175,7 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
175 { 175 {
176 KScopedSchedulerLock lock(kernel); 176 KScopedSchedulerLock lock(kernel);
177 if (!context.IsThreadWaiting()) { 177 if (!context.IsThreadWaiting()) {
178 context.GetThread().Wakeup(); 178 context.GetThread().EndWait(result);
179 context.GetThread().SetSyncedObject(nullptr, result);
180 } 179 }
181 } 180 }
182 181
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
index f168b4f21..e4c5eb74f 100644
--- a/src/core/hle/kernel/k_synchronization_object.cpp
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -8,11 +8,66 @@
8#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 8#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
9#include "core/hle/kernel/k_synchronization_object.h" 9#include "core/hle/kernel/k_synchronization_object.h"
10#include "core/hle/kernel/k_thread.h" 10#include "core/hle/kernel/k_thread.h"
11#include "core/hle/kernel/k_thread_queue.h"
11#include "core/hle/kernel/kernel.h" 12#include "core/hle/kernel/kernel.h"
12#include "core/hle/kernel/svc_results.h" 13#include "core/hle/kernel/svc_results.h"
13 14
14namespace Kernel { 15namespace Kernel {
15 16
17namespace {
18
19class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait {
20public:
21 ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel_, KSynchronizationObject** o,
22 KSynchronizationObject::ThreadListNode* n, s32 c)
23 : KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) {}
24
25 void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
26 ResultCode wait_result) override {
27 // Determine the sync index, and unlink all nodes.
28 s32 sync_index = -1;
29 for (auto i = 0; i < m_count; ++i) {
30 // Check if this is the signaled object.
31 if (m_objects[i] == signaled_object && sync_index == -1) {
32 sync_index = i;
33 }
34
35 // Unlink the current node from the current object.
36 m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
37 }
38
39 // Set the waiting thread's sync index.
40 waiting_thread->SetSyncedIndex(sync_index);
41
42 // Set the waiting thread as not cancellable.
43 waiting_thread->ClearCancellable();
44
45 // Invoke the base end wait handler.
46 KThreadQueue::EndWait(waiting_thread, wait_result);
47 }
48
49 void CancelWait(KThread* waiting_thread, ResultCode wait_result,
50 bool cancel_timer_task) override {
51 // Remove all nodes from our list.
52 for (auto i = 0; i < m_count; ++i) {
53 m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
54 }
55
56 // Set the waiting thread as not cancellable.
57 waiting_thread->ClearCancellable();
58
59 // Invoke the base cancel wait handler.
60 KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
61 }
62
63private:
64 KSynchronizationObject** m_objects;
65 KSynchronizationObject::ThreadListNode* m_nodes;
66 s32 m_count;
67};
68
69} // namespace
70
16void KSynchronizationObject::Finalize() { 71void KSynchronizationObject::Finalize() {
17 this->OnFinalizeSynchronizationObject(); 72 this->OnFinalizeSynchronizationObject();
18 KAutoObject::Finalize(); 73 KAutoObject::Finalize();
@@ -25,11 +80,19 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
25 std::vector<ThreadListNode> thread_nodes(num_objects); 80 std::vector<ThreadListNode> thread_nodes(num_objects);
26 81
27 // Prepare for wait. 82 // Prepare for wait.
28 KThread* thread = kernel_ctx.CurrentScheduler()->GetCurrentThread(); 83 KThread* thread = GetCurrentThreadPointer(kernel_ctx);
84 ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel_ctx, objects,
85 thread_nodes.data(), num_objects);
29 86
30 { 87 {
31 // Setup the scheduling lock and sleep. 88 // Setup the scheduling lock and sleep.
32 KScopedSchedulerLockAndSleep slp{kernel_ctx, thread, timeout}; 89 KScopedSchedulerLockAndSleep slp(kernel_ctx, thread, timeout);
90
91 // Check if the thread should terminate.
92 if (thread->IsTerminationRequested()) {
93 slp.CancelSleep();
94 return ResultTerminationRequested;
95 }
33 96
34 // Check if any of the objects are already signaled. 97 // Check if any of the objects are already signaled.
35 for (auto i = 0; i < num_objects; ++i) { 98 for (auto i = 0; i < num_objects; ++i) {
@@ -48,12 +111,6 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
48 return ResultTimedOut; 111 return ResultTimedOut;
49 } 112 }
50 113
51 // Check if the thread should terminate.
52 if (thread->IsTerminationRequested()) {
53 slp.CancelSleep();
54 return ResultTerminationRequested;
55 }
56
57 // Check if waiting was canceled. 114 // Check if waiting was canceled.
58 if (thread->IsWaitCancelled()) { 115 if (thread->IsWaitCancelled()) {
59 slp.CancelSleep(); 116 slp.CancelSleep();
@@ -66,73 +123,25 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
66 thread_nodes[i].thread = thread; 123 thread_nodes[i].thread = thread;
67 thread_nodes[i].next = nullptr; 124 thread_nodes[i].next = nullptr;
68 125
69 if (objects[i]->thread_list_tail == nullptr) { 126 objects[i]->LinkNode(std::addressof(thread_nodes[i]));
70 objects[i]->thread_list_head = std::addressof(thread_nodes[i]);
71 } else {
72 objects[i]->thread_list_tail->next = std::addressof(thread_nodes[i]);
73 }
74
75 objects[i]->thread_list_tail = std::addressof(thread_nodes[i]);
76 } 127 }
77 128
78 // For debugging only 129 // Mark the thread as cancellable.
79 thread->SetWaitObjectsForDebugging({objects, static_cast<std::size_t>(num_objects)});
80
81 // Mark the thread as waiting.
82 thread->SetCancellable(); 130 thread->SetCancellable();
83 thread->SetSyncedObject(nullptr, ResultTimedOut);
84 thread->SetState(ThreadState::Waiting);
85 thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
86 }
87 131
88 // The lock/sleep is done, so we should be able to get our result. 132 // Clear the thread's synced index.
133 thread->SetSyncedIndex(-1);
89 134
90 // Thread is no longer cancellable. 135 // Wait for an object to be signaled.
91 thread->ClearCancellable(); 136 thread->BeginWait(std::addressof(wait_queue));
92 137 thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
93 // For debugging only 138 }
94 thread->SetWaitObjectsForDebugging({});
95 139
96 // Cancel the timer as needed. 140 // Set the output index.
97 kernel_ctx.TimeManager().UnscheduleTimeEvent(thread); 141 *out_index = thread->GetSyncedIndex();
98 142
99 // Get the wait result. 143 // Get the wait result.
100 ResultCode wait_result{ResultSuccess}; 144 return thread->GetWaitResult();
101 s32 sync_index = -1;
102 {
103 KScopedSchedulerLock lock(kernel_ctx);
104 KSynchronizationObject* synced_obj;
105 wait_result = thread->GetWaitResult(std::addressof(synced_obj));
106
107 for (auto i = 0; i < num_objects; ++i) {
108 // Unlink the object from the list.
109 ThreadListNode* prev_ptr =
110 reinterpret_cast<ThreadListNode*>(std::addressof(objects[i]->thread_list_head));
111 ThreadListNode* prev_val = nullptr;
112 ThreadListNode *prev, *tail_prev;
113
114 do {
115 prev = prev_ptr;
116 prev_ptr = prev_ptr->next;
117 tail_prev = prev_val;
118 prev_val = prev_ptr;
119 } while (prev_ptr != std::addressof(thread_nodes[i]));
120
121 if (objects[i]->thread_list_tail == std::addressof(thread_nodes[i])) {
122 objects[i]->thread_list_tail = tail_prev;
123 }
124
125 prev->next = thread_nodes[i].next;
126
127 if (objects[i] == synced_obj) {
128 sync_index = i;
129 }
130 }
131 }
132
133 // Set output.
134 *out_index = sync_index;
135 return wait_result;
136} 145}
137 146
138KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_) 147KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
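The rewritten Wait() links one node per object, parks the thread on the purpose-built queue, and reads the synced index and wait result back after wakeup. A self-contained analog of that control flow built on std::condition_variable (a model only: a host mutex plays the scheduler lock, and a vector replaces the intrusive lists):

    #include <chrono>
    #include <condition_variable>
    #include <cstddef>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    struct Event {
        bool signaled = false;
    };

    std::mutex m;               // stands in for the scheduler lock
    std::condition_variable cv; // stands in for the thread queue

    // Returns the index of the first signaled object, or -1 on timeout.
    int WaitAny(const std::vector<Event*>& objects, std::chrono::milliseconds timeout) {
        std::unique_lock lk(m);
        int synced_index = -1;
        cv.wait_for(lk, timeout, [&] {
            for (std::size_t i = 0; i < objects.size(); ++i) {
                if (objects[i]->signaled) {
                    synced_index = static_cast<int>(i);
                    return true;
                }
            }
            return false;
        });
        return synced_index;
    }

    int main() {
        Event a, b;
        std::vector<Event*> objects{&a, &b};
        std::thread signaler([&] {
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            std::lock_guard lg(m);
            b.signaled = true;
            cv.notify_all(); // NotifyAvailable analog
        });
        std::printf("synced index: %d\n", WaitAny(objects, std::chrono::milliseconds(500)));
        signaler.join();
    }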
@@ -141,7 +150,7 @@ KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
141KSynchronizationObject::~KSynchronizationObject() = default; 150KSynchronizationObject::~KSynchronizationObject() = default;
142 151
143void KSynchronizationObject::NotifyAvailable(ResultCode result) { 152void KSynchronizationObject::NotifyAvailable(ResultCode result) {
144 KScopedSchedulerLock lock(kernel); 153 KScopedSchedulerLock sl(kernel);
145 154
146 // If we're not signaled, we've nothing to notify. 155 // If we're not signaled, we've nothing to notify.
147 if (!this->IsSignaled()) { 156 if (!this->IsSignaled()) {
@@ -150,11 +159,7 @@ void KSynchronizationObject::NotifyAvailable(ResultCode result) {
150 159
151 // Iterate over each thread. 160 // Iterate over each thread.
152 for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) { 161 for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
153 KThread* thread = cur_node->thread; 162 cur_node->thread->NotifyAvailable(this, result);
154 if (thread->GetState() == ThreadState::Waiting) {
155 thread->SetSyncedObject(this, result);
156 thread->SetState(ThreadState::Runnable);
157 }
158 } 163 }
159} 164}
160 165
diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h
index 898e58e16..ec235437b 100644
--- a/src/core/hle/kernel/k_synchronization_object.h
+++ b/src/core/hle/kernel/k_synchronization_object.h
@@ -35,6 +35,38 @@ public:
35 35
36 [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const; 36 [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;
37 37
38 void LinkNode(ThreadListNode* node_) {
39 // Link the node to the list.
40 if (thread_list_tail == nullptr) {
41 thread_list_head = node_;
42 } else {
43 thread_list_tail->next = node_;
44 }
45
46 thread_list_tail = node_;
47 }
48
49 void UnlinkNode(ThreadListNode* node_) {
50 // Unlink the node from the list.
51 ThreadListNode* prev_ptr =
52 reinterpret_cast<ThreadListNode*>(std::addressof(thread_list_head));
53 ThreadListNode* prev_val = nullptr;
54 ThreadListNode *prev, *tail_prev;
55
56 do {
57 prev = prev_ptr;
58 prev_ptr = prev_ptr->next;
59 tail_prev = prev_val;
60 prev_val = prev_ptr;
61 } while (prev_ptr != node_);
62
63 if (thread_list_tail == node_) {
64 thread_list_tail = tail_prev;
65 }
66
67 prev->next = node_->next;
68 }
69
38protected: 70protected:
39 explicit KSynchronizationObject(KernelCore& kernel); 71 explicit KSynchronizationObject(KernelCore& kernel);
40 ~KSynchronizationObject() override; 72 ~KSynchronizationObject() override;
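LinkNode()/UnlinkNode() maintain a tail-tracked singly linked list, and the unlink walks from a pseudo-node: the reinterpret_cast treats the head pointer itself as a node whose next field is the head, which only works because next is the node's first member. The same algorithm as a runnable standalone (illustrative Node/List types):

    #include <cassert>
    #include <memory>

    struct Node {
        Node* next = nullptr; // must stay the first member for the head trick
        int id = 0;
    };

    struct List {
        Node* head = nullptr;
        Node* tail = nullptr;

        void Link(Node* n) {
            if (tail == nullptr) {
                head = n;
            } else {
                tail->next = n;
            }
            tail = n;
        }

        void Unlink(Node* n) {
            // Alias the head pointer as a pseudo-node whose 'next' is 'head'.
            Node* prev = reinterpret_cast<Node*>(std::addressof(head));
            while (prev->next != n) {
                prev = prev->next;
            }
            if (tail == n) {
                tail = (prev == reinterpret_cast<Node*>(std::addressof(head)))
                           ? nullptr
                           : prev;
            }
            prev->next = n->next;
        }
    };

    int main() {
        Node a{nullptr, 1}, b{nullptr, 2}, c{nullptr, 3};
        List l;
        l.Link(&a); l.Link(&b); l.Link(&c);
        l.Unlink(&b);
        assert(l.head == &a && a.next == &c && l.tail == &c);
        l.Unlink(&c);
        assert(l.tail == &a);
        l.Unlink(&a);
        assert(l.head == nullptr && l.tail == nullptr);
    }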
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index db65ce79a..752592e2e 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -13,6 +13,9 @@
13#include "common/common_types.h" 13#include "common/common_types.h"
14#include "common/fiber.h" 14#include "common/fiber.h"
15#include "common/logging/log.h" 15#include "common/logging/log.h"
16#include "common/scope_exit.h"
17#include "common/settings.h"
18#include "common/thread_queue_list.h"
16#include "core/core.h" 19#include "core/core.h"
17#include "core/cpu_manager.h" 20#include "core/cpu_manager.h"
18#include "core/hardware_properties.h" 21#include "core/hardware_properties.h"
@@ -56,6 +59,34 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context,
56 59
57namespace Kernel { 60namespace Kernel {
58 61
62namespace {
63
64class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait {
65public:
66 explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel_)
67 : KThreadQueueWithoutEndWait(kernel_) {}
68};
69
70class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue {
71public:
72 explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl)
73 : KThreadQueue(kernel_), m_wait_list(wl) {}
74
75 void CancelWait(KThread* waiting_thread, ResultCode wait_result,
76 bool cancel_timer_task) override {
77 // Remove the thread from the wait list.
78 m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
79
80 // Invoke the base cancel wait handler.
81 KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
82 }
83
84private:
85 KThread::WaiterList* m_wait_list;
86};
87
88} // namespace
89
59KThread::KThread(KernelCore& kernel_) 90KThread::KThread(KernelCore& kernel_)
60 : KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {} 91 : KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {}
61KThread::~KThread() = default; 92KThread::~KThread() = default;
@@ -82,6 +113,8 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
82 [[fallthrough]]; 113 [[fallthrough]];
83 case ThreadType::HighPriority: 114 case ThreadType::HighPriority:
84 [[fallthrough]]; 115 [[fallthrough]];
116 case ThreadType::Dummy:
117 [[fallthrough]];
85 case ThreadType::User: 118 case ThreadType::User:
86 ASSERT(((owner == nullptr) || 119 ASSERT(((owner == nullptr) ||
87 (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask())); 120 (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
@@ -127,11 +160,8 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
127 priority = prio; 160 priority = prio;
128 base_priority = prio; 161 base_priority = prio;
129 162
130 // Set sync object and waiting lock to null.
131 synced_object = nullptr;
132
133 // Initialize sleeping queue. 163 // Initialize sleeping queue.
134 sleeping_queue = nullptr; 164 wait_queue = nullptr;
135 165
136 // Set suspend flags. 166 // Set suspend flags.
137 suspend_request_flags = 0; 167 suspend_request_flags = 0;
@@ -184,7 +214,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
184 // Setup the stack parameters. 214 // Setup the stack parameters.
185 StackParameters& sp = GetStackParameters(); 215 StackParameters& sp = GetStackParameters();
186 sp.cur_thread = this; 216 sp.cur_thread = this;
187 sp.disable_count = 1; 217 sp.disable_count = 0;
188 SetInExceptionHandler(); 218 SetInExceptionHandler();
189 219
190 // Set thread ID. 220 // Set thread ID.
@@ -211,15 +241,16 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
211 // Initialize the thread. 241 // Initialize the thread.
212 R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type)); 242 R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
213 243
214 // Initialize host context. 244 // Initialize emulation parameters.
215 thread->host_context = 245 thread->host_context =
216 std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter); 246 std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
247 thread->is_single_core = !Settings::values.use_multi_core.GetValue();
217 248
218 return ResultSuccess; 249 return ResultSuccess;
219} 250}
220 251
221ResultCode KThread::InitializeDummyThread(KThread* thread) { 252ResultCode KThread::InitializeDummyThread(KThread* thread) {
222 return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Main); 253 return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Dummy);
223} 254}
224 255
225ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { 256ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
@@ -273,11 +304,14 @@ void KThread::Finalize() {
273 304
274 auto it = waiter_list.begin(); 305 auto it = waiter_list.begin();
275 while (it != waiter_list.end()) { 306 while (it != waiter_list.end()) {
276 // The thread shouldn't be a kernel waiter. 307 // Clear the lock owner.
277 it->SetLockOwner(nullptr); 308 it->SetLockOwner(nullptr);
278 it->SetSyncedObject(nullptr, ResultInvalidState); 309
279 it->Wakeup(); 310 // Erase the waiter from our list, and cancel its wait.
280 it = waiter_list.erase(it); 311 KThread* const waiter = std::addressof(*it);
312 it = waiter_list.erase(it);
313
314 waiter->CancelWait(ResultInvalidState, true);
281 } 315 }
282 } 316 }
283 317
@@ -294,15 +328,12 @@ bool KThread::IsSignaled() const {
294 return signaled; 328 return signaled;
295} 329}
296 330
297void KThread::Wakeup() { 331void KThread::OnTimer() {
298 KScopedSchedulerLock sl{kernel}; 332 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
299 333
334 // If we're waiting, cancel the wait.
300 if (GetState() == ThreadState::Waiting) { 335 if (GetState() == ThreadState::Waiting) {
301 if (sleeping_queue != nullptr) { 336 wait_queue->CancelWait(this, ResultTimedOut, false);
302 sleeping_queue->WakeupThread(this);
303 } else {
304 SetState(ThreadState::Runnable);
305 }
306 } 337 }
307} 338}
308 339
@@ -327,7 +358,7 @@ void KThread::StartTermination() {
327 358
328 // Signal. 359 // Signal.
329 signaled = true; 360 signaled = true;
330 NotifyAvailable(); 361 KSynchronizationObject::NotifyAvailable();
331 362
332 // Clear previous thread in KScheduler. 363 // Clear previous thread in KScheduler.
333 KScheduler::ClearPreviousThread(kernel, this); 364 KScheduler::ClearPreviousThread(kernel, this);
@@ -475,30 +506,32 @@ ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_m
475 return ResultSuccess; 506 return ResultSuccess;
476} 507}
477 508
478ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) { 509ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
479 ASSERT(parent != nullptr); 510 ASSERT(parent != nullptr);
480 ASSERT(v_affinity_mask != 0); 511 ASSERT(v_affinity_mask != 0);
481 KScopedLightLock lk{activity_pause_lock}; 512 KScopedLightLock lk(activity_pause_lock);
482 513
483 // Set the core mask. 514 // Set the core mask.
484 u64 p_affinity_mask = 0; 515 u64 p_affinity_mask = 0;
485 { 516 {
486 KScopedSchedulerLock sl{kernel}; 517 KScopedSchedulerLock sl(kernel);
487 ASSERT(num_core_migration_disables >= 0); 518 ASSERT(num_core_migration_disables >= 0);
488 519
489 // If the core id is no-update magic, preserve the ideal core id. 520 // If we're updating, set our ideal virtual core.
490 if (cpu_core_id == Svc::IdealCoreNoUpdate) { 521 if (core_id_ != Svc::IdealCoreNoUpdate) {
491 cpu_core_id = virtual_ideal_core_id; 522 virtual_ideal_core_id = core_id_;
492 R_UNLESS(((1ULL << cpu_core_id) & v_affinity_mask) != 0, ResultInvalidCombination); 523 } else {
524 // Preserve our ideal core id.
525 core_id_ = virtual_ideal_core_id;
526 R_UNLESS(((1ULL << core_id_) & v_affinity_mask) != 0, ResultInvalidCombination);
493 } 527 }
494 528
495 // Set the virtual core/affinity mask. 529 // Set our affinity mask.
496 virtual_ideal_core_id = cpu_core_id;
497 virtual_affinity_mask = v_affinity_mask; 530 virtual_affinity_mask = v_affinity_mask;
498 531
499 // Translate the virtual core to a physical core. 532 // Translate the virtual core to a physical core.
500 if (cpu_core_id >= 0) { 533 if (core_id_ >= 0) {
501 cpu_core_id = Core::Hardware::VirtualToPhysicalCoreMap[cpu_core_id]; 534 core_id_ = Core::Hardware::VirtualToPhysicalCoreMap[core_id_];
502 } 535 }
503 536
504 // Translate the virtual affinity mask to a physical one. 537 // Translate the virtual affinity mask to a physical one.
@@ -513,7 +546,7 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
513 const KAffinityMask old_mask = physical_affinity_mask; 546 const KAffinityMask old_mask = physical_affinity_mask;
514 547
515 // Set our new ideals. 548 // Set our new ideals.
516 physical_ideal_core_id = cpu_core_id; 549 physical_ideal_core_id = core_id_;
517 physical_affinity_mask.SetAffinityMask(p_affinity_mask); 550 physical_affinity_mask.SetAffinityMask(p_affinity_mask);
518 551
519 if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { 552 if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
@@ -531,18 +564,18 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
531 } 564 }
532 } else { 565 } else {
533 // Otherwise, we edit the original affinity for restoration later. 566 // Otherwise, we edit the original affinity for restoration later.
534 original_physical_ideal_core_id = cpu_core_id; 567 original_physical_ideal_core_id = core_id_;
535 original_physical_affinity_mask.SetAffinityMask(p_affinity_mask); 568 original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
536 } 569 }
537 } 570 }
538 571
539 // Update the pinned waiter list. 572 // Update the pinned waiter list.
573 ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, std::addressof(pinned_waiter_list));
540 { 574 {
541 bool retry_update{}; 575 bool retry_update{};
542 bool thread_is_pinned{};
543 do { 576 do {
544 // Lock the scheduler. 577 // Lock the scheduler.
545 KScopedSchedulerLock sl{kernel}; 578 KScopedSchedulerLock sl(kernel);
546 579
547 // Don't do any further management if our termination has been requested. 580 // Don't do any further management if our termination has been requested.
548 R_SUCCEED_IF(IsTerminationRequested()); 581 R_SUCCEED_IF(IsTerminationRequested());
@@ -570,12 +603,9 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
570 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), 603 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
571 ResultTerminationRequested); 604 ResultTerminationRequested);
572 605
573 // Note that the thread was pinned.
574 thread_is_pinned = true;
575
576 // Wait until the thread isn't pinned any more. 606 // Wait until the thread isn't pinned any more.
577 pinned_waiter_list.push_back(GetCurrentThread(kernel)); 607 pinned_waiter_list.push_back(GetCurrentThread(kernel));
578 GetCurrentThread(kernel).SetState(ThreadState::Waiting); 608 GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_));
579 } else { 609 } else {
580 // If the thread isn't pinned, release the scheduler lock and retry until it's 610 // If the thread isn't pinned, release the scheduler lock and retry until it's
581 // not current. 611 // not current.
@@ -583,16 +613,6 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
583 } 613 }
584 } 614 }
585 } while (retry_update); 615 } while (retry_update);
586
587 // If the thread was pinned, it no longer is, and we should remove the current thread from
588 // our waiter list.
589 if (thread_is_pinned) {
590 // Lock the scheduler.
591 KScopedSchedulerLock sl{kernel};
592
593 // Remove from the list.
594 pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
595 }
596 } 616 }
597 617
598 return ResultSuccess; 618 return ResultSuccess;
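The migration path now parks the caller with BeginWait() on a queue whose CancelWait() erases it from pinned_waiter_list, which is why the old erase-after-wake block disappears. The loop's shape is: wait if the target is pinned, otherwise retry until it stops being current on any core. A condition_variable sketch of that shape, with bare flags standing in for the pinned/current checks (assumed simplification):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    std::mutex m;
    std::condition_variable cv;
    bool pinned = true;            // stand-in for GetStackParameters().is_pinned
    bool running_elsewhere = true; // stand-in for "current on some core"

    void WaitUntilMigratable() {
        std::unique_lock lk(m);
        for (;;) {
            if (pinned) {
                // Park until the unpin path wakes us (the CancelWait analog).
                cv.wait(lk, [] { return !pinned; });
                continue; // re-evaluate everything after waking
            }
            if (!running_elsewhere) {
                return; // safe to apply the new core mask
            }
            // Not pinned but still running: drop the lock and retry.
            lk.unlock();
            std::this_thread::yield();
            lk.lock();
        }
    }

    int main() {
        std::thread other([] {
            std::this_thread::sleep_for(std::chrono::milliseconds(5));
            {
                std::lock_guard lg(m);
                pinned = false;
                running_elsewhere = false;
            }
            cv.notify_all();
        });
        WaitUntilMigratable();
        other.join();
    }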
@@ -641,15 +661,9 @@ void KThread::WaitCancel() {
641 KScopedSchedulerLock sl{kernel}; 661 KScopedSchedulerLock sl{kernel};
642 662
643 // Check if we're waiting and cancellable. 663 // Check if we're waiting and cancellable.
644 if (GetState() == ThreadState::Waiting && cancellable) { 664 if (this->GetState() == ThreadState::Waiting && cancellable) {
645 if (sleeping_queue != nullptr) { 665 wait_cancelled = false;
646 sleeping_queue->WakeupThread(this); 666 wait_queue->CancelWait(this, ResultCancelled, true);
647 wait_cancelled = true;
648 } else {
649 SetSyncedObject(nullptr, ResultCancelled);
650 SetState(ThreadState::Runnable);
651 wait_cancelled = false;
652 }
653 } else { 667 } else {
654 // Otherwise, note that we cancelled a wait. 668 // Otherwise, note that we cancelled a wait.
655 wait_cancelled = true; 669 wait_cancelled = true;
@@ -700,60 +714,59 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
700 // Set the activity. 714 // Set the activity.
701 { 715 {
702 // Lock the scheduler. 716 // Lock the scheduler.
703 KScopedSchedulerLock sl{kernel}; 717 KScopedSchedulerLock sl(kernel);
704 718
705 // Verify our state. 719 // Verify our state.
706 const auto cur_state = GetState(); 720 const auto cur_state = this->GetState();
707 R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable), 721 R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable),
708 ResultInvalidState); 722 ResultInvalidState);
709 723
710 // Either pause or resume. 724 // Either pause or resume.
711 if (activity == Svc::ThreadActivity::Paused) { 725 if (activity == Svc::ThreadActivity::Paused) {
712 // Verify that we're not suspended. 726 // Verify that we're not suspended.
713 R_UNLESS(!IsSuspendRequested(SuspendType::Thread), ResultInvalidState); 727 R_UNLESS(!this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
714 728
715 // Suspend. 729 // Suspend.
716 RequestSuspend(SuspendType::Thread); 730 this->RequestSuspend(SuspendType::Thread);
717 } else { 731 } else {
718 ASSERT(activity == Svc::ThreadActivity::Runnable); 732 ASSERT(activity == Svc::ThreadActivity::Runnable);
719 733
720 // Verify that we're suspended. 734 // Verify that we're suspended.
721 R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState); 735 R_UNLESS(this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
722 736
723 // Resume. 737 // Resume.
724 Resume(SuspendType::Thread); 738 this->Resume(SuspendType::Thread);
725 } 739 }
726 } 740 }
727 741
728 // If the thread is now paused, update the pinned waiter list. 742 // If the thread is now paused, update the pinned waiter list.
729 if (activity == Svc::ThreadActivity::Paused) { 743 if (activity == Svc::ThreadActivity::Paused) {
730 bool thread_is_pinned{}; 744 ThreadQueueImplForKThreadSetProperty wait_queue_(kernel,
731 bool thread_is_current{}; 745 std::addressof(pinned_waiter_list));
746
747 bool thread_is_current;
732 do { 748 do {
733 // Lock the scheduler. 749 // Lock the scheduler.
734 KScopedSchedulerLock sl{kernel}; 750 KScopedSchedulerLock sl(kernel);
735 751
736 // Don't do any further management if our termination has been requested. 752 // Don't do any further management if our termination has been requested.
737 R_SUCCEED_IF(IsTerminationRequested()); 753 R_SUCCEED_IF(this->IsTerminationRequested());
754
755 // By default, treat the thread as not current.
756 thread_is_current = false;
738 757
739 // Check whether the thread is pinned. 758 // Check whether the thread is pinned.
740 if (GetStackParameters().is_pinned) { 759 if (this->GetStackParameters().is_pinned) {
741 // Verify that the current thread isn't terminating. 760 // Verify that the current thread isn't terminating.
742 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), 761 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
743 ResultTerminationRequested); 762 ResultTerminationRequested);
744 763
745 // Note that the thread was pinned and not current.
746 thread_is_pinned = true;
747 thread_is_current = false;
748
749 // Wait until the thread isn't pinned any more. 764 // Wait until the thread isn't pinned any more.
750 pinned_waiter_list.push_back(GetCurrentThread(kernel)); 765 pinned_waiter_list.push_back(GetCurrentThread(kernel));
751 GetCurrentThread(kernel).SetState(ThreadState::Waiting); 766 GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_));
752 } else { 767 } else {
753 // Check if the thread is currently running. 768 // Check if the thread is currently running.
754 // If it is, we'll need to retry. 769 // If it is, we'll need to retry.
755 thread_is_current = false;
756
757 for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) { 770 for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
758 if (kernel.Scheduler(i).GetCurrentThread() == this) { 771 if (kernel.Scheduler(i).GetCurrentThread() == this) {
759 thread_is_current = true; 772 thread_is_current = true;
@@ -762,16 +775,6 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
762 } 775 }
763 } 776 }
764 } while (thread_is_current); 777 } while (thread_is_current);
765
766 // If the thread was pinned, it no longer is, and we should remove the current thread from
767 // our waiter list.
768 if (thread_is_pinned) {
769 // Lock the scheduler.
770 KScopedSchedulerLock sl{kernel};
771
772 // Remove from the list.
773 pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
774 }
775 } 778 }
776 779
777 return ResultSuccess; 780 return ResultSuccess;
@@ -966,6 +969,9 @@ ResultCode KThread::Run() {
966 969
967 // Set our state and finish. 970 // Set our state and finish.
968 SetState(ThreadState::Runnable); 971 SetState(ThreadState::Runnable);
972
973 DisableDispatch();
974
969 return ResultSuccess; 975 return ResultSuccess;
970 } 976 }
971} 977}
@@ -996,27 +1002,61 @@ ResultCode KThread::Sleep(s64 timeout) {
996 ASSERT(this == GetCurrentThreadPointer(kernel)); 1002 ASSERT(this == GetCurrentThreadPointer(kernel));
997 ASSERT(timeout > 0); 1003 ASSERT(timeout > 0);
998 1004
1005 ThreadQueueImplForKThreadSleep wait_queue_(kernel);
999 { 1006 {
1000 // Setup the scheduling lock and sleep. 1007 // Setup the scheduling lock and sleep.
1001 KScopedSchedulerLockAndSleep slp{kernel, this, timeout}; 1008 KScopedSchedulerLockAndSleep slp(kernel, this, timeout);
1002 1009
1003 // Check if the thread should terminate. 1010 // Check if the thread should terminate.
1004 if (IsTerminationRequested()) { 1011 if (this->IsTerminationRequested()) {
1005 slp.CancelSleep(); 1012 slp.CancelSleep();
1006 return ResultTerminationRequested; 1013 return ResultTerminationRequested;
1007 } 1014 }
1008 1015
1009 // Mark the thread as waiting. 1016 // Wait for the sleep to end.
1010 SetState(ThreadState::Waiting); 1017 this->BeginWait(std::addressof(wait_queue_));
1011 SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep); 1018 SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
1012 } 1019 }
1013 1020
1014 // The lock/sleep is done. 1021 return ResultSuccess;
1022}
1015 1023
1016 // Cancel the timer. 1024void KThread::BeginWait(KThreadQueue* queue) {
1017 kernel.TimeManager().UnscheduleTimeEvent(this); 1025 // Set our state as waiting.
1026 SetState(ThreadState::Waiting);
1018 1027
1019 return ResultSuccess; 1028 // Set our wait queue.
1029 wait_queue = queue;
1030}
1031
1032void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
1033 // Lock the scheduler.
1034 KScopedSchedulerLock sl(kernel);
1035
1036 // If we're waiting, notify our queue that we're available.
1037 if (GetState() == ThreadState::Waiting) {
1038 wait_queue->NotifyAvailable(this, signaled_object, wait_result_);
1039 }
1040}
1041
1042void KThread::EndWait(ResultCode wait_result_) {
1043 // Lock the scheduler.
1044 KScopedSchedulerLock sl(kernel);
1045
1046 // If we're waiting, tell our queue to end our wait.
1047 if (GetState() == ThreadState::Waiting) {
1048 wait_queue->EndWait(this, wait_result_);
1049 }
1050}
1051
1052void KThread::CancelWait(ResultCode wait_result_, bool cancel_timer_task) {
1053 // Lock the scheduler.
1054 KScopedSchedulerLock sl(kernel);
1055
1056 // If we're waiting, tell our queue to cancel our wait.
1057 if (GetState() == ThreadState::Waiting) {
1058 wait_queue->CancelWait(this, wait_result_, cancel_timer_task);
1059 }
1020} 1060}
1021 1061
1022void KThread::SetState(ThreadState state) { 1062void KThread::SetState(ThreadState state) {
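BeginWait()/NotifyAvailable()/EndWait()/CancelWait() reduce the thread to one wait_queue pointer and funnel every wakeup through the queue's virtual handlers. A compact model of that dispatch (illustrative types; the real versions run under the scheduler lock and unschedule timers):

    #include <cstdio>

    enum class ThreadState { Runnable, Waiting };

    struct Thread;

    struct ThreadQueue {
        virtual ~ThreadQueue() = default;
        virtual void EndWait(Thread& t, int result);
        virtual void CancelWait(Thread& t, int result, bool cancel_timer_task);
    };

    struct Thread {
        ThreadState state = ThreadState::Runnable;
        ThreadQueue* wait_queue = nullptr;
        int wait_result = 0;

        void BeginWait(ThreadQueue* queue) {
            state = ThreadState::Waiting;
            wait_queue = queue;
        }
        void EndWait(int result) {
            if (state == ThreadState::Waiting) {
                wait_queue->EndWait(*this, result); // the queue decides how to finish
            }
        }
        void CancelWait(int result, bool cancel_timer_task) {
            if (state == ThreadState::Waiting) {
                wait_queue->CancelWait(*this, result, cancel_timer_task);
            }
        }
    };

    void ThreadQueue::EndWait(Thread& t, int result) {
        t.wait_result = result;
        t.state = ThreadState::Runnable;
        t.wait_queue = nullptr;
        // (the real handler also unschedules the wakeup timer here)
    }

    void ThreadQueue::CancelWait(Thread& t, int result, bool cancel_timer_task) {
        t.wait_result = result;
        t.state = ThreadState::Runnable;
        t.wait_queue = nullptr;
        (void)cancel_timer_task; // would gate the timer unschedule
    }

    int main() {
        ThreadQueue queue;
        Thread thread;
        thread.BeginWait(&queue);
        thread.EndWait(0);
        std::printf("runnable: %d\n", thread.state == ThreadState::Runnable);
    }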
@@ -1050,4 +1090,26 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
1050 return GetCurrentThread(kernel).GetCurrentCore(); 1090 return GetCurrentThread(kernel).GetCurrentCore();
1051} 1091}
1052 1092
1093KScopedDisableDispatch::~KScopedDisableDispatch() {
1094 // If we are shutting down the kernel, none of this is relevant anymore.
1095 if (kernel.IsShuttingDown()) {
1096 return;
1097 }
1098
1099 // Skip the reschedule if single-core, as dispatch tracking is disabled here.
1100 if (!Settings::values.use_multi_core.GetValue()) {
1101 return;
1102 }
1103
1104 if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
1105 auto scheduler = kernel.CurrentScheduler();
1106
1107 if (scheduler) {
1108 scheduler->RescheduleCurrentCore();
1109 }
1110 } else {
1111 GetCurrentThread(kernel).EnableDispatch();
1112 }
1113}
1114
1053} // namespace Kernel 1115} // namespace Kernel
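KScopedDisableDispatch pairs a constructor-time DisableDispatch() with a destructor that either just re-enables (still nested) or reschedules the current core (outermost scope). A reduced RAII model of the nesting behavior, omitting the single-core and shutdown early-outs shown above (illustrative names):

    #include <atomic>
    #include <cassert>
    #include <cstdio>

    std::atomic<int> disable_count{0};

    void RescheduleCurrentCore() {
        std::puts("reschedule current core");
        --disable_count; // the reschedule path consumes the final level
    }

    class ScopedDisableDispatch {
    public:
        ScopedDisableDispatch() {
            ++disable_count;
        }
        ~ScopedDisableDispatch() {
            if (disable_count.load() <= 1) {
                RescheduleCurrentCore(); // outermost scope
            } else {
                --disable_count;         // still nested
            }
        }
        ScopedDisableDispatch(const ScopedDisableDispatch&) = delete;
        ScopedDisableDispatch& operator=(const ScopedDisableDispatch&) = delete;
    };

    int main() {
        {
            ScopedDisableDispatch outer;
            {
                ScopedDisableDispatch inner; // nested: destructor only decrements
            }
            assert(disable_count.load() == 1);
        } // outer scope ends: reschedules
        assert(disable_count.load() == 0);
    }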
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index c77f44ad4..c8a08bd71 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -48,6 +48,7 @@ enum class ThreadType : u32 {
48 Kernel = 1, 48 Kernel = 1,
49 HighPriority = 2, 49 HighPriority = 2,
50 User = 3, 50 User = 3,
51 Dummy = 100, // Special thread type for emulation purposes only
51}; 52};
52DECLARE_ENUM_FLAG_OPERATORS(ThreadType); 53DECLARE_ENUM_FLAG_OPERATORS(ThreadType);
53 54
@@ -161,8 +162,6 @@ public:
161 } 162 }
162 } 163 }
163 164
164 void Wakeup();
165
166 void SetBasePriority(s32 value); 165 void SetBasePriority(s32 value);
167 166
168 [[nodiscard]] ResultCode Run(); 167 [[nodiscard]] ResultCode Run();
@@ -197,13 +196,19 @@ public:
197 196
198 void Suspend(); 197 void Suspend();
199 198
200 void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) { 199 constexpr void SetSyncedIndex(s32 index) {
201 synced_object = obj; 200 synced_index = index;
201 }
202
203 [[nodiscard]] constexpr s32 GetSyncedIndex() const {
204 return synced_index;
205 }
206
207 constexpr void SetWaitResult(ResultCode wait_res) {
202 wait_result = wait_res; 208 wait_result = wait_res;
203 } 209 }
204 210
205 [[nodiscard]] ResultCode GetWaitResult(KSynchronizationObject** out) const { 211 [[nodiscard]] constexpr ResultCode GetWaitResult() const {
206 *out = synced_object;
207 return wait_result; 212 return wait_result;
208 } 213 }
209 214
@@ -374,6 +379,8 @@ public:
374 379
375 [[nodiscard]] bool IsSignaled() const override; 380 [[nodiscard]] bool IsSignaled() const override;
376 381
382 void OnTimer();
383
377 static void PostDestroy(uintptr_t arg); 384 static void PostDestroy(uintptr_t arg);
378 385
379 [[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread); 386 [[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread);
@@ -446,20 +453,39 @@ public:
446 return per_core_priority_queue_entry[core]; 453 return per_core_priority_queue_entry[core];
447 } 454 }
448 455
449 void SetSleepingQueue(KThreadQueue* q) { 456 [[nodiscard]] bool IsKernelThread() const {
450 sleeping_queue = q; 457 return GetActiveCore() == 3;
458 }
459
460 [[nodiscard]] bool IsDispatchTrackingDisabled() const {
461 return is_single_core || IsKernelThread();
451 } 462 }
452 463
453 [[nodiscard]] s32 GetDisableDispatchCount() const { 464 [[nodiscard]] s32 GetDisableDispatchCount() const {
465 if (IsDispatchTrackingDisabled()) {
466 // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
467 return 1;
468 }
469
454 return this->GetStackParameters().disable_count; 470 return this->GetStackParameters().disable_count;
455 } 471 }
456 472
457 void DisableDispatch() { 473 void DisableDispatch() {
474 if (IsDispatchTrackingDisabled()) {
475 // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
476 return;
477 }
478
458 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); 479 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
459 this->GetStackParameters().disable_count++; 480 this->GetStackParameters().disable_count++;
460 } 481 }
461 482
462 void EnableDispatch() { 483 void EnableDispatch() {
484 if (IsDispatchTrackingDisabled()) {
485 // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
486 return;
487 }
488
463 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0); 489 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
464 this->GetStackParameters().disable_count--; 490 this->GetStackParameters().disable_count--;
465 } 491 }
@@ -573,6 +599,15 @@ public:
573 address_key_value = val; 599 address_key_value = val;
574 } 600 }
575 601
602 void ClearWaitQueue() {
603 wait_queue = nullptr;
604 }
605
606 void BeginWait(KThreadQueue* queue);
607 void NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_);
608 void EndWait(ResultCode wait_result_);
609 void CancelWait(ResultCode wait_result_, bool cancel_timer_task);
610
576 [[nodiscard]] bool HasWaiters() const { 611 [[nodiscard]] bool HasWaiters() const {
577 return !waiter_list.empty(); 612 return !waiter_list.empty();
578 } 613 }
@@ -667,7 +702,6 @@ private:
667 KAffinityMask physical_affinity_mask{}; 702 KAffinityMask physical_affinity_mask{};
668 u64 thread_id{}; 703 u64 thread_id{};
669 std::atomic<s64> cpu_time{}; 704 std::atomic<s64> cpu_time{};
670 KSynchronizationObject* synced_object{};
671 VAddr address_key{}; 705 VAddr address_key{};
672 KProcess* parent{}; 706 KProcess* parent{};
673 VAddr kernel_stack_top{}; 707 VAddr kernel_stack_top{};
@@ -677,13 +711,14 @@ private:
677 s64 schedule_count{}; 711 s64 schedule_count{};
678 s64 last_scheduled_tick{}; 712 s64 last_scheduled_tick{};
679 std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{}; 713 std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
680 KThreadQueue* sleeping_queue{}; 714 KThreadQueue* wait_queue{};
681 WaiterList waiter_list{}; 715 WaiterList waiter_list{};
682 WaiterList pinned_waiter_list{}; 716 WaiterList pinned_waiter_list{};
683 KThread* lock_owner{}; 717 KThread* lock_owner{};
684 u32 address_key_value{}; 718 u32 address_key_value{};
685 u32 suspend_request_flags{}; 719 u32 suspend_request_flags{};
686 u32 suspend_allowed_flags{}; 720 u32 suspend_allowed_flags{};
721 s32 synced_index{};
687 ResultCode wait_result{ResultSuccess}; 722 ResultCode wait_result{ResultSuccess};
688 s32 base_priority{}; 723 s32 base_priority{};
689 s32 physical_ideal_core_id{}; 724 s32 physical_ideal_core_id{};
@@ -708,6 +743,7 @@ private:
708 743
709 // For emulation 744 // For emulation
710 std::shared_ptr<Common::Fiber> host_context{}; 745 std::shared_ptr<Common::Fiber> host_context{};
746 bool is_single_core{};
711 747
712 // For debugging 748 // For debugging
713 std::vector<KSynchronizationObject*> wait_objects_for_debugging; 749 std::vector<KSynchronizationObject*> wait_objects_for_debugging;
@@ -752,4 +788,20 @@ public:
752 } 788 }
753}; 789};
754 790
791class KScopedDisableDispatch {
792public:
793 [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
794 // If we are shutting down the kernel, none of this is relevant anymore.
795 if (kernel.IsShuttingDown()) {
796 return;
797 }
798 GetCurrentThread(kernel).DisableDispatch();
799 }
800
801 ~KScopedDisableDispatch();
802
803private:
804 KernelCore& kernel;
805};
806
755} // namespace Kernel 807} // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread_queue.cpp b/src/core/hle/kernel/k_thread_queue.cpp
new file mode 100644
index 000000000..46f27172b
--- /dev/null
+++ b/src/core/hle/kernel/k_thread_queue.cpp
@@ -0,0 +1,51 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5
6
7#include "core/hle/kernel/k_thread_queue.h"
8#include "core/hle/kernel/kernel.h"
9#include "core/hle/kernel/time_manager.h"
10
11namespace Kernel {
12
13void KThreadQueue::NotifyAvailable([[maybe_unused]] KThread* waiting_thread,
14 [[maybe_unused]] KSynchronizationObject* signaled_object,
15 [[maybe_unused]] ResultCode wait_result) {}
16
17void KThreadQueue::EndWait(KThread* waiting_thread, ResultCode wait_result) {
18 // Set the thread's wait result.
19 waiting_thread->SetWaitResult(wait_result);
20
21 // Set the thread as runnable.
22 waiting_thread->SetState(ThreadState::Runnable);
23
24 // Clear the thread's wait queue.
25 waiting_thread->ClearWaitQueue();
26
27 // Cancel the thread task.
28 kernel.TimeManager().UnscheduleTimeEvent(waiting_thread);
29}
30
31void KThreadQueue::CancelWait(KThread* waiting_thread, ResultCode wait_result,
32 bool cancel_timer_task) {
33 // Set the thread's wait result.
34 waiting_thread->SetWaitResult(wait_result);
35
36 // Set the thread as runnable.
37 waiting_thread->SetState(ThreadState::Runnable);
38
39 // Clear the thread's wait queue.
40 waiting_thread->ClearWaitQueue();
41
42 // Cancel the thread task.
43 if (cancel_timer_task) {
44 kernel.TimeManager().UnscheduleTimeEvent(waiting_thread);
45 }
46}
47
48void KThreadQueueWithoutEndWait::EndWait([[maybe_unused]] KThread* waiting_thread,
49 [[maybe_unused]] ResultCode wait_result) {}
50
51} // namespace Kernel
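The only difference between the two base handlers is the timer: EndWait() always unschedules the wakeup event, while CancelWait() takes cancel_timer_task = false precisely when called from the timer itself (the OnTimer path), where the event has already fired. A toy demonstration of that flag, with a std::map standing in for TimeManager (illustrative only):

    #include <cstdio>
    #include <functional>
    #include <map>

    // Toy stand-in for TimeManager: timer id -> callback.
    std::map<int, std::function<void()>> pending_timers;

    void CancelWait(int timer_id, bool cancel_timer_task) {
        std::printf("wait tied to timer %d cancelled\n", timer_id);
        if (cancel_timer_task) {
            // Normal cancellation: the timeout is still pending, unschedule it.
            pending_timers.erase(timer_id);
        }
        // From the timer callback itself (the OnTimer path), the entry has
        // already fired and been removed, so there is nothing to cancel.
    }

    int main() {
        // Timeout path: the timer fires, the manager removes the entry, then
        // the callback cancels the wait with cancel_timer_task = false.
        pending_timers[1] = [] { CancelWait(1, /*cancel_timer_task=*/false); };
        auto fired = pending_timers[1];
        pending_timers.erase(1);
        fired();

        // Explicit cancel path (e.g. WaitCancel): unschedule the pending timeout.
        pending_timers[2] = [] {};
        CancelWait(2, /*cancel_timer_task=*/true);
        std::printf("pending timers left: %zu\n", pending_timers.size());
    }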
diff --git a/src/core/hle/kernel/k_thread_queue.h b/src/core/hle/kernel/k_thread_queue.h
index 35d471dc5..ccb718e49 100644
--- a/src/core/hle/kernel/k_thread_queue.h
+++ b/src/core/hle/kernel/k_thread_queue.h
@@ -4,6 +4,7 @@
4 4
5#pragma once 5#pragma once
6 6
7#include "core/hle/kernel/k_scheduler.h"
7#include "core/hle/kernel/k_thread.h" 8#include "core/hle/kernel/k_thread.h"
8 9
9namespace Kernel { 10namespace Kernel {
@@ -11,71 +12,24 @@ namespace Kernel {
11class KThreadQueue { 12class KThreadQueue {
12public: 13public:
13 explicit KThreadQueue(KernelCore& kernel_) : kernel{kernel_} {} 14 explicit KThreadQueue(KernelCore& kernel_) : kernel{kernel_} {}
15 virtual ~KThreadQueue() = default;
14 16
15 bool IsEmpty() const { 17 virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
16 return wait_list.empty(); 18 ResultCode wait_result);
17 } 19 virtual void EndWait(KThread* waiting_thread, ResultCode wait_result);
18 20 virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
19 KThread::WaiterList::iterator begin() { 21 bool cancel_timer_task);
20 return wait_list.begin();
21 }
22 KThread::WaiterList::iterator end() {
23 return wait_list.end();
24 }
25
26 bool SleepThread(KThread* t) {
27 KScopedSchedulerLock sl{kernel};
28
29 // If the thread needs terminating, don't enqueue it.
30 if (t->IsTerminationRequested()) {
31 return false;
32 }
33
34 // Set the thread's queue and mark it as waiting.
35 t->SetSleepingQueue(this);
36 t->SetState(ThreadState::Waiting);
37
38 // Add the thread to the queue.
39 wait_list.push_back(*t);
40
41 return true;
42 }
43
44 void WakeupThread(KThread* t) {
45 KScopedSchedulerLock sl{kernel};
46
47 // Remove the thread from the queue.
48 wait_list.erase(wait_list.iterator_to(*t));
49
50 // Mark the thread as no longer sleeping.
51 t->SetState(ThreadState::Runnable);
52 t->SetSleepingQueue(nullptr);
53 }
54
55 KThread* WakeupFrontThread() {
56 KScopedSchedulerLock sl{kernel};
57
58 if (wait_list.empty()) {
59 return nullptr;
60 } else {
61 // Remove the thread from the queue.
62 auto it = wait_list.begin();
63 KThread* thread = std::addressof(*it);
64 wait_list.erase(it);
65
66 ASSERT(thread->GetState() == ThreadState::Waiting);
67
68 // Mark the thread as no longer sleeping.
69 thread->SetState(ThreadState::Runnable);
70 thread->SetSleepingQueue(nullptr);
71
72 return thread;
73 }
74 }
75 22
76private: 23private:
77 KernelCore& kernel; 24 KernelCore& kernel;
78 KThread::WaiterList wait_list{}; 25 KThread::WaiterList wait_list{};
79}; 26};
80 27
28class KThreadQueueWithoutEndWait : public KThreadQueue {
29public:
30 explicit KThreadQueueWithoutEndWait(KernelCore& kernel_) : KThreadQueue(kernel_) {}
31
32 void EndWait(KThread* waiting_thread, ResultCode wait_result) override final;
33};
34
81} // namespace Kernel 35} // namespace Kernel
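KThreadQueueWithoutEndWait serves waits that can only finish through NotifyAvailable() or CancelWait(), such as plain sleeps; its EndWait() is a deliberate no-op so stray end-wait calls fall through harmlessly. The pattern reduced to plain virtuals (illustrative, standard C++):

    #include <cstdio>

    struct Queue {
        virtual ~Queue() = default;
        virtual void EndWait() { std::puts("base EndWait: wake the thread"); }
        virtual void CancelWait() { std::puts("base CancelWait: wake the thread"); }
    };

    // Waits on this queue may only finish via CancelWait (timeout/cancel paths).
    struct QueueWithoutEndWait : Queue {
        void EndWait() override final {} // deliberate no-op
    };

    int main() {
        QueueWithoutEndWait q;
        Queue& base = q;
        base.EndWait();    // silently ignored
        base.CancelWait(); // still completes the wait
    }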
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 45e86a677..2e4e4cb1c 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -14,6 +14,7 @@
14#include "common/assert.h" 14#include "common/assert.h"
15#include "common/logging/log.h" 15#include "common/logging/log.h"
16#include "common/microprofile.h" 16#include "common/microprofile.h"
17#include "common/scope_exit.h"
17#include "common/thread.h" 18#include "common/thread.h"
18#include "common/thread_worker.h" 19#include "common/thread_worker.h"
19#include "core/arm/arm_interface.h" 20#include "core/arm/arm_interface.h"
@@ -83,12 +84,16 @@ struct KernelCore::Impl {
83 } 84 }
84 85
85 void InitializeCores() { 86 void InitializeCores() {
86 for (auto& core : cores) { 87 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
87 core.Initialize(current_process->Is64BitProcess()); 88 cores[core_id].Initialize(current_process->Is64BitProcess());
89 system.Memory().SetCurrentPageTable(*current_process, core_id);
88 } 90 }
89 } 91 }
90 92
91 void Shutdown() { 93 void Shutdown() {
94 is_shutting_down.store(true, std::memory_order_relaxed);
95 SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
96
92 process_list.clear(); 97 process_list.clear();
93 98
94 // Close all open server ports. 99 // Close all open server ports.
@@ -123,15 +128,6 @@ struct KernelCore::Impl {
123 next_user_process_id = KProcess::ProcessIDMin; 128 next_user_process_id = KProcess::ProcessIDMin;
124 next_thread_id = 1; 129 next_thread_id = 1;
125 130
126 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
127 if (suspend_threads[core_id]) {
128 suspend_threads[core_id]->Close();
129 suspend_threads[core_id] = nullptr;
130 }
131
132 schedulers[core_id].reset();
133 }
134
135 cores.clear(); 131 cores.clear();
136 132
137 global_handle_table->Finalize(); 133 global_handle_table->Finalize();
@@ -159,6 +155,16 @@ struct KernelCore::Impl {
159 CleanupObject(time_shared_mem); 155 CleanupObject(time_shared_mem);
160 CleanupObject(system_resource_limit); 156 CleanupObject(system_resource_limit);
161 157
158 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
159 if (suspend_threads[core_id]) {
160 suspend_threads[core_id]->Close();
161 suspend_threads[core_id] = nullptr;
162 }
163
164 schedulers[core_id]->Finalize();
165 schedulers[core_id].reset();
166 }
167
162 // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others 168 // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others
163 next_host_thread_id = Core::Hardware::NUM_CPU_CORES; 169 next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
164 170
164 170
@@ -245,13 +251,11 @@ struct KernelCore::Impl {
                     KScopedSchedulerLock lock(kernel);
                     global_scheduler_context->PreemptThreads();
                 }
-                const auto time_interval = std::chrono::nanoseconds{
-                    Core::Timing::msToCycles(std::chrono::milliseconds(10))};
+                const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
                 system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
             });
 
-        const auto time_interval =
-            std::chrono::nanoseconds{Core::Timing::msToCycles(std::chrono::milliseconds(10))};
+        const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
         system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
     }
@@ -267,14 +271,6 @@ struct KernelCore::Impl {
 
     void MakeCurrentProcess(KProcess* process) {
         current_process = process;
-        if (process == nullptr) {
-            return;
-        }
-
-        const u32 core_id = GetCurrentHostThreadID();
-        if (core_id < Core::Hardware::NUM_CPU_CORES) {
-            system.Memory().SetCurrentPageTable(*process, core_id);
-        }
     }
 
     static inline thread_local u32 host_thread_id = UINT32_MAX;
@@ -344,7 +340,16 @@ struct KernelCore::Impl {
         is_phantom_mode_for_singlecore = value;
     }
 
+    bool IsShuttingDown() const {
+        return is_shutting_down.load(std::memory_order_relaxed);
+    }
+
     KThread* GetCurrentEmuThread() {
+        // If we are shutting down the kernel, none of this is relevant anymore.
+        if (IsShuttingDown()) {
+            return {};
+        }
+
         const auto thread_id = GetCurrentHostThreadID();
         if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
             return GetHostDummyThread();
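
The read side of that flag is the new early-out in GetCurrentEmuThread(): once teardown begins, host threads that race in get a null thread instead of touching half-destroyed kernel state. A relaxed load is enough because the flag is only a hint to abandon the lookup, not a synchronization point; a sketch under that assumption (LookupEmuThread is a hypothetical placeholder for the real host-thread-ID lookup):

#include <atomic>

struct KThread;                       // the emulated-thread type, defined elsewhere

KThread* LookupEmuThread();           // hypothetical: the normal lookup path
std::atomic_bool is_shutting_down{};  // set by Shutdown(), as sketched above

KThread* GetCurrentEmuThread() {
    // Relaxed is fine: we only skip work, we do not publish or consume data.
    if (is_shutting_down.load(std::memory_order_relaxed)) {
        return nullptr; // callers must tolerate a null thread during teardown
    }
    return LookupEmuThread();
}
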
@@ -760,6 +765,7 @@ struct KernelCore::Impl {
     std::vector<std::unique_ptr<KThread>> dummy_threads;
 
     bool is_multicore{};
+    std::atomic_bool is_shutting_down{};
     bool is_phantom_mode_for_singlecore{};
     u32 single_core_thread_id{};
 
@@ -845,16 +851,20 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
     return impl->cores[id];
 }
 
+size_t KernelCore::CurrentPhysicalCoreIndex() const {
+    const u32 core_id = impl->GetCurrentHostThreadID();
+    if (core_id >= Core::Hardware::NUM_CPU_CORES) {
+        return Core::Hardware::NUM_CPU_CORES - 1;
+    }
+    return core_id;
+}
+
 Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
-    u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return impl->cores[core_id];
+    return impl->cores[CurrentPhysicalCoreIndex()];
 }
 
 const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
-    u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return impl->cores[core_id];
+    return impl->cores[CurrentPhysicalCoreIndex()];
 }
 
 Kernel::KScheduler* KernelCore::CurrentScheduler() {
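
Where the old CurrentPhysicalCore() asserted that the caller was one of the four core threads, CurrentPhysicalCoreIndex() clamps every other host thread (service threads and the like) to the last core, so callers always get a valid index. A sketch of just the clamping rule, assuming the Switch's four application cores:

#include <cstddef>
#include <cstdint>

constexpr std::size_t NUM_CPU_CORES = 4; // Switch application cores

// Host threads 0..3 are the emulated core threads and map to themselves;
// any other host thread is treated as if it were running on the last core.
std::size_t CurrentPhysicalCoreIndex(std::uint32_t host_thread_id) {
    return host_thread_id >= NUM_CPU_CORES ? NUM_CPU_CORES - 1 : host_thread_id;
}

This trades a hard ASSERT for a defined fallback, which the SVC-profiling and GetInfo changes later in this diff depend on.
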
@@ -1057,6 +1067,9 @@ void KernelCore::Suspend(bool in_suspention) {
             impl->suspend_threads[core_id]->SetState(state);
             impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
                 ThreadWaitReasonForDebugging::Suspended);
+            if (!should_suspend) {
+                impl->suspend_threads[core_id]->DisableDispatch();
+            }
         }
     }
 }
@@ -1065,19 +1078,21 @@ bool KernelCore::IsMulticore() const {
     return impl->is_multicore;
 }
 
+bool KernelCore::IsShuttingDown() const {
+    return impl->IsShuttingDown();
+}
+
 void KernelCore::ExceptionalExit() {
     exception_exited = true;
     Suspend(true);
 }
 
 void KernelCore::EnterSVCProfile() {
-    std::size_t core = impl->GetCurrentHostThreadID();
-    impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
+    impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
 }
 
 void KernelCore::ExitSVCProfile() {
-    std::size_t core = impl->GetCurrentHostThreadID();
-    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
+    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
 }
 
 std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index d847fd0c5..b9b423908 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -149,6 +149,9 @@ public:
     /// Gets the an instance of the respective physical CPU core.
     const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
 
+    /// Gets the current physical core index for the running host thread.
+    std::size_t CurrentPhysicalCoreIndex() const;
+
     /// Gets the sole instance of the Scheduler at the current running core.
     Kernel::KScheduler* CurrentScheduler();
 
@@ -272,6 +275,8 @@ public:
 
     bool IsMulticore() const;
 
+    bool IsShuttingDown() const;
+
     void EnterSVCProfile();
 
     void ExitSVCProfile();
diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp
index 6721b6276..03f3dec10 100644
--- a/src/core/hle/kernel/service_thread.cpp
+++ b/src/core/hle/kernel/service_thread.cpp
@@ -25,24 +25,27 @@ public:
     void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context);
 
 private:
-    std::vector<std::thread> threads;
+    std::vector<std::jthread> threads;
     std::queue<std::function<void()>> requests;
     std::mutex queue_mutex;
-    std::condition_variable condition;
+    std::condition_variable_any condition;
     const std::string service_name;
-    bool stop{};
 };
 
 ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name)
     : service_name{name} {
-    for (std::size_t i = 0; i < num_threads; ++i)
-        threads.emplace_back([this, &kernel] {
+    for (std::size_t i = 0; i < num_threads; ++i) {
+        threads.emplace_back([this, &kernel](std::stop_token stop_token) {
             Common::SetCurrentThreadName(std::string{"yuzu:HleService:" + service_name}.c_str());
 
             // Wait for first request before trying to acquire a render context
             {
                 std::unique_lock lock{queue_mutex};
-                condition.wait(lock, [this] { return stop || !requests.empty(); });
+                condition.wait(lock, stop_token, [this] { return !requests.empty(); });
+            }
+
+            if (stop_token.stop_requested()) {
+                return;
             }
 
             kernel.RegisterHostThread();
@@ -52,10 +55,16 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
 
                 {
                     std::unique_lock lock{queue_mutex};
-                    condition.wait(lock, [this] { return stop || !requests.empty(); });
-                    if (stop || requests.empty()) {
+                    condition.wait(lock, stop_token, [this] { return !requests.empty(); });
+
+                    if (stop_token.stop_requested()) {
                         return;
                     }
+
+                    if (requests.empty()) {
+                        continue;
+                    }
+
                     task = std::move(requests.front());
                     requests.pop();
                 }
@@ -63,6 +72,7 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
                 task();
             }
         });
+    }
 }
 
 void ServiceThread::Impl::QueueSyncRequest(KSession& session,
@@ -88,12 +98,9 @@ void ServiceThread::Impl::QueueSyncRequest(KSession& session,
 }
 
 ServiceThread::Impl::~Impl() {
-    {
-        std::unique_lock lock{queue_mutex};
-        stop = true;
-    }
     condition.notify_all();
-    for (std::thread& thread : threads) {
+    for (auto& thread : threads) {
+        thread.request_stop();
         thread.join();
     }
 }
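
The rework above swaps the hand-rolled stop flag for C++20 cooperative cancellation: std::jthread hands each worker a std::stop_token, std::condition_variable_any::wait(lock, stop_token, pred) returns either when the predicate holds or when stop is requested, and request_stop() both wakes the wait and lets the jthread join. A self-contained sketch of the same worker-pool shape (names are illustrative, not yuzu's):

#include <condition_variable>
#include <cstddef>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

class WorkerPool {
public:
    explicit WorkerPool(std::size_t num_threads) {
        for (std::size_t i = 0; i < num_threads; ++i) {
            threads.emplace_back([this](std::stop_token stop_token) {
                while (true) {
                    std::function<void()> task;
                    {
                        std::unique_lock lock{queue_mutex};
                        // Wakes on notify, or when stop is requested.
                        condition.wait(lock, stop_token,
                                       [this] { return !requests.empty(); });
                        if (stop_token.stop_requested()) {
                            return;
                        }
                        if (requests.empty()) {
                            continue; // defensive, mirrors the diff
                        }
                        task = std::move(requests.front());
                        requests.pop();
                    }
                    task();
                }
            });
        }
    }

    void Queue(std::function<void()> task) {
        {
            std::scoped_lock lock{queue_mutex};
            requests.push(std::move(task));
        }
        condition.notify_one();
    }

    // No explicit destructor needed: each std::jthread requests stop and
    // joins on destruction, and the stop request wakes any wait() parked
    // on its token.

private:
    std::queue<std::function<void()>> requests;
    std::mutex queue_mutex;
    std::condition_variable_any condition;
    std::vector<std::jthread> threads; // declared last: destroyed (joined) first
};

Declaring the jthread vector last matters in this sketch: members are destroyed in reverse order, so the workers are stopped and joined while the mutex and condition variable are still alive.
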
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index b37db918e..a9f7438ea 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -32,6 +32,7 @@
 #include "core/hle/kernel/k_shared_memory.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_thread_queue.h"
 #include "core/hle/kernel/k_transfer_memory.h"
 #include "core/hle/kernel/k_writable_event.h"
 #include "core/hle/kernel/kernel.h"
@@ -308,26 +309,29 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
 
 /// Makes a blocking IPC call to an OS service.
 static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
-
     auto& kernel = system.Kernel();
 
+    // Create the wait queue.
+    KThreadQueue wait_queue(kernel);
+
+    // Get the client session from its handle.
+    KScopedAutoObject session =
+        kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
+    R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
+
+    LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
+
     auto thread = kernel.CurrentScheduler()->GetCurrentThread();
     {
         KScopedSchedulerLock lock(kernel);
-        thread->SetState(ThreadState::Waiting);
-        thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
-
-        {
-            KScopedAutoObject session =
-                kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
-            R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
-            LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
-            session->SendSyncRequest(thread, system.Memory(), system.CoreTiming());
-        }
+
+        // This is a synchronous request, so we should wait for our request to complete.
+        GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue));
+        GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
+        session->SendSyncRequest(&GetCurrentThread(kernel), system.Memory(), system.CoreTiming());
     }
 
-    KSynchronizationObject* dummy{};
-    return thread->GetWaitResult(std::addressof(dummy));
+    return thread->GetWaitResult();
 }
 
 static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
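
SendSyncRequest now parks the caller on a KThreadQueue via BeginWait() instead of poking ThreadState::Waiting directly, and the wake-up path delivers the result through GetWaitResult(). A rough host-side sketch of that wait/wake contract using standard primitives (sketch only: the real KThreadQueue runs under the kernel's scheduler lock, not a std::mutex):

#include <condition_variable>
#include <mutex>

// Models the BeginWait()/EndWait()/GetWaitResult() contract, nothing more.
class WaitQueue {
public:
    // Caller side: block until the server completes the request.
    int BeginWait() {
        std::unique_lock lock{m};
        cv.wait(lock, [this] { return signaled; });
        return wait_result; // analogous to KThread::GetWaitResult()
    }

    // Server side: publish a result and release the waiter.
    void EndWait(int result) {
        {
            std::scoped_lock lock{m};
            wait_result = result;
            signaled = true;
        }
        cv.notify_one();
    }

private:
    std::mutex m;
    std::condition_variable cv;
    bool signaled{};
    int wait_result{};
};
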
@@ -874,7 +878,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
             const u64 thread_ticks = current_thread->GetCpuTime();
 
             out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
-        } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
+        } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
             out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
         }
 
@@ -888,7 +892,8 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
             return ResultInvalidHandle;
         }
 
-        if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id != system.CurrentCoreIndex()) {
+        if (info_sub_id != 0xFFFFFFFFFFFFFFFF &&
+            info_sub_id != system.Kernel().CurrentPhysicalCoreIndex()) {
             LOG_ERROR(Kernel_SVC, "Core is not the current core, got {}", info_sub_id);
             return ResultInvalidCombination;
         }
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index 8cd7279a3..aa985d820 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -5,6 +5,7 @@
 #include "common/assert.h"
 #include "core/core.h"
 #include "core/core_timing.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/time_manager.h"
 
@@ -15,7 +16,10 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
         Core::Timing::CreateEvent("Kernel::TimeManagerCallback",
                                   [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
                                       KThread* thread = reinterpret_cast<KThread*>(thread_handle);
-                                      thread->Wakeup();
+                                      {
+                                          KScopedSchedulerLock sl(system.Kernel());
+                                          thread->OnTimer();
+                                      }
                                   });
 }
 
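Finally, the time manager's callback now runs KThread::OnTimer() under the scheduler lock rather than waking the thread directly, so a timeout cannot race the state transitions made elsewhere under that same lock. The shape of the change, sketched with host primitives (Thread and scheduler_mutex are illustrative stand-ins, not the kernel's types):

#include <mutex>

struct Thread {
    void OnTimer() { /* cancel the in-progress wait with a timeout result */ }
};

std::mutex scheduler_mutex; // stand-in for the kernel's scheduler lock

void TimerCallback(Thread* thread) {
    // Mirrors the diff: take the lock, then let the thread handle its timeout.
    std::scoped_lock sl{scheduler_mutex};
    thread->OnTimer();
}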