author     bunnei  2020-12-02 18:08:35 -0800
committer  bunnei  2020-12-06 00:03:24 -0800
commit     9e29e36a784496f7290c03b6a42e400a164a5b1e
tree       d33cc91b4651b374e0c244be7b7e3b47ef7680fd
parent     hle: kernel: physical_core: Clear exclusive state after each run.
hle: kernel: Rewrite scheduler implementation based on Mesosphere.
Diffstat (limited to 'src')
-rw-r--r--  src/core/CMakeLists.txt                      4
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.cpp    2
-rw-r--r--  src/core/core.cpp                           26
-rw-r--r--  src/core/core.h                             20
-rw-r--r--  src/core/cpu_manager.cpp                    55
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp      6
-rw-r--r--  src/core/hle/kernel/handle_table.cpp         4
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp              2
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp        873
-rw-r--r--  src/core/hle/kernel/k_scheduler.h (renamed from src/core/hle/kernel/scheduler.h)  285
-rw-r--r--  src/core/hle/kernel/kernel.cpp              59
-rw-r--r--  src/core/hle/kernel/kernel.h                17
-rw-r--r--  src/core/hle/kernel/mutex.cpp                6
-rw-r--r--  src/core/hle/kernel/physical_core.cpp        9
-rw-r--r--  src/core/hle/kernel/physical_core.h         13
-rw-r--r--  src/core/hle/kernel/process.cpp              6
-rw-r--r--  src/core/hle/kernel/readable_event.cpp       2
-rw-r--r--  src/core/hle/kernel/scheduler.cpp          819
-rw-r--r--  src/core/hle/kernel/server_session.cpp       2
-rw-r--r--  src/core/hle/kernel/svc.cpp                 57
-rw-r--r--  src/core/hle/kernel/synchronization.cpp      4
-rw-r--r--  src/core/hle/kernel/thread.cpp              50
-rw-r--r--  src/core/hle/kernel/thread.h               107
-rw-r--r--  src/core/hle/kernel/time_manager.cpp         2
-rw-r--r--  src/core/hle/service/time/time.cpp           2
-rw-r--r--  src/yuzu/debugger/wait_tree.cpp              6
26 files changed, 1215 insertions, 1223 deletions
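
Taken together with the subject line, the diffstat shows the shape of the change: the 819-line scheduler.cpp is deleted in favor of the new 873-line k_scheduler.cpp, and scheduler.h is renamed to k_scheduler.h. The declarations later in this diff imply roughly the following kernel-side surface; a simplified sketch, not the actual headers:

    // Sketch only: names taken from this diff, signatures simplified.
    #include <cstddef>

    namespace Kernel {
    class KScheduler;
    class GlobalSchedulerContext;

    class KernelCore {
    public:
        // Now returns a pointer; call sites in this diff null-check it when
        // the calling host thread may not be an emulated core.
        KScheduler* CurrentScheduler();

        // Per-core scheduler, renamed from Scheduler to KScheduler.
        KScheduler& Scheduler(std::size_t core_index);

        // Renamed from GlobalScheduler.
        Kernel::GlobalSchedulerContext& GlobalSchedulerContext();
    };
    } // namespace Kernel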
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 2e60a8331..662839ff8 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -154,6 +154,8 @@ add_library(core STATIC
154 hle/kernel/hle_ipc.h 154 hle/kernel/hle_ipc.h
155 hle/kernel/k_affinity_mask.h 155 hle/kernel/k_affinity_mask.h
156 hle/kernel/k_priority_queue.h 156 hle/kernel/k_priority_queue.h
157 hle/kernel/k_scheduler.cpp
158 hle/kernel/k_scheduler.h
157 hle/kernel/k_scheduler_lock.h 159 hle/kernel/k_scheduler_lock.h
158 hle/kernel/kernel.cpp 160 hle/kernel/kernel.cpp
159 hle/kernel/kernel.h 161 hle/kernel/kernel.h
@@ -189,8 +191,6 @@ add_library(core STATIC
189 hle/kernel/readable_event.h 191 hle/kernel/readable_event.h
190 hle/kernel/resource_limit.cpp 192 hle/kernel/resource_limit.cpp
191 hle/kernel/resource_limit.h 193 hle/kernel/resource_limit.h
192 hle/kernel/scheduler.cpp
193 hle/kernel/scheduler.h
194 hle/kernel/server_port.cpp 194 hle/kernel/server_port.cpp
195 hle/kernel/server_port.h 195 hle/kernel/server_port.h
196 hle/kernel/server_session.cpp 196 hle/kernel/server_session.cpp
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index b63f79915..7a4eb88a2 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -15,8 +15,8 @@
15#include "core/core.h" 15#include "core/core.h"
16#include "core/core_timing.h" 16#include "core/core_timing.h"
17#include "core/hardware_properties.h" 17#include "core/hardware_properties.h"
18#include "core/hle/kernel/k_scheduler.h"
18#include "core/hle/kernel/process.h" 19#include "core/hle/kernel/process.h"
19#include "core/hle/kernel/scheduler.h"
20#include "core/hle/kernel/svc.h" 20#include "core/hle/kernel/svc.h"
21#include "core/memory.h" 21#include "core/memory.h"
22#include "core/settings.h" 22#include "core/settings.h"
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 01e4faac8..77d21d41c 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -27,10 +27,10 @@
27#include "core/file_sys/vfs_real.h" 27#include "core/file_sys/vfs_real.h"
28#include "core/hardware_interrupt_manager.h" 28#include "core/hardware_interrupt_manager.h"
29#include "core/hle/kernel/client_port.h" 29#include "core/hle/kernel/client_port.h"
30#include "core/hle/kernel/k_scheduler.h"
30#include "core/hle/kernel/kernel.h" 31#include "core/hle/kernel/kernel.h"
31#include "core/hle/kernel/physical_core.h" 32#include "core/hle/kernel/physical_core.h"
32#include "core/hle/kernel/process.h" 33#include "core/hle/kernel/process.h"
33#include "core/hle/kernel/scheduler.h"
34#include "core/hle/kernel/thread.h" 34#include "core/hle/kernel/thread.h"
35#include "core/hle/service/am/applets/applets.h" 35#include "core/hle/service/am/applets/applets.h"
36#include "core/hle/service/apm/controller.h" 36#include "core/hle/service/apm/controller.h"
@@ -508,14 +508,6 @@ std::size_t System::CurrentCoreIndex() const {
508 return core; 508 return core;
509} 509}
510 510
511Kernel::Scheduler& System::CurrentScheduler() {
512 return impl->kernel.CurrentScheduler();
513}
514
515const Kernel::Scheduler& System::CurrentScheduler() const {
516 return impl->kernel.CurrentScheduler();
517}
518
519Kernel::PhysicalCore& System::CurrentPhysicalCore() { 511Kernel::PhysicalCore& System::CurrentPhysicalCore() {
520 return impl->kernel.CurrentPhysicalCore(); 512 return impl->kernel.CurrentPhysicalCore();
521} 513}
@@ -524,22 +516,14 @@ const Kernel::PhysicalCore& System::CurrentPhysicalCore() const {
524 return impl->kernel.CurrentPhysicalCore(); 516 return impl->kernel.CurrentPhysicalCore();
525} 517}
526 518
527Kernel::Scheduler& System::Scheduler(std::size_t core_index) {
528 return impl->kernel.Scheduler(core_index);
529}
530
531const Kernel::Scheduler& System::Scheduler(std::size_t core_index) const {
532 return impl->kernel.Scheduler(core_index);
533}
534
535/// Gets the global scheduler 519/// Gets the global scheduler
536Kernel::GlobalScheduler& System::GlobalScheduler() { 520Kernel::GlobalSchedulerContext& System::GlobalSchedulerContext() {
537 return impl->kernel.GlobalScheduler(); 521 return impl->kernel.GlobalSchedulerContext();
538} 522}
539 523
540/// Gets the global scheduler 524/// Gets the global scheduler
541const Kernel::GlobalScheduler& System::GlobalScheduler() const { 525const Kernel::GlobalSchedulerContext& System::GlobalSchedulerContext() const {
542 return impl->kernel.GlobalScheduler(); 526 return impl->kernel.GlobalSchedulerContext();
543} 527}
544 528
545Kernel::Process* System::CurrentProcess() { 529Kernel::Process* System::CurrentProcess() {
diff --git a/src/core/core.h b/src/core/core.h
index 29b8fb92a..579a774e4 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -26,11 +26,11 @@ class VfsFilesystem;
26} // namespace FileSys 26} // namespace FileSys
27 27
28namespace Kernel { 28namespace Kernel {
29class GlobalScheduler; 29class GlobalSchedulerContext;
30class KernelCore; 30class KernelCore;
31class PhysicalCore; 31class PhysicalCore;
32class Process; 32class Process;
33class Scheduler; 33class KScheduler;
34} // namespace Kernel 34} // namespace Kernel
35 35
36namespace Loader { 36namespace Loader {
@@ -213,12 +213,6 @@ public:
213 /// Gets the index of the currently running CPU core 213 /// Gets the index of the currently running CPU core
214 [[nodiscard]] std::size_t CurrentCoreIndex() const; 214 [[nodiscard]] std::size_t CurrentCoreIndex() const;
215 215
216 /// Gets the scheduler for the CPU core that is currently running
217 [[nodiscard]] Kernel::Scheduler& CurrentScheduler();
218
219 /// Gets the scheduler for the CPU core that is currently running
220 [[nodiscard]] const Kernel::Scheduler& CurrentScheduler() const;
221
222 /// Gets the physical core for the CPU core that is currently running 216 /// Gets the physical core for the CPU core that is currently running
223 [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore(); 217 [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore();
224 218
@@ -261,17 +255,11 @@ public:
261 /// Gets an immutable reference to the renderer. 255 /// Gets an immutable reference to the renderer.
262 [[nodiscard]] const VideoCore::RendererBase& Renderer() const; 256 [[nodiscard]] const VideoCore::RendererBase& Renderer() const;
263 257
264 /// Gets the scheduler for the CPU core with the specified index
265 [[nodiscard]] Kernel::Scheduler& Scheduler(std::size_t core_index);
266
267 /// Gets the scheduler for the CPU core with the specified index
268 [[nodiscard]] const Kernel::Scheduler& Scheduler(std::size_t core_index) const;
269
270 /// Gets the global scheduler 258 /// Gets the global scheduler
271 [[nodiscard]] Kernel::GlobalScheduler& GlobalScheduler(); 259 [[nodiscard]] Kernel::GlobalSchedulerContext& GlobalSchedulerContext();
272 260
273 /// Gets the global scheduler 261 /// Gets the global scheduler
274 [[nodiscard]] const Kernel::GlobalScheduler& GlobalScheduler() const; 262 [[nodiscard]] const Kernel::GlobalSchedulerContext& GlobalSchedulerContext() const;
275 263
276 /// Gets the manager for the guest device memory 264 /// Gets the manager for the guest device memory
277 [[nodiscard]] Core::DeviceMemory& DeviceMemory(); 265 [[nodiscard]] Core::DeviceMemory& DeviceMemory();
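
The effect on call sites, visible throughout the rest of the diff: the per-core scheduler is now reached through KernelCore rather than System, and the accessor yields a pointer instead of a reference. A before/after sketch (simplified; `system` is a Core::System&):

    // Before: System owned the accessor and returned a reference.
    Kernel::Thread* before = system.CurrentScheduler().GetCurrentThread();

    // After: the accessor lives on KernelCore and returns KScheduler*. The
    // pointer can be null on host threads that are not emulated cores, which
    // is why KScheduler::DisableScheduling below null-checks it.
    Kernel::Thread* after = system.Kernel().CurrentScheduler()->GetCurrentThread();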
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 0cff985e9..179154348 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -10,9 +10,9 @@
10#include "core/core.h" 10#include "core/core.h"
11#include "core/core_timing.h" 11#include "core/core_timing.h"
12#include "core/cpu_manager.h" 12#include "core/cpu_manager.h"
13#include "core/hle/kernel/k_scheduler.h"
13#include "core/hle/kernel/kernel.h" 14#include "core/hle/kernel/kernel.h"
14#include "core/hle/kernel/physical_core.h" 15#include "core/hle/kernel/physical_core.h"
15#include "core/hle/kernel/scheduler.h"
16#include "core/hle/kernel/thread.h" 16#include "core/hle/kernel/thread.h"
17#include "video_core/gpu.h" 17#include "video_core/gpu.h"
18 18
@@ -109,11 +109,8 @@ void* CpuManager::GetStartFuncParamater() {
109 109
110void CpuManager::MultiCoreRunGuestThread() { 110void CpuManager::MultiCoreRunGuestThread() {
111 auto& kernel = system.Kernel(); 111 auto& kernel = system.Kernel();
112 { 112 kernel.CurrentScheduler()->OnThreadStart();
113 auto& sched = kernel.CurrentScheduler(); 113 auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
114 sched.OnThreadStart();
115 }
116 auto* thread = kernel.CurrentScheduler().GetCurrentThread();
117 auto& host_context = thread->GetHostContext(); 114 auto& host_context = thread->GetHostContext();
118 host_context->SetRewindPoint(GuestRewindFunction, this); 115 host_context->SetRewindPoint(GuestRewindFunction, this);
119 MultiCoreRunGuestLoop(); 116 MultiCoreRunGuestLoop();
@@ -130,8 +127,8 @@ void CpuManager::MultiCoreRunGuestLoop() {
130 physical_core = &kernel.CurrentPhysicalCore(); 127 physical_core = &kernel.CurrentPhysicalCore();
131 } 128 }
132 system.ExitDynarmicProfile(); 129 system.ExitDynarmicProfile();
133 auto& scheduler = kernel.CurrentScheduler(); 130 physical_core->ArmInterface().ClearExclusiveState();
134 scheduler.TryDoContextSwitch(); 131 kernel.CurrentScheduler()->RescheduleCurrentCore();
135 } 132 }
136} 133}
137 134
@@ -140,25 +137,21 @@ void CpuManager::MultiCoreRunIdleThread() {
140 while (true) { 137 while (true) {
141 auto& physical_core = kernel.CurrentPhysicalCore(); 138 auto& physical_core = kernel.CurrentPhysicalCore();
142 physical_core.Idle(); 139 physical_core.Idle();
143 auto& scheduler = kernel.CurrentScheduler(); 140 kernel.CurrentScheduler()->RescheduleCurrentCore();
144 scheduler.TryDoContextSwitch();
145 } 141 }
146} 142}
147 143
148void CpuManager::MultiCoreRunSuspendThread() { 144void CpuManager::MultiCoreRunSuspendThread() {
149 auto& kernel = system.Kernel(); 145 auto& kernel = system.Kernel();
150 { 146 kernel.CurrentScheduler()->OnThreadStart();
151 auto& sched = kernel.CurrentScheduler();
152 sched.OnThreadStart();
153 }
154 while (true) { 147 while (true) {
155 auto core = kernel.GetCurrentHostThreadID(); 148 auto core = kernel.GetCurrentHostThreadID();
156 auto& scheduler = kernel.CurrentScheduler(); 149 auto& scheduler = *kernel.CurrentScheduler();
157 Kernel::Thread* current_thread = scheduler.GetCurrentThread(); 150 Kernel::Thread* current_thread = scheduler.GetCurrentThread();
158 Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context); 151 Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context);
159 ASSERT(scheduler.ContextSwitchPending()); 152 ASSERT(scheduler.ContextSwitchPending());
160 ASSERT(core == kernel.GetCurrentHostThreadID()); 153 ASSERT(core == kernel.GetCurrentHostThreadID());
161 scheduler.TryDoContextSwitch(); 154 scheduler.RescheduleCurrentCore();
162 } 155 }
163} 156}
164 157
@@ -206,11 +199,8 @@ void CpuManager::MultiCorePause(bool paused) {
206 199
207void CpuManager::SingleCoreRunGuestThread() { 200void CpuManager::SingleCoreRunGuestThread() {
208 auto& kernel = system.Kernel(); 201 auto& kernel = system.Kernel();
209 { 202 kernel.CurrentScheduler()->OnThreadStart();
210 auto& sched = kernel.CurrentScheduler(); 203 auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
211 sched.OnThreadStart();
212 }
213 auto* thread = kernel.CurrentScheduler().GetCurrentThread();
214 auto& host_context = thread->GetHostContext(); 204 auto& host_context = thread->GetHostContext();
215 host_context->SetRewindPoint(GuestRewindFunction, this); 205 host_context->SetRewindPoint(GuestRewindFunction, this);
216 SingleCoreRunGuestLoop(); 206 SingleCoreRunGuestLoop();
@@ -218,7 +208,7 @@ void CpuManager::SingleCoreRunGuestThread() {
218 208
219void CpuManager::SingleCoreRunGuestLoop() { 209void CpuManager::SingleCoreRunGuestLoop() {
220 auto& kernel = system.Kernel(); 210 auto& kernel = system.Kernel();
221 auto* thread = kernel.CurrentScheduler().GetCurrentThread(); 211 auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
222 while (true) { 212 while (true) {
223 auto* physical_core = &kernel.CurrentPhysicalCore(); 213 auto* physical_core = &kernel.CurrentPhysicalCore();
224 system.EnterDynarmicProfile(); 214 system.EnterDynarmicProfile();
@@ -230,9 +220,10 @@ void CpuManager::SingleCoreRunGuestLoop() {
230 thread->SetPhantomMode(true); 220 thread->SetPhantomMode(true);
231 system.CoreTiming().Advance(); 221 system.CoreTiming().Advance();
232 thread->SetPhantomMode(false); 222 thread->SetPhantomMode(false);
223 physical_core->ArmInterface().ClearExclusiveState();
233 PreemptSingleCore(); 224 PreemptSingleCore();
234 auto& scheduler = kernel.Scheduler(current_core); 225 auto& scheduler = kernel.Scheduler(current_core);
235 scheduler.TryDoContextSwitch(); 226 scheduler.RescheduleCurrentCore();
236 } 227 }
237} 228}
238 229
@@ -244,24 +235,21 @@ void CpuManager::SingleCoreRunIdleThread() {
244 system.CoreTiming().AddTicks(1000U); 235 system.CoreTiming().AddTicks(1000U);
245 idle_count++; 236 idle_count++;
246 auto& scheduler = physical_core.Scheduler(); 237 auto& scheduler = physical_core.Scheduler();
247 scheduler.TryDoContextSwitch(); 238 scheduler.RescheduleCurrentCore();
248 } 239 }
249} 240}
250 241
251void CpuManager::SingleCoreRunSuspendThread() { 242void CpuManager::SingleCoreRunSuspendThread() {
252 auto& kernel = system.Kernel(); 243 auto& kernel = system.Kernel();
253 { 244 kernel.CurrentScheduler()->OnThreadStart();
254 auto& sched = kernel.CurrentScheduler();
255 sched.OnThreadStart();
256 }
257 while (true) { 245 while (true) {
258 auto core = kernel.GetCurrentHostThreadID(); 246 auto core = kernel.GetCurrentHostThreadID();
259 auto& scheduler = kernel.CurrentScheduler(); 247 auto& scheduler = *kernel.CurrentScheduler();
260 Kernel::Thread* current_thread = scheduler.GetCurrentThread(); 248 Kernel::Thread* current_thread = scheduler.GetCurrentThread();
261 Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context); 249 Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context);
262 ASSERT(scheduler.ContextSwitchPending()); 250 ASSERT(scheduler.ContextSwitchPending());
263 ASSERT(core == kernel.GetCurrentHostThreadID()); 251 ASSERT(core == kernel.GetCurrentHostThreadID());
264 scheduler.TryDoContextSwitch(); 252 scheduler.RescheduleCurrentCore();
265 } 253 }
266} 254}
267 255
@@ -280,12 +268,12 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
280 } 268 }
281 current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES); 269 current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
282 system.CoreTiming().ResetTicks(); 270 system.CoreTiming().ResetTicks();
283 scheduler.Unload(); 271 scheduler.Unload(scheduler.GetCurrentThread());
284 auto& next_scheduler = system.Kernel().Scheduler(current_core); 272 auto& next_scheduler = system.Kernel().Scheduler(current_core);
285 Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext()); 273 Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext());
286 /// May have changed scheduler 274 /// May have changed scheduler
287 auto& current_scheduler = system.Kernel().Scheduler(current_core); 275 auto& current_scheduler = system.Kernel().Scheduler(current_core);
288 current_scheduler.Reload(); 276 current_scheduler.Reload(scheduler.GetCurrentThread());
289 auto* currrent_thread2 = current_scheduler.GetCurrentThread(); 277 auto* currrent_thread2 = current_scheduler.GetCurrentThread();
290 if (!currrent_thread2->IsIdleThread()) { 278 if (!currrent_thread2->IsIdleThread()) {
291 idle_count = 0; 279 idle_count = 0;
@@ -369,8 +357,7 @@ void CpuManager::RunThread(std::size_t core) {
369 return; 357 return;
370 } 358 }
371 359
372 auto& scheduler = system.Kernel().CurrentScheduler(); 360 auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
373 Kernel::Thread* current_thread = scheduler.GetCurrentThread();
374 data.is_running = true; 361 data.is_running = true;
375 Common::Fiber::YieldTo(data.host_context, current_thread->GetHostContext()); 362 Common::Fiber::YieldTo(data.host_context, current_thread->GetHostContext());
376 data.is_running = false; 363 data.is_running = false;
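
The run loops in cpu_manager.cpp now clear the core's exclusive monitor state and call KScheduler::RescheduleCurrentCore() where they previously called TryDoContextSwitch(). A condensed sketch of the resulting multicore guest loop (profiling calls omitted; PhysicalCore::Run() is assumed from the surrounding code, as it is not shown in this hunk):

    // Condensed from MultiCoreRunGuestLoop above.
    void RunGuestLoop(Kernel::KernelCore& kernel) {
        while (true) {
            auto* physical_core = &kernel.CurrentPhysicalCore();
            while (!physical_core->IsInterrupted()) {
                physical_core->Run();
                // A switch may have moved this fiber to another physical core.
                physical_core = &kernel.CurrentPhysicalCore();
            }
            // Drop any pending LDXR/STXR reservation before rescheduling.
            physical_core->ArmInterface().ClearExclusiveState();
            kernel.CurrentScheduler()->RescheduleCurrentCore();
        }
    }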
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 048acd30e..bc32be18b 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -12,8 +12,8 @@
12#include "core/hle/kernel/address_arbiter.h" 12#include "core/hle/kernel/address_arbiter.h"
13#include "core/hle/kernel/errors.h" 13#include "core/hle/kernel/errors.h"
14#include "core/hle/kernel/handle_table.h" 14#include "core/hle/kernel/handle_table.h"
15#include "core/hle/kernel/k_scheduler.h"
15#include "core/hle/kernel/kernel.h" 16#include "core/hle/kernel/kernel.h"
16#include "core/hle/kernel/scheduler.h"
17#include "core/hle/kernel/thread.h" 17#include "core/hle/kernel/thread.h"
18#include "core/hle/kernel/time_manager.h" 18#include "core/hle/kernel/time_manager.h"
19#include "core/hle/result.h" 19#include "core/hle/result.h"
@@ -153,7 +153,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
153 bool should_decrement) { 153 bool should_decrement) {
154 auto& memory = system.Memory(); 154 auto& memory = system.Memory();
155 auto& kernel = system.Kernel(); 155 auto& kernel = system.Kernel();
156 Thread* current_thread = system.CurrentScheduler().GetCurrentThread(); 156 Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
157 157
158 Handle event_handle = InvalidHandle; 158 Handle event_handle = InvalidHandle;
159 { 159 {
@@ -223,7 +223,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
223ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) { 223ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
224 auto& memory = system.Memory(); 224 auto& memory = system.Memory();
225 auto& kernel = system.Kernel(); 225 auto& kernel = system.Kernel();
226 Thread* current_thread = system.CurrentScheduler().GetCurrentThread(); 226 Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
227 227
228 Handle event_handle = InvalidHandle; 228 Handle event_handle = InvalidHandle;
229 { 229 {
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index 3e745c18b..40988b0fd 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -8,9 +8,9 @@
8#include "core/core.h" 8#include "core/core.h"
9#include "core/hle/kernel/errors.h" 9#include "core/hle/kernel/errors.h"
10#include "core/hle/kernel/handle_table.h" 10#include "core/hle/kernel/handle_table.h"
11#include "core/hle/kernel/k_scheduler.h"
11#include "core/hle/kernel/kernel.h" 12#include "core/hle/kernel/kernel.h"
12#include "core/hle/kernel/process.h" 13#include "core/hle/kernel/process.h"
13#include "core/hle/kernel/scheduler.h"
14#include "core/hle/kernel/thread.h" 14#include "core/hle/kernel/thread.h"
15 15
16namespace Kernel { 16namespace Kernel {
@@ -105,7 +105,7 @@ bool HandleTable::IsValid(Handle handle) const {
105 105
106std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const { 106std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
107 if (handle == CurrentThread) { 107 if (handle == CurrentThread) {
108 return SharedFrom(kernel.CurrentScheduler().GetCurrentThread()); 108 return SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
109 } else if (handle == CurrentProcess) { 109 } else if (handle == CurrentProcess) {
110 return SharedFrom(kernel.CurrentProcess()); 110 return SharedFrom(kernel.CurrentProcess());
111 } 111 }
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 81f85643b..7eda89786 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -17,11 +17,11 @@
17#include "core/hle/kernel/errors.h" 17#include "core/hle/kernel/errors.h"
18#include "core/hle/kernel/handle_table.h" 18#include "core/hle/kernel/handle_table.h"
19#include "core/hle/kernel/hle_ipc.h" 19#include "core/hle/kernel/hle_ipc.h"
20#include "core/hle/kernel/k_scheduler.h"
20#include "core/hle/kernel/kernel.h" 21#include "core/hle/kernel/kernel.h"
21#include "core/hle/kernel/object.h" 22#include "core/hle/kernel/object.h"
22#include "core/hle/kernel/process.h" 23#include "core/hle/kernel/process.h"
23#include "core/hle/kernel/readable_event.h" 24#include "core/hle/kernel/readable_event.h"
24#include "core/hle/kernel/scheduler.h"
25#include "core/hle/kernel/server_session.h" 25#include "core/hle/kernel/server_session.h"
26#include "core/hle/kernel/thread.h" 26#include "core/hle/kernel/thread.h"
27#include "core/hle/kernel/time_manager.h" 27#include "core/hle/kernel/time_manager.h"
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
new file mode 100644
index 000000000..7f7da610d
--- /dev/null
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -0,0 +1,873 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphere, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
7
8#include <algorithm>
9#include <mutex>
10#include <set>
11#include <unordered_set>
12#include <utility>
13
14#include "common/assert.h"
15#include "common/bit_util.h"
16#include "common/fiber.h"
17#include "common/logging/log.h"
18#include "core/arm/arm_interface.h"
19#include "core/core.h"
20#include "core/core_timing.h"
21#include "core/cpu_manager.h"
22#include "core/hle/kernel/kernel.h"
23#include "core/hle/kernel/physical_core.h"
24#include "core/hle/kernel/process.h"
25#include "core/hle/kernel/k_scheduler.h"
26#include "core/hle/kernel/thread.h"
27#include "core/hle/kernel/time_manager.h"
28
29namespace Kernel {
30
31static void IncrementScheduledCount(Kernel::Thread* thread) {
32 if (auto process = thread->GetOwnerProcess(); process) {
33 process->IncrementScheduledCount();
34 }
35}
36
37GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
38 : kernel{kernel}, scheduler_lock{kernel} {}
39
40GlobalSchedulerContext::~GlobalSchedulerContext() = default;
41
42/*static*/ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
43 Core::EmuThreadHandle global_thread) {
44 u32 current_core = global_thread.host_handle;
45 bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
46 (current_core < Core::Hardware::NUM_CPU_CORES);
47
48 while (cores_pending_reschedule != 0) {
49 u32 core = Common::CountTrailingZeroes64(cores_pending_reschedule);
50 ASSERT(core < Core::Hardware::NUM_CPU_CORES);
51 if (!must_context_switch || core != current_core) {
52 auto& phys_core = kernel.PhysicalCore(core);
53 phys_core.Interrupt();
54 } else {
55 must_context_switch = true;
56 }
57 cores_pending_reschedule &= ~(1ULL << core);
58 }
59 if (must_context_switch) {
60 auto core_scheduler = kernel.CurrentScheduler();
61 kernel.ExitSVCProfile();
62 core_scheduler->RescheduleCurrentCore();
63 kernel.EnterSVCProfile();
64 }
65}
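
RescheduleCores consumes cores_pending_reschedule one set bit at a time: CountTrailingZeroes64 selects the lowest pending core, that core is interrupted (or, for the current core, a context switch is performed after the loop), and the bit is cleared. A self-contained sketch of the same set-bit iteration idiom, with C++20 std::countr_zero standing in for Common::CountTrailingZeroes64 and a caller-supplied visitor standing in for PhysicalCore::Interrupt():

    #include <bit>
    #include <cstdint>

    // Visit each set bit of a 64-bit mask, lowest bit first.
    template <typename Visitor>
    void ForEachSetBit(std::uint64_t mask, Visitor&& visit) {
        while (mask != 0) {
            const int bit = std::countr_zero(mask); // index of the lowest set bit
            visit(static_cast<std::uint32_t>(bit));
            mask &= mask - 1; // clear the lowest set bit
        }
    }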
66
67u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
68 std::scoped_lock lock{guard};
69 if (Thread* prev_highest_thread = this->state.highest_priority_thread;
70 prev_highest_thread != highest_thread) {
71 if (prev_highest_thread != nullptr) {
72 IncrementScheduledCount(prev_highest_thread);
73 prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks());
74 }
75 if (this->state.should_count_idle) {
76 if (highest_thread != nullptr) {
77 // if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
78 // process->SetRunningThread(this->core_id, highest_thread,
79 // this->state.idle_count);
80 //}
81 } else {
82 this->state.idle_count++;
83 }
84 }
85
86 this->state.highest_priority_thread = highest_thread;
87 this->state.needs_scheduling = true;
88 return (1ULL << this->core_id);
89 } else {
90 return 0;
91 }
92}
93
94/*static*/ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
95 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
96
97 /* Clear that we need to update. */
98 ClearSchedulerUpdateNeeded(kernel);
99
100 u64 cores_needing_scheduling = 0, idle_cores = 0;
101 Thread* top_threads[Core::Hardware::NUM_CPU_CORES];
102 auto& priority_queue = GetPriorityQueue(kernel);
103
104 /* We want to go over all cores, finding the highest priority thread and determining if
105 * scheduling is needed for that core. */
106 for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
107 Thread* top_thread = priority_queue.GetScheduledFront((s32)core_id);
108 if (top_thread != nullptr) {
109 ///* If the thread has no waiters, we need to check if the process has a thread pinned.
110 ///*/
111 // if (top_thread->GetNumKernelWaiters() == 0) {
112 // if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) {
113 // if (Thread* pinned = parent->GetPinnedThread(core_id);
114 // pinned != nullptr && pinned != top_thread) {
115 // /* We prefer our parent's pinned thread if possible. However, we also
116 // don't
117 // * want to schedule un-runnable threads. */
118 // if (pinned->GetRawState() == Thread::ThreadState_Runnable) {
119 // top_thread = pinned;
120 // } else {
121 // top_thread = nullptr;
122 // }
123 // }
124 // }
125 //}
126 } else {
127 idle_cores |= (1ULL << core_id);
128 }
129
130 top_threads[core_id] = top_thread;
131 cores_needing_scheduling |=
132 kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
133 }
134
135 /* Idle cores are bad. We're going to try to migrate threads to each idle core in turn. */
136 while (idle_cores != 0) {
137 u32 core_id = Common::CountTrailingZeroes64(idle_cores);
138 if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
139 s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
140 size_t num_candidates = 0;
141
142 /* While we have a suggested thread, try to migrate it! */
143 while (suggested != nullptr) {
144 /* Check if the suggested thread is the top thread on its core. */
145 const s32 suggested_core = suggested->GetActiveCore();
146 if (Thread* top_thread =
147 (suggested_core >= 0) ? top_threads[suggested_core] : nullptr;
148 top_thread != suggested) {
149 /* Make sure we're not dealing with threads too high priority for migration. */
150 if (top_thread != nullptr &&
151 top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) {
152 break;
153 }
154
155 /* The suggested thread isn't bound to its core, so we can migrate it! */
156 suggested->SetActiveCore(core_id);
157 priority_queue.ChangeCore(suggested_core, suggested);
158
159 top_threads[core_id] = suggested;
160 cores_needing_scheduling |=
161 kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
162 break;
163 }
164
165 /* Note this core as a candidate for migration. */
166 ASSERT(num_candidates < Core::Hardware::NUM_CPU_CORES);
167 migration_candidates[num_candidates++] = suggested_core;
168 suggested = priority_queue.GetSuggestedNext(core_id, suggested);
169 }
170
171 /* If suggested is nullptr, we failed to migrate a specific thread. So let's try all our
172 * candidate cores' top threads. */
173 if (suggested == nullptr) {
174 for (size_t i = 0; i < num_candidates; i++) {
175 /* Check if there's some other thread that can run on the candidate core. */
176 const s32 candidate_core = migration_candidates[i];
177 suggested = top_threads[candidate_core];
178 if (Thread* next_on_candidate_core =
179 priority_queue.GetScheduledNext(candidate_core, suggested);
180 next_on_candidate_core != nullptr) {
181 /* The candidate core can run some other thread! We'll migrate its current
182 * top thread to us. */
183 top_threads[candidate_core] = next_on_candidate_core;
184 cores_needing_scheduling |=
185 kernel.Scheduler(candidate_core)
186 .UpdateHighestPriorityThread(top_threads[candidate_core]);
187
188 /* Perform the migration. */
189 suggested->SetActiveCore(core_id);
190 priority_queue.ChangeCore(candidate_core, suggested);
191
192 top_threads[core_id] = suggested;
193 cores_needing_scheduling |=
194 kernel.Scheduler(core_id).UpdateHighestPriorityThread(
195 top_threads[core_id]);
196 break;
197 }
198 }
199 }
200 }
201
202 idle_cores &= ~(1ULL << core_id);
203 }
204
205 return cores_needing_scheduling;
206}
207
208void GlobalSchedulerContext::AddThread(std::shared_ptr<Thread> thread) {
209 std::scoped_lock lock{global_list_guard};
210 thread_list.push_back(std::move(thread));
211}
212
213void GlobalSchedulerContext::RemoveThread(std::shared_ptr<Thread> thread) {
214 std::scoped_lock lock{global_list_guard};
215 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
216 thread_list.end());
217}
218
219void GlobalSchedulerContext::PreemptThreads() {
220 // The priority levels at which the global scheduler preempts threads every 10 ms. They are
221 // ordered from Core 0 to Core 3.
222 std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 63};
223
224 ASSERT(IsLocked());
225 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
226 const u32 priority = preemption_priorities[core_id];
227 kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
228 }
229}
230
231bool GlobalSchedulerContext::IsLocked() const {
232 return scheduler_lock.IsLockedByCurrentThread();
233}
234
235/*static*/ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread,
236 u32 old_state) {
237 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
238
239 /* Check if the state has changed, because if it hasn't there's nothing to do. */
240 const auto cur_state = thread->scheduling_state;
241 if (cur_state == old_state) {
242 return;
243 }
244
245 /* Update the priority queues. */
246 if (old_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
247 /* If we were previously runnable, then we're not runnable now, and we should remove. */
248 GetPriorityQueue(kernel).Remove(thread);
249 IncrementScheduledCount(thread);
250 SetSchedulerUpdateNeeded(kernel);
251 } else if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
252 /* If we're now runnable, then we weren't previously, and we should add. */
253 GetPriorityQueue(kernel).PushBack(thread);
254 IncrementScheduledCount(thread);
255 SetSchedulerUpdateNeeded(kernel);
256 }
257}
258
259/*static*/ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread,
260 Thread* current_thread, u32 old_priority) {
261
262 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
263
264 /* If the thread is runnable, we want to change its priority in the queue. */
265 if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
266 GetPriorityQueue(kernel).ChangePriority(
267 old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
268 IncrementScheduledCount(thread);
269 SetSchedulerUpdateNeeded(kernel);
270 }
271}
272
273/*static*/ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
274 const KAffinityMask& old_affinity,
275 s32 old_core) {
276 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
277
278 /* If the thread is runnable, we want to change its affinity in the queue. */
279 if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
280 GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread);
281 IncrementScheduledCount(thread);
282 SetSchedulerUpdateNeeded(kernel);
283 }
284}
285
286void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
287 ASSERT(system.GlobalSchedulerContext().IsLocked());
288
289 /* Get a reference to the priority queue. */
290 auto& kernel = system.Kernel();
291 auto& priority_queue = GetPriorityQueue(kernel);
292
293 /* Rotate the front of the queue to the end. */
294 Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
295 Thread* next_thread = nullptr;
296 if (top_thread != nullptr) {
297 next_thread = priority_queue.MoveToScheduledBack(top_thread);
298 if (next_thread != top_thread) {
299 IncrementScheduledCount(top_thread);
300 IncrementScheduledCount(next_thread);
301 }
302 }
303
304 /* While we have a suggested thread, try to migrate it! */
305 {
306 Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
307 while (suggested != nullptr) {
308 /* Check if the suggested thread is the top thread on its core. */
309 const s32 suggested_core = suggested->GetActiveCore();
310 if (Thread* top_on_suggested_core =
311 (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
312 : nullptr;
313 top_on_suggested_core != suggested) {
314 /* If the next thread is a new thread that has been waiting longer than our
315 * suggestion, we prefer it to our suggestion. */
316 if (top_thread != next_thread && next_thread != nullptr &&
317 next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) {
318 suggested = nullptr;
319 break;
320 }
321
322 /* If we're allowed to do a migration, do one. */
323 /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion
324 * to the front of the queue. */
325 if (top_on_suggested_core == nullptr ||
326 top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
327 suggested->SetActiveCore(core_id);
328 priority_queue.ChangeCore(suggested_core, suggested, true);
329 IncrementScheduledCount(suggested);
330 break;
331 }
332 }
333
334 /* Get the next suggestion. */
335 suggested = priority_queue.GetSamePriorityNext(core_id, suggested);
336 }
337 }
338
339 /* Now that we might have migrated a thread with the same priority, check if we can do better.
340 */
341 {
342 Thread* best_thread = priority_queue.GetScheduledFront(core_id);
343 if (best_thread == GetCurrentThread()) {
344 best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
345 }
346
347 /* If the best thread we can choose has a priority the same or worse than ours, try to
348 * migrate a higher priority thread. */
349 if (best_thread != nullptr && best_thread->GetPriority() >= static_cast<u32>(priority)) {
350 Thread* suggested = priority_queue.GetSuggestedFront(core_id);
351 while (suggested != nullptr) {
352 /* If the suggestion's priority is the same as ours, don't bother. */
353 if (suggested->GetPriority() >= best_thread->GetPriority()) {
354 break;
355 }
356
357 /* Check if the suggested thread is the top thread on its core. */
358 const s32 suggested_core = suggested->GetActiveCore();
359 if (Thread* top_on_suggested_core =
360 (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
361 : nullptr;
362 top_on_suggested_core != suggested) {
363 /* If we're allowed to do a migration, do one. */
364 /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
365 * suggestion to the front of the queue. */
366 if (top_on_suggested_core == nullptr ||
367 top_on_suggested_core->GetPriority() >=
368 HighestCoreMigrationAllowedPriority) {
369 suggested->SetActiveCore(core_id);
370 priority_queue.ChangeCore(suggested_core, suggested, true);
371 IncrementScheduledCount(suggested);
372 break;
373 }
374 }
375
376 /* Get the next suggestion. */
377 suggested = priority_queue.GetSuggestedNext(core_id, suggested);
378 }
379 }
380 }
381
382 /* After a rotation, we need a scheduler update. */
383 SetSchedulerUpdateNeeded(kernel);
384}
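
RotateScheduledQueue is the preemption primitive used by PreemptThreads: the thread at the front of the given priority level rotates to the back via MoveToScheduledBack, after which migration candidates are considered. A toy model of the rotation step on a plain std::deque, standing in for the intrusive KPriorityQueue:

    #include <deque>

    // Toy stand-in for KPriorityQueue::MoveToScheduledBack: rotate the front
    // thread of one priority level to the back and return the new front
    // (nullptr when the level is empty).
    template <typename Thread>
    Thread* RotateFrontToBack(std::deque<Thread*>& level) {
        if (level.empty()) {
            return nullptr;
        }
        Thread* front = level.front();
        level.pop_front();
        level.push_back(front);
        return level.front();
    }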
385
386/*static*/ bool KScheduler::CanSchedule(KernelCore& kernel) {
387 return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1;
388}
389
390/*static*/ bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
391 return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire);
392}
393
394/*static*/ void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) {
395 kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release);
396}
397
398/*static*/ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
399 kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release);
400}
401
402/*static*/ void KScheduler::DisableScheduling(KernelCore& kernel) {
403 if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
404 ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
405 scheduler->GetCurrentThread()->DisableDispatch();
406 }
407}
408
409/*static*/ void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
410 Core::EmuThreadHandle global_thread) {
411 if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
412 scheduler->GetCurrentThread()->EnableDispatch();
413 }
414 RescheduleCores(kernel, cores_needing_scheduling, global_thread);
415}
416
417/*static*/ u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
418 if (IsSchedulerUpdateNeeded(kernel)) {
419 return UpdateHighestPriorityThreadsImpl(kernel);
420 } else {
421 return 0;
422 }
423}
424
425/*static*/ KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
426 return kernel.GlobalSchedulerContext().priority_queue;
427}
428
429void KScheduler::YieldWithoutCoreMigration() {
430 auto& kernel = system.Kernel();
431
432 /* Validate preconditions. */
433 ASSERT(CanSchedule(kernel));
434 ASSERT(kernel.CurrentProcess() != nullptr);
435
436 /* Get the current thread and process. */
437 Thread& cur_thread = *GetCurrentThread();
438 Process& cur_process = *kernel.CurrentProcess();
439
440 /* If the thread's yield count matches, there's nothing for us to do. */
441 if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
442 return;
443 }
444
445 /* Get a reference to the priority queue. */
446 auto& priority_queue = GetPriorityQueue(kernel);
447
448 /* Perform the yield. */
449 {
450 SchedulerLock lock(kernel);
451
452 const auto cur_state = cur_thread.scheduling_state;
453 if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
454 /* Put the current thread at the back of the queue. */
455 Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
456 IncrementScheduledCount(std::addressof(cur_thread));
457
458 /* If the next thread is different, we have an update to perform. */
459 if (next_thread != std::addressof(cur_thread)) {
460 SetSchedulerUpdateNeeded(kernel);
461 } else {
462 /* Otherwise, set the thread's yield count so that we won't waste work until the
463 * process is scheduled again. */
464 cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
465 }
466 }
467 }
468}
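
All three yield variants share the early-out at the top: when a yield turns out to be redundant, the thread stamps itself with the process's scheduled-count, and further yields are skipped until the process is scheduled again (which advances the count). A minimal model of that stamp pattern, with hypothetical names:

    #include <cstdint>

    // Hypothetical model of the yield-count early-out used above.
    struct Proc { std::uint64_t scheduled_count = 0; };
    struct Thr { std::uint64_t yield_schedule_count = UINT64_MAX; };

    // True when nothing has been scheduled since the last redundant yield,
    // so yielding again cannot change the run queues.
    bool YieldIsRedundant(const Thr& t, const Proc& p) {
        return t.yield_schedule_count == p.scheduled_count;
    }

    // Called when a yield did not change the run queues.
    void StampRedundantYield(Thr& t, const Proc& p) {
        t.yield_schedule_count = p.scheduled_count;
    }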
469
470void KScheduler::YieldWithCoreMigration() {
471 auto& kernel = system.Kernel();
472
473 /* Validate preconditions. */
474 ASSERT(CanSchedule(kernel));
475 ASSERT(kernel.CurrentProcess() != nullptr);
476
477 /* Get the current thread and process. */
478 Thread& cur_thread = *GetCurrentThread();
479 Process& cur_process = *kernel.CurrentProcess();
480
481 /* If the thread's yield count matches, there's nothing for us to do. */
482 if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
483 return;
484 }
485
486 /* Get a reference to the priority queue. */
487 auto& priority_queue = GetPriorityQueue(kernel);
488
489 /* Perform the yield. */
490 {
491 SchedulerLock lock(kernel);
492
493 const auto cur_state = cur_thread.scheduling_state;
494 if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
495 /* Get the current active core. */
496 const s32 core_id = cur_thread.GetActiveCore();
497
498 /* Put the current thread at the back of the queue. */
499 Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
500 IncrementScheduledCount(std::addressof(cur_thread));
501
502 /* While we have a suggested thread, try to migrate it! */
503 bool recheck = false;
504 Thread* suggested = priority_queue.GetSuggestedFront(core_id);
505 while (suggested != nullptr) {
506 /* Check if the suggested thread is the thread running on its core. */
507 const s32 suggested_core = suggested->GetActiveCore();
508
509 if (Thread* running_on_suggested_core =
510 (suggested_core >= 0)
511 ? kernel.Scheduler(suggested_core).state.highest_priority_thread
512 : nullptr;
513 running_on_suggested_core != suggested) {
514 /* If the current thread's priority is higher than our suggestion's we prefer
515 * the next thread to the suggestion. */
516 /* We also prefer the next thread when the current thread's priority is equal to
 517 * the suggestion's, but the next thread has been waiting longer. */
518 if ((suggested->GetPriority() > cur_thread.GetPriority()) ||
519 (suggested->GetPriority() == cur_thread.GetPriority() &&
520 next_thread != std::addressof(cur_thread) &&
521 next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick())) {
522 suggested = nullptr;
523 break;
524 }
525
526 /* If we're allowed to do a migration, do one. */
527 /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
528 * suggestion to the front of the queue. */
529 if (running_on_suggested_core == nullptr ||
530 running_on_suggested_core->GetPriority() >=
531 HighestCoreMigrationAllowedPriority) {
532 suggested->SetActiveCore(core_id);
533 priority_queue.ChangeCore(suggested_core, suggested, true);
534 IncrementScheduledCount(suggested);
535 break;
536 } else {
537 /* We couldn't perform a migration, but we should check again on a future
538 * yield. */
539 recheck = true;
540 }
541 }
542
543 /* Get the next suggestion. */
544 suggested = priority_queue.GetSuggestedNext(core_id, suggested);
545 }
546
547 /* If we still have a suggestion or the next thread is different, we have an update to
548 * perform. */
549 if (suggested != nullptr || next_thread != std::addressof(cur_thread)) {
550 SetSchedulerUpdateNeeded(kernel);
551 } else if (!recheck) {
552 /* Otherwise if we don't need to re-check, set the thread's yield count so that we
553 * won't waste work until the process is scheduled again. */
554 cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
555 }
556 }
557 }
558}
559
560void KScheduler::YieldToAnyThread() {
561 auto& kernel = system.Kernel();
562
563 /* Validate preconditions. */
564 ASSERT(CanSchedule(kernel));
565 ASSERT(kernel.CurrentProcess() != nullptr);
566
567 /* Get the current thread and process. */
568 Thread& cur_thread = *GetCurrentThread();
569 Process& cur_process = *kernel.CurrentProcess();
570
571 /* If the thread's yield count matches, there's nothing for us to do. */
572 if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
573 return;
574 }
575
576 /* Get a reference to the priority queue. */
577 auto& priority_queue = GetPriorityQueue(kernel);
578
579 /* Perform the yield. */
580 {
581 SchedulerLock lock(kernel);
582
583 const auto cur_state = cur_thread.scheduling_state;
584 if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
585 /* Get the current active core. */
586 const s32 core_id = cur_thread.GetActiveCore();
587
588 /* Migrate the current thread to core -1. */
589 cur_thread.SetActiveCore(-1);
590 priority_queue.ChangeCore(core_id, std::addressof(cur_thread));
591 IncrementScheduledCount(std::addressof(cur_thread));
592
593 /* If there's nothing scheduled, we can try to perform a migration. */
594 if (priority_queue.GetScheduledFront(core_id) == nullptr) {
595 /* While we have a suggested thread, try to migrate it! */
596 Thread* suggested = priority_queue.GetSuggestedFront(core_id);
597 while (suggested != nullptr) {
598 /* Check if the suggested thread is the top thread on its core. */
599 const s32 suggested_core = suggested->GetActiveCore();
600 if (Thread* top_on_suggested_core =
601 (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
602 : nullptr;
603 top_on_suggested_core != suggested) {
604 /* If we're allowed to do a migration, do one. */
605 if (top_on_suggested_core == nullptr ||
606 top_on_suggested_core->GetPriority() >=
607 HighestCoreMigrationAllowedPriority) {
608 suggested->SetActiveCore(core_id);
609 priority_queue.ChangeCore(suggested_core, suggested);
610 IncrementScheduledCount(suggested);
611 }
612
613 /* Regardless of whether we migrated, we had a candidate, so we're done. */
614 break;
615 }
616
617 /* Get the next suggestion. */
618 suggested = priority_queue.GetSuggestedNext(core_id, suggested);
619 }
620
621 /* If the suggestion is different from the current thread, we need to perform an
622 * update. */
623 if (suggested != std::addressof(cur_thread)) {
624 SetSchedulerUpdateNeeded(kernel);
625 } else {
626 /* Otherwise, set the thread's yield count so that we won't waste work until the
627 * process is scheduled again. */
628 cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
629 }
630 } else {
631 /* Otherwise, we have an update to perform. */
632 SetSchedulerUpdateNeeded(kernel);
633 }
634 }
635 }
636}
637
638void GlobalSchedulerContext::Lock() {
639 scheduler_lock.Lock();
640}
641
642void GlobalSchedulerContext::Unlock() {
643 scheduler_lock.Unlock();
644}
645
646KScheduler::KScheduler(Core::System& system, std::size_t core_id)
647 : system(system), core_id(core_id) {
648 switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this);
649 this->state.needs_scheduling = true;
650 this->state.interrupt_task_thread_runnable = false;
651 this->state.should_count_idle = false;
652 this->state.idle_count = 0;
653 this->state.idle_thread_stack = nullptr;
654 this->state.highest_priority_thread = nullptr;
655}
656
657KScheduler::~KScheduler() = default;
658
659Thread* KScheduler::GetCurrentThread() const {
660 if (current_thread) {
661 return current_thread;
662 }
663 return idle_thread;
664}
665
666u64 KScheduler::GetLastContextSwitchTicks() const {
667 return last_context_switch_time;
668}
669
670void KScheduler::RescheduleCurrentCore() {
671 ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
672
673 auto& phys_core = system.Kernel().PhysicalCore(core_id);
674 if (phys_core.IsInterrupted()) {
675 phys_core.ClearInterrupt();
676 }
677 guard.lock();
678 if (this->state.needs_scheduling) {
679 Schedule();
680 } else {
681 guard.unlock();
682 }
683}
684
685void KScheduler::OnThreadStart() {
686 SwitchContextStep2();
687}
688
689void KScheduler::Unload(Thread* thread) {
690 if (thread) {
691 thread->SetIsRunning(false);
692 if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) {
693 system.ArmInterface(core_id).ExceptionalExit();
694 thread->SetContinuousOnSVC(false);
695 }
696 if (!thread->IsHLEThread() && !thread->HasExited()) {
697 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
698 cpu_core.SaveContext(thread->GetContext32());
699 cpu_core.SaveContext(thread->GetContext64());
700 // Save the TPIDR_EL0 system register in case it was modified.
701 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
702 cpu_core.ClearExclusiveState();
703 }
704 thread->context_guard.unlock();
705 }
706}
707
708void KScheduler::Reload(Thread* thread) {
709 if (thread) {
710 ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
711 "Thread must be runnable.");
712
713 // Cancel any outstanding wakeup events for this thread
714 thread->SetIsRunning(true);
715 thread->SetWasRunning(false);
716
717 auto* const thread_owner_process = thread->GetOwnerProcess();
718 if (thread_owner_process != nullptr) {
719 system.Kernel().MakeCurrentProcess(thread_owner_process);
720 }
721 if (!thread->IsHLEThread()) {
722 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
723 cpu_core.LoadContext(thread->GetContext32());
724 cpu_core.LoadContext(thread->GetContext64());
725 cpu_core.SetTlsAddress(thread->GetTLSAddress());
726 cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
727 cpu_core.ClearExclusiveState();
728 }
729 }
730}
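
Unload and Reload form the save/restore pair around every switch: the outgoing thread's ARM state, including TPIDR_EL0, is written back into the Thread object and the exclusive monitor is cleared; the incoming thread's state is loaded along with its TLS address. Condensed from the two functions above, with the HLE-thread and SVC special cases omitted:

    // Condensed sketch of the Unload/Reload context pairing shown above.
    void SaveGuestContext(Core::ARM_Interface& cpu, Kernel::Thread& thread) {
        cpu.SaveContext(thread.GetContext32());
        cpu.SaveContext(thread.GetContext64());
        thread.SetTPIDR_EL0(cpu.GetTPIDR_EL0()); // guest may have written the TLS register
        cpu.ClearExclusiveState();               // no reservation may survive the switch
    }

    void LoadGuestContext(Core::ARM_Interface& cpu, Kernel::Thread& thread) {
        cpu.LoadContext(thread.GetContext32());
        cpu.LoadContext(thread.GetContext64());
        cpu.SetTlsAddress(thread.GetTLSAddress());
        cpu.SetTPIDR_EL0(thread.GetTPIDR_EL0());
        cpu.ClearExclusiveState();
    }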
731
732void KScheduler::SwitchContextStep2() {
733 // Load context of new thread
734 Reload(current_thread);
735
736 RescheduleCurrentCore();
737}
738
739void KScheduler::ScheduleImpl() {
740 Thread* previous_thread = current_thread;
741 current_thread = state.highest_priority_thread;
742
743 this->state.needs_scheduling = false;
744
745 if (current_thread == previous_thread) {
746 guard.unlock();
747 return;
748 }
749
750 Process* const previous_process = system.Kernel().CurrentProcess();
751
752 UpdateLastContextSwitchTime(previous_thread, previous_process);
753
754 // Save context for previous thread
755 Unload(previous_thread);
756
757 std::shared_ptr<Common::Fiber>* old_context;
758 if (previous_thread != nullptr) {
759 old_context = &previous_thread->GetHostContext();
760 } else {
761 old_context = &idle_thread->GetHostContext();
762 }
763 guard.unlock();
764
765 Common::Fiber::YieldTo(*old_context, switch_fiber);
 766 /// When a thread wakes up, the scheduler may have changed to that of another core.
767 auto& next_scheduler = *system.Kernel().CurrentScheduler();
768 next_scheduler.SwitchContextStep2();
769}
770
771void KScheduler::OnSwitch(void* this_scheduler) {
772 KScheduler* sched = static_cast<KScheduler*>(this_scheduler);
773 sched->SwitchToCurrent();
774}
775
776void KScheduler::SwitchToCurrent() {
777 while (true) {
778 {
779 std::scoped_lock lock{guard};
780 current_thread = state.highest_priority_thread;
781 this->state.needs_scheduling = false;
782 }
783 const auto is_switch_pending = [this] {
784 std::scoped_lock lock{guard};
785 return !!this->state.needs_scheduling;
786 };
787 do {
788 if (current_thread != nullptr && !current_thread->IsHLEThread()) {
789 current_thread->context_guard.lock();
790 if (!current_thread->IsRunnable()) {
791 current_thread->context_guard.unlock();
792 break;
793 }
794 if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
795 current_thread->context_guard.unlock();
796 break;
797 }
798 }
799 std::shared_ptr<Common::Fiber>* next_context;
800 if (current_thread != nullptr) {
801 next_context = &current_thread->GetHostContext();
802 } else {
803 next_context = &idle_thread->GetHostContext();
804 }
805 Common::Fiber::YieldTo(switch_fiber, *next_context);
806 } while (!is_switch_pending());
807 }
808}
809
810void KScheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
811 const u64 prev_switch_ticks = last_context_switch_time;
812 const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
813 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
814
815 if (thread != nullptr) {
816 thread->UpdateCPUTimeTicks(update_ticks);
817 }
818
819 if (process != nullptr) {
820 process->UpdateCPUTimeTicks(update_ticks);
821 }
822
823 last_context_switch_time = most_recent_switch_ticks;
824}
825
826void KScheduler::Initialize() {
827 std::string name = "Idle Thread Id:" + std::to_string(core_id);
828 std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
829 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
830 ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
831 auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
832 nullptr, std::move(init_func), init_func_parameter);
833 idle_thread = thread_res.Unwrap().get();
834
835 {
836 KScopedSchedulerLock lock{system.Kernel()};
837 idle_thread->SetStatus(ThreadStatus::Ready);
838 }
839}
840
841SchedulerLock::SchedulerLock(KernelCore& kernel) : kernel{kernel} {
842 kernel.GlobalSchedulerContext().Lock();
843}
844
845SchedulerLock::~SchedulerLock() {
846 kernel.GlobalSchedulerContext().Unlock();
847}
848
849SchedulerLockAndSleep::SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle,
850 Thread* time_task, s64 nanoseconds)
851 : SchedulerLock{kernel}, event_handle{event_handle}, time_task{time_task}, nanoseconds{
852 nanoseconds} {
853 event_handle = InvalidHandle;
854}
855
856SchedulerLockAndSleep::~SchedulerLockAndSleep() {
857 if (sleep_cancelled) {
858 return;
859 }
860 auto& time_manager = kernel.TimeManager();
861 time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
862}
863
864void SchedulerLockAndSleep::Release() {
865 if (sleep_cancelled) {
866 return;
867 }
868 auto& time_manager = kernel.TimeManager();
869 time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
870 sleep_cancelled = true;
871}
872
873} // namespace Kernel
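
The file closes with the RAII lock helpers. SchedulerLock takes and releases the global scheduler lock; SchedulerLockAndSleep additionally arms a wake-up time event when it leaves scope, unless the sleep was cancelled first. A hedged usage sketch, modeled on the wait paths in address_arbiter.cpp earlier in this diff; CancelSleep(), a setter for sleep_cancelled, is implied by the class but not shown in this excerpt:

    // Hypothetical wait path built on SchedulerLockAndSleep.
    Handle event_handle = InvalidHandle;
    {
        SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout_ns);
        if (condition_already_met) {
            lock.CancelSleep(); // destructor will not arm the timer
            return RESULT_SUCCESS;
        }
        current_thread->SetStatus(ThreadStatus::WaitArb);
    } // scope exit: lock released, timer armed to wake the thread after timeout_ns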
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 68db4a5ef..535ee34b9 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -1,7 +1,10 @@
1// Copyright 2018 yuzu emulator team 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5// This file references various implementation details from Atmosphere, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
7
5#pragma once 8#pragma once
6 9
7#include <atomic> 10#include <atomic>
@@ -11,8 +14,12 @@
11 14
12#include "common/common_types.h" 15#include "common/common_types.h"
13#include "common/multi_level_queue.h" 16#include "common/multi_level_queue.h"
17#include "common/scope_exit.h"
14#include "common/spin_lock.h" 18#include "common/spin_lock.h"
19#include "core/core_timing.h"
15#include "core/hardware_properties.h" 20#include "core/hardware_properties.h"
21#include "core/hle/kernel/k_priority_queue.h"
22#include "core/hle/kernel/k_scheduler_lock.h"
16#include "core/hle/kernel/thread.h" 23#include "core/hle/kernel/thread.h"
17 24
18namespace Common { 25namespace Common {
@@ -30,10 +37,16 @@ class KernelCore;
30class Process; 37class Process;
31class SchedulerLock; 38class SchedulerLock;
32 39
33class GlobalScheduler final { 40using KSchedulerPriorityQueue =
41 KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
42static constexpr s32 HighestCoreMigrationAllowedPriority = 2;
43
44class GlobalSchedulerContext final {
45 friend class KScheduler;
46
34public: 47public:
35 explicit GlobalScheduler(KernelCore& kernel); 48 explicit GlobalSchedulerContext(KernelCore& kernel);
36 ~GlobalScheduler(); 49 ~GlobalSchedulerContext();
37 50
38 /// Adds a new thread to the scheduler 51 /// Adds a new thread to the scheduler
39 void AddThread(std::shared_ptr<Thread> thread); 52 void AddThread(std::shared_ptr<Thread> thread);
@@ -46,60 +59,6 @@ public:
46 return thread_list; 59 return thread_list;
47 } 60 }
48 61
49 /// Notify the scheduler a thread's status has changed.
50 void AdjustSchedulingOnStatus(Thread* thread, u32 old_flags);
51
52 /// Notify the scheduler a thread's priority has changed.
53 void AdjustSchedulingOnPriority(Thread* thread, u32 old_priority);
54
55 /// Notify the scheduler a thread's core and/or affinity mask has changed.
56 void AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask, s32 old_core);
57
58 /**
59 * Takes care of selecting the new scheduled threads in three steps:
60 *
61 * 1. First, a thread is selected from the top of the priority queue. If no thread
62 * is obtained, we move to step two; otherwise we are done.
63 *
64 * 2. Second, we try to get a suggested thread that's not assigned to any core or
65 * that is not the top thread of its core.
66 *
67 * 3. Third, if no suggested thread is found, we do a second pass and pick a running
68 * thread on another core and swap it with that core's current thread.
69 *
70 * Returns the cores needing scheduling.
71 */
72 u32 SelectThreads();
73
74 bool HaveReadyThreads(std::size_t core_id) const {
75 return !scheduled_queue[core_id].empty();
76 }
77
78 /**
79 * Takes a thread and moves it to the back of its priority list.
80 *
81 * @note This operation can be redundant; if so, no scheduling change takes place.
82 */
83 bool YieldThread(Thread* thread);
84
85 /**
86 * Takes a thread and moves it to the back of its priority list.
87 * Afterwards, tries to pick a thread from the suggested queue that has run less
88 * recently or has a better priority than the next thread in the core.
89 *
90 * @note This operation can be redundant; if so, no scheduling change takes place.
91 */
92 bool YieldThreadAndBalanceLoad(Thread* thread);
93
94 /**
95 * Takes a thread and moves it out of the scheduling queue
96 * and into the suggested queue. If no thread can be scheduled afterwards on that core,
97 * a suggested thread is obtained instead.
98 *
99 * @note This operation can be redundant; if so, no scheduling change takes place.
100 */
101 bool YieldThreadAndWaitForLoadBalancing(Thread* thread);
102
103 /** 62 /**
104 * Rotates the scheduling queues of threads at a preemption priority and then does 63 * Rotates the scheduling queues of threads at a preemption priority and then does
105 * some core rebalancing. Preemption priorities can be found in the array 64 * some core rebalancing. Preemption priorities can be found in the array
@@ -113,15 +72,7 @@ public:
113 return Core::Hardware::NUM_CPU_CORES; 72 return Core::Hardware::NUM_CPU_CORES;
114 } 73 }
115 74
116 void SetReselectionPending() { 75 bool IsLocked() const;
117 is_reselection_pending.store(true, std::memory_order_release);
118 }
119
120 bool IsReselectionPending() const {
121 return is_reselection_pending.load(std::memory_order_acquire);
122 }
123
124 void Shutdown();
125 76
126private: 77private:
127 friend class SchedulerLock; 78 friend class SchedulerLock;
@@ -133,109 +84,50 @@ private:
133 /// and reschedules current core if needed. 84 /// and reschedules current core if needed.
134 void Unlock(); 85 void Unlock();
135 86
136 void EnableInterruptAndSchedule(u32 cores_pending_reschedule, 87 using LockType = KAbstractSchedulerLock<KScheduler>;
137 Core::EmuThreadHandle global_thread);
138
139 /**
140 * Add a thread to the suggested queue of a cpu core. Suggested threads may be
141 * picked if no thread is scheduled to run on the core.
142 */
143 void Suggest(u32 priority, std::size_t core, Thread* thread);
144
145 /**
146 * Remove a thread from the suggested queue of a cpu core. Suggested threads may be
147 * picked if no thread is scheduled to run on the core.
148 */
149 void Unsuggest(u32 priority, std::size_t core, Thread* thread);
150
151 /**
152 * Add a thread to the scheduling queue of a cpu core. The thread is added at the
153 * back of the queue in its priority level.
154 */
155 void Schedule(u32 priority, std::size_t core, Thread* thread);
156
157 /**
158 * Add a thread to the scheduling queue of a cpu core. The thread is added at the
159 * front of the queue in its priority level.
160 */
161 void SchedulePrepend(u32 priority, std::size_t core, Thread* thread);
162
163 /// Reschedule an already scheduled thread based on a new priority
164 void Reschedule(u32 priority, std::size_t core, Thread* thread);
165
166 /// Unschedules a thread.
167 void Unschedule(u32 priority, std::size_t core, Thread* thread);
168
169 /**
170 * Transfers a thread to a specific core. If destination_core is -1,
171 * the thread is unscheduled from its source core and added to that core's
172 * suggested queue.
173 */
174 void TransferToCore(u32 priority, s32 destination_core, Thread* thread);
175
176 bool AskForReselectionOrMarkRedundant(Thread* current_thread, const Thread* winner);
177
178 static constexpr u32 min_regular_priority = 2;
179 std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
180 scheduled_queue;
181 std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
182 suggested_queue;
183 std::atomic<bool> is_reselection_pending{false};
184
185 // The priority levels at which the global scheduler preempts threads every 10 ms. They are
186 // ordered from Core 0 to Core 3.
187 std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
188 88
189 /// Scheduler lock mechanisms. 89 KernelCore& kernel;
190 bool is_locked{};
191 std::mutex inner_lock;
192 std::atomic<s64> scope_lock{};
193 Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()};
194 90
195 Common::SpinLock global_list_guard{}; 91 std::atomic_bool scheduler_update_needed{};
92 KSchedulerPriorityQueue priority_queue;
93 LockType scheduler_lock;
196 94
197 /// Lists all thread ids that aren't deleted/etc. 95 /// Lists all thread ids that aren't deleted/etc.
198 std::vector<std::shared_ptr<Thread>> thread_list; 96 std::vector<std::shared_ptr<Thread>> thread_list;
199 KernelCore& kernel; 97 Common::SpinLock global_list_guard{};
200}; 98};
201 99
202class Scheduler final { 100class KScheduler final {
203public: 101public:
204 explicit Scheduler(Core::System& system, std::size_t core_id); 102 explicit KScheduler(Core::System& system, std::size_t core_id);
205 ~Scheduler(); 103 ~KScheduler();
206
207 /// Returns whether there are any threads that are ready to run.
208 bool HaveReadyThreads() const;
209 104
210 /// Reschedules to the next available thread (call after current thread is suspended) 105 /// Reschedules to the next available thread (call after current thread is suspended)
211 void TryDoContextSwitch(); 106 void RescheduleCurrentCore();
107
108 /// Reschedules the cores that have a pending reschedule; to be called from EnableScheduling.
109 static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
110 Core::EmuThreadHandle global_thread);
212 111
213 /// The next two are for single-core mode only. 112 /// The next two are for single-core mode only.
214 /// Unload current thread before preempting core. 113 /// Unload current thread before preempting core.
215 void Unload(Thread* thread); 114 void Unload(Thread* thread);
216 void Unload(); 115
217 /// Reload current thread after core preemption. 116 /// Reload current thread after core preemption.
218 void Reload(Thread* thread); 117 void Reload(Thread* thread);
219 void Reload();
220 118
221 /// Gets the current running thread 119 /// Gets the current running thread
222 Thread* GetCurrentThread() const; 120 Thread* GetCurrentThread() const;
223 121
224 /// Gets the currently selected thread from the top of the multilevel queue
225 Thread* GetSelectedThread() const;
226
227 /// Gets the timestamp for the last context switch in ticks. 122 /// Gets the timestamp for the last context switch in ticks.
228 u64 GetLastContextSwitchTicks() const; 123 u64 GetLastContextSwitchTicks() const;
229 124
230 bool ContextSwitchPending() const { 125 bool ContextSwitchPending() const {
231 return is_context_switch_pending; 126 return this->state.needs_scheduling;
232 } 127 }
233 128
234 void Initialize(); 129 void Initialize();
235 130
236 /// Shutdowns the scheduler.
237 void Shutdown();
238
239 void OnThreadStart(); 131 void OnThreadStart();
240 132
241 std::shared_ptr<Common::Fiber>& ControlContext() { 133 std::shared_ptr<Common::Fiber>& ControlContext() {
@@ -246,11 +138,90 @@ public:
246 return switch_fiber; 138 return switch_fiber;
247 } 139 }
248 140
141 std::size_t CurrentCoreId() const {
142 return core_id;
143 }
144
145 u64 UpdateHighestPriorityThread(Thread* highest_thread);
146
147 /**
148 * Takes a thread and moves it to the back of its priority list.
149 *
150 * @note This operation can be redundant; if so, no scheduling change takes place.
151 */
152 void YieldWithoutCoreMigration();
153
154 /**
155 * Takes a thread and moves it to the back of its priority list.
156 * Afterwards, tries to pick a thread from the suggested queue that has run less
157 * recently or has a better priority than the next thread in the core.
158 *
159 * @note This operation can be redundant; if so, no scheduling change takes place.
160 */
161 void YieldWithCoreMigration();
162
163 /**
164 * Takes a thread and moves it out of the scheduling queue
165 * and into the suggested queue. If no thread can be scheduled afterwards on that core,
166 * a suggested thread is obtained instead.
167 *
168 * @note This operation can be redundant; if so, no scheduling change takes place.
169 */
170 void YieldToAnyThread();
171
172 /// Notify the scheduler a thread's status has changed.
173 static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state);
174
175 /// Notify the scheduler a thread's priority has changed.
176 static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
177 u32 old_priority);
178
179 /// Notify the scheduler a thread's core and/or affinity mask has changed.
180 static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
181 const KAffinityMask& old_affinity, s32 old_core);
182
183private:
184 /**
185 * Takes care of selecting the new scheduled threads in three steps:
186 *
187 * 1. First, a thread is selected from the top of the priority queue. If no thread
188 * is obtained, we move to step two; otherwise we are done.
189 *
190 * 2. Second, we try to get a suggested thread that's not assigned to any core or
191 * that is not the top thread of its core.
192 *
193 * 3. Third, if no suggested thread is found, we do a second pass and pick a running
194 * thread on another core and swap it with that core's current thread.
195 *
196 * Returns the cores needing scheduling.
197 */
198 static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
199
200 void RotateScheduledQueue(s32 core_id, s32 priority);
201
202public:
203 static bool CanSchedule(KernelCore& kernel);
204 static bool IsSchedulerUpdateNeeded(const KernelCore& kernel);
205 static void SetSchedulerUpdateNeeded(KernelCore& kernel);
206 static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
207 static void DisableScheduling(KernelCore& kernel);
208 static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
209 Core::EmuThreadHandle global_thread);
210 static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
211
249private: 212private:
250 friend class GlobalScheduler; 213 friend class GlobalSchedulerContext;
214
215 static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel);
216
217 void Schedule() {
218 ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
219 this->ScheduleImpl();
220 }
251 221
252 /// Switches the CPU's active thread context to that of the specified thread 222 /// Switches the CPU's active thread context to that of the specified thread
253 void SwitchContext(); 223 void ScheduleImpl();
224 void SwitchThread(Thread* next_thread);
254 225
255 /// When a thread wakes up, it must run this through its new scheduler 226 void SwitchContextStep2();
256 void SwitchContextStep2(); 227 void SwitchContextStep2();
@@ -271,22 +242,28 @@ private:
271 static void OnSwitch(void* this_scheduler); 242 static void OnSwitch(void* this_scheduler);
272 void SwitchToCurrent(); 243 void SwitchToCurrent();
273 244
274 std::shared_ptr<Thread> current_thread = nullptr; 245private:
275 std::shared_ptr<Thread> selected_thread = nullptr; 246 Thread* current_thread{};
276 std::shared_ptr<Thread> current_thread_prev = nullptr; 247 Thread* idle_thread{};
277 std::shared_ptr<Thread> selected_thread_set = nullptr; 248
278 std::shared_ptr<Thread> idle_thread = nullptr; 249 std::shared_ptr<Common::Fiber> switch_fiber{};
279 250
280 std::shared_ptr<Common::Fiber> switch_fiber = nullptr; 251 struct SchedulingState {
252 std::atomic<bool> needs_scheduling;
253 bool interrupt_task_thread_runnable{};
254 bool should_count_idle{};
255 u64 idle_count{};
256 Thread* highest_priority_thread{};
257 void* idle_thread_stack{};
258 };
259
260 SchedulingState state;
281 261
282 Core::System& system; 262 Core::System& system;
283 u64 last_context_switch_time = 0; 263 u64 last_context_switch_time{};
284 u64 idle_selection_count = 0;
285 const std::size_t core_id; 264 const std::size_t core_id;
286 265
287 Common::SpinLock guard{}; 266 Common::SpinLock guard{};
288
289 bool is_context_switch_pending = false;
290}; 267};
291 268
292class SchedulerLock { 269class SchedulerLock {
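
The yield operations declared above (YieldWithoutCoreMigration, YieldWithCoreMigration, YieldToAnyThread) are the per-core replacements for the old GlobalScheduler yield functions. A hedged sketch of how an SVC-side sleep handler might dispatch onto them; the constants and the surrounding function are illustrative assumptions, not part of this change:

    // Sketch: mapping a sleep SVC's special yield values onto the KScheduler
    // yield operations. The dispatch below is an illustrative assumption.
    void SleepThread(Core::System& system, s64 nanoseconds) {
        constexpr s64 yield_without_core_migration = 0;
        constexpr s64 yield_with_core_migration = -1;
        constexpr s64 yield_to_any_thread = -2;

        auto& scheduler = *system.Kernel().CurrentScheduler();
        switch (nanoseconds) {
        case yield_without_core_migration:
            scheduler.YieldWithoutCoreMigration();
            break;
        case yield_with_core_migration:
            scheduler.YieldWithCoreMigration();
            break;
        case yield_to_any_thread:
            scheduler.YieldToAnyThread();
            break;
        default:
            // A positive value would instead arm a timeout, e.g. via
            // SchedulerLockAndSleep.
            break;
        }
    }
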
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 929db696d..b74e34c40 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -27,6 +27,7 @@
27#include "core/hle/kernel/client_port.h" 27#include "core/hle/kernel/client_port.h"
28#include "core/hle/kernel/errors.h" 28#include "core/hle/kernel/errors.h"
29#include "core/hle/kernel/handle_table.h" 29#include "core/hle/kernel/handle_table.h"
30#include "core/hle/kernel/k_scheduler.h"
30#include "core/hle/kernel/kernel.h" 31#include "core/hle/kernel/kernel.h"
31#include "core/hle/kernel/memory/memory_layout.h" 32#include "core/hle/kernel/memory/memory_layout.h"
32#include "core/hle/kernel/memory/memory_manager.h" 33#include "core/hle/kernel/memory/memory_manager.h"
@@ -34,7 +35,6 @@
34#include "core/hle/kernel/physical_core.h" 35#include "core/hle/kernel/physical_core.h"
35#include "core/hle/kernel/process.h" 36#include "core/hle/kernel/process.h"
36#include "core/hle/kernel/resource_limit.h" 37#include "core/hle/kernel/resource_limit.h"
37#include "core/hle/kernel/scheduler.h"
38#include "core/hle/kernel/shared_memory.h" 38#include "core/hle/kernel/shared_memory.h"
39#include "core/hle/kernel/synchronization.h" 39#include "core/hle/kernel/synchronization.h"
40#include "core/hle/kernel/thread.h" 40#include "core/hle/kernel/thread.h"
@@ -49,17 +49,18 @@ namespace Kernel {
49 49
50struct KernelCore::Impl { 50struct KernelCore::Impl {
51 explicit Impl(Core::System& system, KernelCore& kernel) 51 explicit Impl(Core::System& system, KernelCore& kernel)
52 : global_scheduler{kernel}, synchronization{system}, time_manager{system}, 52 : synchronization{system}, time_manager{system}, global_handle_table{kernel}, system{
53 global_handle_table{kernel}, system{system} {} 53 system} {}
54 54
55 void SetMulticore(bool is_multicore) { 55 void SetMulticore(bool is_multicore) {
56 this->is_multicore = is_multicore; 56 this->is_multicore = is_multicore;
57 } 57 }
58 58
59 void Initialize(KernelCore& kernel) { 59 void Initialize(KernelCore& kernel) {
60 Shutdown();
61 RegisterHostThread(); 60 RegisterHostThread();
62 61
62 global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
63
63 InitializePhysicalCores(); 64 InitializePhysicalCores();
64 InitializeSystemResourceLimit(kernel); 65 InitializeSystemResourceLimit(kernel);
65 InitializeMemoryLayout(); 66 InitializeMemoryLayout();
@@ -86,29 +87,20 @@ struct KernelCore::Impl {
86 } 87 }
87 } 88 }
88 89
89 for (std::size_t i = 0; i < cores.size(); i++) {
90 cores[i].Shutdown();
91 schedulers[i].reset();
92 }
93 cores.clear(); 90 cores.clear();
94 91
95 process_list.clear(); 92 process_list.clear();
93
96 current_process = nullptr; 94 current_process = nullptr;
97 95
98 system_resource_limit = nullptr; 96 system_resource_limit = nullptr;
99 97
100 global_handle_table.Clear(); 98 global_handle_table.Clear();
101 preemption_event = nullptr;
102 99
103 global_scheduler.Shutdown(); 100 preemption_event = nullptr;
104 101
105 named_ports.clear(); 102 named_ports.clear();
106 103
107 for (auto& core : cores) {
108 core.Shutdown();
109 }
110 cores.clear();
111
112 exclusive_monitor.reset(); 104 exclusive_monitor.reset();
113 105
114 num_host_threads = 0; 106 num_host_threads = 0;
@@ -121,7 +113,7 @@ struct KernelCore::Impl {
121 exclusive_monitor = 113 exclusive_monitor =
122 Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES); 114 Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
123 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 115 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
124 schedulers[i] = std::make_unique<Kernel::Scheduler>(system, i); 116 schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i);
125 cores.emplace_back(i, system, *schedulers[i], interrupts); 117 cores.emplace_back(i, system, *schedulers[i], interrupts);
126 } 118 }
127 } 119 }
@@ -155,7 +147,7 @@ struct KernelCore::Impl {
155 "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) { 147 "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
156 { 148 {
157 SchedulerLock lock(kernel); 149 SchedulerLock lock(kernel);
158 global_scheduler.PreemptThreads(); 150 global_scheduler_context->PreemptThreads();
159 } 151 }
160 const auto time_interval = std::chrono::nanoseconds{ 152 const auto time_interval = std::chrono::nanoseconds{
161 Core::Timing::msToCycles(std::chrono::milliseconds(10))}; 153 Core::Timing::msToCycles(std::chrono::milliseconds(10))};
@@ -245,7 +237,7 @@ struct KernelCore::Impl {
245 if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) { 237 if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) {
246 return result; 238 return result;
247 } 239 }
248 const Kernel::Scheduler& sched = cores[result.host_handle].Scheduler(); 240 const Kernel::KScheduler& sched = cores[result.host_handle].Scheduler();
249 const Kernel::Thread* current = sched.GetCurrentThread(); 241 const Kernel::Thread* current = sched.GetCurrentThread();
250 if (current != nullptr && !current->IsPhantomMode()) { 242 if (current != nullptr && !current->IsPhantomMode()) {
251 result.guest_handle = current->GetGlobalHandle(); 243 result.guest_handle = current->GetGlobalHandle();
@@ -314,7 +306,7 @@ struct KernelCore::Impl {
314 // Lists all processes that exist in the current session. 306 // Lists all processes that exist in the current session.
315 std::vector<std::shared_ptr<Process>> process_list; 307 std::vector<std::shared_ptr<Process>> process_list;
316 Process* current_process = nullptr; 308 Process* current_process = nullptr;
317 Kernel::GlobalScheduler global_scheduler; 309 std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
318 Kernel::Synchronization synchronization; 310 Kernel::Synchronization synchronization;
319 Kernel::TimeManager time_manager; 311 Kernel::TimeManager time_manager;
320 312
@@ -355,7 +347,7 @@ struct KernelCore::Impl {
355 347
356 std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{}; 348 std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
357 std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{}; 349 std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
358 std::array<std::unique_ptr<Kernel::Scheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{}; 350 std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
359 351
360 bool is_multicore{}; 352 bool is_multicore{};
361 std::thread::id single_core_thread_id{}; 353 std::thread::id single_core_thread_id{};
@@ -415,19 +407,19 @@ const std::vector<std::shared_ptr<Process>>& KernelCore::GetProcessList() const
415 return impl->process_list; 407 return impl->process_list;
416} 408}
417 409
418Kernel::GlobalScheduler& KernelCore::GlobalScheduler() { 410Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() {
419 return impl->global_scheduler; 411 return *impl->global_scheduler_context;
420} 412}
421 413
422const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const { 414const Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() const {
423 return impl->global_scheduler; 415 return *impl->global_scheduler_context;
424} 416}
425 417
426Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) { 418Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) {
427 return *impl->schedulers[id]; 419 return *impl->schedulers[id];
428} 420}
429 421
430const Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) const { 422const Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) const {
431 return *impl->schedulers[id]; 423 return *impl->schedulers[id];
432} 424}
433 425
@@ -451,16 +443,13 @@ const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
451 return impl->cores[core_id]; 443 return impl->cores[core_id];
452} 444}
453 445
454Kernel::Scheduler& KernelCore::CurrentScheduler() { 446Kernel::KScheduler* KernelCore::CurrentScheduler() {
455 u32 core_id = impl->GetCurrentHostThreadID(); 447 u32 core_id = impl->GetCurrentHostThreadID();
456 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); 448 if (core_id >= Core::Hardware::NUM_CPU_CORES) {
457 return *impl->schedulers[core_id]; 449 // This is expected when not called from a guest thread
458} 450 return {};
459 451 }
460const Kernel::Scheduler& KernelCore::CurrentScheduler() const { 452 return impl->schedulers[core_id].get();
461 u32 core_id = impl->GetCurrentHostThreadID();
462 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
463 return *impl->schedulers[core_id];
464} 453}
465 454
466std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() { 455std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
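
With the change above, KernelCore::CurrentScheduler() returns a pointer and yields nullptr on host threads that are not emulating a guest core, rather than asserting. Callers must therefore guard the dereference; a minimal sketch (GetGuestThread is a hypothetical helper, not part of this change):

    // Sketch: null-checking the pointer-returning accessor.
    Kernel::Thread* GetGuestThread(Kernel::KernelCore& kernel) {
        Kernel::KScheduler* const scheduler = kernel.CurrentScheduler();
        if (scheduler == nullptr) {
            // Running on a host thread with no associated guest core.
            return nullptr;
        }
        return scheduler->GetCurrentThread();
    }
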
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index a73a93039..5846c3f39 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -35,12 +35,12 @@ class SlabHeap;
35 35
36class AddressArbiter; 36class AddressArbiter;
37class ClientPort; 37class ClientPort;
38class GlobalScheduler; 38class GlobalSchedulerContext;
39class HandleTable; 39class HandleTable;
40class PhysicalCore; 40class PhysicalCore;
41class Process; 41class Process;
42class ResourceLimit; 42class ResourceLimit;
43class Scheduler; 43class KScheduler;
44class SharedMemory; 44class SharedMemory;
45class Synchronization; 45class Synchronization;
46class Thread; 46class Thread;
@@ -102,16 +102,16 @@ public:
102 const std::vector<std::shared_ptr<Process>>& GetProcessList() const; 102 const std::vector<std::shared_ptr<Process>>& GetProcessList() const;
103 103
104 /// Gets the sole instance of the global scheduler 104 /// Gets the sole instance of the global scheduler
105 Kernel::GlobalScheduler& GlobalScheduler(); 105 Kernel::GlobalSchedulerContext& GlobalSchedulerContext();
106 106
107 /// Gets the sole instance of the global scheduler 107 /// Gets the sole instance of the global scheduler
108 const Kernel::GlobalScheduler& GlobalScheduler() const; 108 const Kernel::GlobalSchedulerContext& GlobalSchedulerContext() const;
109 109
110 /// Gets the sole instance of the Scheduler associated with cpu core 'id' 110 /// Gets the sole instance of the Scheduler associated with cpu core 'id'
111 Kernel::Scheduler& Scheduler(std::size_t id); 111 Kernel::KScheduler& Scheduler(std::size_t id);
112 112
113 /// Gets the sole instance of the Scheduler associated with cpu core 'id' 113 /// Gets the sole instance of the Scheduler associated with cpu core 'id'
114 const Kernel::Scheduler& Scheduler(std::size_t id) const; 114 const Kernel::KScheduler& Scheduler(std::size_t id) const;
115 115
116 /// Gets an instance of the respective physical CPU core. 116 /// Gets an instance of the respective physical CPU core.
117 Kernel::PhysicalCore& PhysicalCore(std::size_t id); 117 Kernel::PhysicalCore& PhysicalCore(std::size_t id);
@@ -120,10 +120,7 @@ public:
120 const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const; 120 const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
121 121
122 /// Gets the sole instance of the Scheduler at the current running core. 122 /// Gets the sole instance of the Scheduler at the current running core.
123 Kernel::Scheduler& CurrentScheduler(); 123 Kernel::KScheduler* CurrentScheduler();
124
125 /// Gets the sole instance of the Scheduler at the current running core.
126 const Kernel::Scheduler& CurrentScheduler() const;
127 124
128 /// Gets an instance of the current physical CPU core. 125 /// Gets an instance of the current physical CPU core.
129 Kernel::PhysicalCore& CurrentPhysicalCore(); 126 Kernel::PhysicalCore& CurrentPhysicalCore();
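
The accessors above expose one KScheduler per CPU core plus the shared GlobalSchedulerContext. A short sketch of walking the per-core schedulers through them, assuming yuzu's usual LOG_DEBUG macro; LogCoreThreads is a hypothetical helper, not part of this change:

    // Sketch: visiting each per-core scheduler via KernelCore::Scheduler(id).
    void LogCoreThreads(Kernel::KernelCore& kernel) {
        for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; ++core) {
            const Kernel::KScheduler& scheduler = kernel.Scheduler(core);
            const Kernel::Thread* const thread = scheduler.GetCurrentThread();
            LOG_DEBUG(Kernel, "core {}: thread id {}", core,
                      thread != nullptr ? thread->GetThreadID() : 0);
        }
    }
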
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index 8f6c944d1..6299b1342 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -11,11 +11,11 @@
11#include "core/core.h" 11#include "core/core.h"
12#include "core/hle/kernel/errors.h" 12#include "core/hle/kernel/errors.h"
13#include "core/hle/kernel/handle_table.h" 13#include "core/hle/kernel/handle_table.h"
14#include "core/hle/kernel/k_scheduler.h"
14#include "core/hle/kernel/kernel.h" 15#include "core/hle/kernel/kernel.h"
15#include "core/hle/kernel/mutex.h" 16#include "core/hle/kernel/mutex.h"
16#include "core/hle/kernel/object.h" 17#include "core/hle/kernel/object.h"
17#include "core/hle/kernel/process.h" 18#include "core/hle/kernel/process.h"
18#include "core/hle/kernel/scheduler.h"
19#include "core/hle/kernel/thread.h" 19#include "core/hle/kernel/thread.h"
20#include "core/hle/result.h" 20#include "core/hle/result.h"
21#include "core/memory.h" 21#include "core/memory.h"
@@ -73,7 +73,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
73 73
74 auto& kernel = system.Kernel(); 74 auto& kernel = system.Kernel();
75 std::shared_ptr<Thread> current_thread = 75 std::shared_ptr<Thread> current_thread =
76 SharedFrom(kernel.CurrentScheduler().GetCurrentThread()); 76 SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
77 { 77 {
78 SchedulerLock lock(kernel); 78 SchedulerLock lock(kernel);
79 // The mutex address must be 4-byte aligned 79 // The mutex address must be 4-byte aligned
@@ -156,7 +156,7 @@ ResultCode Mutex::Release(VAddr address) {
156 SchedulerLock lock(kernel); 156 SchedulerLock lock(kernel);
157 157
158 std::shared_ptr<Thread> current_thread = 158 std::shared_ptr<Thread> current_thread =
159 SharedFrom(kernel.CurrentScheduler().GetCurrentThread()); 159 SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
160 160
161 auto [result, new_owner] = Unlock(current_thread, address); 161 auto [result, new_owner] = Unlock(current_thread, address);
162 162
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index d6a5742bd..7fea45f96 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -7,14 +7,14 @@
7#include "core/arm/dynarmic/arm_dynarmic_32.h" 7#include "core/arm/dynarmic/arm_dynarmic_32.h"
8#include "core/arm/dynarmic/arm_dynarmic_64.h" 8#include "core/arm/dynarmic/arm_dynarmic_64.h"
9#include "core/core.h" 9#include "core/core.h"
10#include "core/hle/kernel/k_scheduler.h"
10#include "core/hle/kernel/kernel.h" 11#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/physical_core.h" 12#include "core/hle/kernel/physical_core.h"
12#include "core/hle/kernel/scheduler.h"
13 13
14namespace Kernel { 14namespace Kernel {
15 15
16PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system, 16PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system,
17 Kernel::Scheduler& scheduler, Core::CPUInterrupts& interrupts) 17 Kernel::KScheduler& scheduler, Core::CPUInterrupts& interrupts)
18 : core_index{core_index}, system{system}, scheduler{scheduler}, 18 : core_index{core_index}, system{system}, scheduler{scheduler},
19 interrupts{interrupts}, guard{std::make_unique<Common::SpinLock>()} {} 19 interrupts{interrupts}, guard{std::make_unique<Common::SpinLock>()} {}
20 20
@@ -37,17 +37,12 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
37 37
38void PhysicalCore::Run() { 38void PhysicalCore::Run() {
39 arm_interface->Run(); 39 arm_interface->Run();
40 arm_interface->ClearExclusiveState();
41} 40}
42 41
43void PhysicalCore::Idle() { 42void PhysicalCore::Idle() {
44 interrupts[core_index].AwaitInterrupt(); 43 interrupts[core_index].AwaitInterrupt();
45} 44}
46 45
47void PhysicalCore::Shutdown() {
48 scheduler.Shutdown();
49}
50
51bool PhysicalCore::IsInterrupted() const { 46bool PhysicalCore::IsInterrupted() const {
52 return interrupts[core_index].IsInterrupted(); 47 return interrupts[core_index].IsInterrupted();
53} 48}
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index 37513130a..b4d3c15de 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -15,7 +15,7 @@ class SpinLock;
15} 15}
16 16
17namespace Kernel { 17namespace Kernel {
18class Scheduler; 18class KScheduler;
19} // namespace Kernel 19} // namespace Kernel
20 20
21namespace Core { 21namespace Core {
@@ -28,7 +28,7 @@ namespace Kernel {
28 28
29class PhysicalCore { 29class PhysicalCore {
30public: 30public:
31 PhysicalCore(std::size_t core_index, Core::System& system, Kernel::Scheduler& scheduler, 31 PhysicalCore(std::size_t core_index, Core::System& system, Kernel::KScheduler& scheduler,
32 Core::CPUInterrupts& interrupts); 32 Core::CPUInterrupts& interrupts);
33 ~PhysicalCore(); 33 ~PhysicalCore();
34 34
@@ -55,9 +55,6 @@ public:
55 /// Check if this core is interrupted 55 /// Check if this core is interrupted
56 bool IsInterrupted() const; 56 bool IsInterrupted() const;
57 57
58 // Shutdown this physical core.
59 void Shutdown();
60
61 bool IsInitialized() const { 58 bool IsInitialized() const {
62 return arm_interface != nullptr; 59 return arm_interface != nullptr;
63 } 60 }
@@ -82,18 +79,18 @@ public:
82 return core_index; 79 return core_index;
83 } 80 }
84 81
85 Kernel::Scheduler& Scheduler() { 82 Kernel::KScheduler& Scheduler() {
86 return scheduler; 83 return scheduler;
87 } 84 }
88 85
89 const Kernel::Scheduler& Scheduler() const { 86 const Kernel::KScheduler& Scheduler() const {
90 return scheduler; 87 return scheduler;
91 } 88 }
92 89
93private: 90private:
94 const std::size_t core_index; 91 const std::size_t core_index;
95 Core::System& system; 92 Core::System& system;
96 Kernel::Scheduler& scheduler; 93 Kernel::KScheduler& scheduler;
97 Core::CPUInterrupts& interrupts; 94 Core::CPUInterrupts& interrupts;
98 std::unique_ptr<Common::SpinLock> guard; 95 std::unique_ptr<Common::SpinLock> guard;
99 std::unique_ptr<Core::ARM_Interface> arm_interface; 96 std::unique_ptr<Core::ARM_Interface> arm_interface;
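
PhysicalCore now exposes only Run(), Idle(), and interrupt queries; Shutdown() is gone and exclusive-state clearing was removed from Run() above. A hedged sketch of a host loop over the slimmed-down interface; the loop structure is an illustrative assumption, not the emulator's actual CpuManager code:

    // Sketch: an illustrative host core loop. The structure is an assumption.
    void RunCore(Kernel::PhysicalCore& core, Kernel::KScheduler& scheduler,
                 const bool& running) {
        while (running) {
            if (scheduler.ContextSwitchPending()) {
                scheduler.RescheduleCurrentCore(); // May switch guest threads.
            }
            if (core.IsInterrupted()) {
                core.ClearInterrupt();
            }
            core.Run(); // Execute guest code until the next interrupt/exit.
        }
    }
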
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index b17529dee..238c03a13 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -15,13 +15,13 @@
15#include "core/file_sys/program_metadata.h" 15#include "core/file_sys/program_metadata.h"
16#include "core/hle/kernel/code_set.h" 16#include "core/hle/kernel/code_set.h"
17#include "core/hle/kernel/errors.h" 17#include "core/hle/kernel/errors.h"
18#include "core/hle/kernel/k_scheduler.h"
18#include "core/hle/kernel/kernel.h" 19#include "core/hle/kernel/kernel.h"
19#include "core/hle/kernel/memory/memory_block_manager.h" 20#include "core/hle/kernel/memory/memory_block_manager.h"
20#include "core/hle/kernel/memory/page_table.h" 21#include "core/hle/kernel/memory/page_table.h"
21#include "core/hle/kernel/memory/slab_heap.h" 22#include "core/hle/kernel/memory/slab_heap.h"
22#include "core/hle/kernel/process.h" 23#include "core/hle/kernel/process.h"
23#include "core/hle/kernel/resource_limit.h" 24#include "core/hle/kernel/resource_limit.h"
24#include "core/hle/kernel/scheduler.h"
25#include "core/hle/kernel/thread.h" 25#include "core/hle/kernel/thread.h"
26#include "core/hle/lock.h" 26#include "core/hle/lock.h"
27#include "core/memory.h" 27#include "core/memory.h"
@@ -314,7 +314,7 @@ void Process::PrepareForTermination() {
314 if (thread->GetOwnerProcess() != this) 314 if (thread->GetOwnerProcess() != this)
315 continue; 315 continue;
316 316
317 if (thread.get() == system.CurrentScheduler().GetCurrentThread()) 317 if (thread.get() == kernel.CurrentScheduler()->GetCurrentThread())
318 continue; 318 continue;
319 319
320 // TODO(Subv): When are the other running/ready threads terminated? 320 // TODO(Subv): When are the other running/ready threads terminated?
@@ -325,7 +325,7 @@ void Process::PrepareForTermination() {
325 } 325 }
326 }; 326 };
327 327
328 stop_threads(system.GlobalScheduler().GetThreadList()); 328 stop_threads(system.GlobalSchedulerContext().GetThreadList());
329 329
330 FreeTLSRegion(tls_region_address); 330 FreeTLSRegion(tls_region_address);
331 tls_region_address = 0; 331 tls_region_address = 0;
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index 6e286419e..927f88fed 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -6,10 +6,10 @@
6#include "common/assert.h" 6#include "common/assert.h"
7#include "common/logging/log.h" 7#include "common/logging/log.h"
8#include "core/hle/kernel/errors.h" 8#include "core/hle/kernel/errors.h"
9#include "core/hle/kernel/k_scheduler.h"
9#include "core/hle/kernel/kernel.h" 10#include "core/hle/kernel/kernel.h"
10#include "core/hle/kernel/object.h" 11#include "core/hle/kernel/object.h"
11#include "core/hle/kernel/readable_event.h" 12#include "core/hle/kernel/readable_event.h"
12#include "core/hle/kernel/scheduler.h"
13#include "core/hle/kernel/thread.h" 13#include "core/hle/kernel/thread.h"
14 14
15namespace Kernel { 15namespace Kernel {
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
deleted file mode 100644
index 9a969fdb5..000000000
--- a/src/core/hle/kernel/scheduler.cpp
+++ /dev/null
@@ -1,819 +0,0 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4//
5// SelectThreads, Yield functions originally by TuxSH.
6// licensed under GPLv2 or later under exception provided by the author.
7
8#include <algorithm>
9#include <mutex>
10#include <set>
11#include <unordered_set>
12#include <utility>
13
14#include "common/assert.h"
15#include "common/bit_util.h"
16#include "common/fiber.h"
17#include "common/logging/log.h"
18#include "core/arm/arm_interface.h"
19#include "core/core.h"
20#include "core/core_timing.h"
21#include "core/cpu_manager.h"
22#include "core/hle/kernel/kernel.h"
23#include "core/hle/kernel/physical_core.h"
24#include "core/hle/kernel/process.h"
25#include "core/hle/kernel/scheduler.h"
26#include "core/hle/kernel/time_manager.h"
27
28namespace Kernel {
29
30GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {}
31
32GlobalScheduler::~GlobalScheduler() = default;
33
34void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) {
35 std::scoped_lock lock{global_list_guard};
36 thread_list.push_back(std::move(thread));
37}
38
39void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
40 std::scoped_lock lock{global_list_guard};
41 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
42 thread_list.end());
43}
44
45u32 GlobalScheduler::SelectThreads() {
46 ASSERT(is_locked);
47 const auto update_thread = [](Thread* thread, Scheduler& sched) {
48 std::scoped_lock lock{sched.guard};
49 if (thread != sched.selected_thread_set.get()) {
50 if (thread == nullptr) {
51 ++sched.idle_selection_count;
52 }
53 sched.selected_thread_set = SharedFrom(thread);
54 }
55 const bool reschedule_pending =
56 sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread);
57 sched.is_context_switch_pending = reschedule_pending;
58 std::atomic_thread_fence(std::memory_order_seq_cst);
59 return reschedule_pending;
60 };
61 if (!is_reselection_pending.load()) {
62 return 0;
63 }
64 std::array<Thread*, Core::Hardware::NUM_CPU_CORES> top_threads{};
65
66 u32 idle_cores{};
67
68 // Step 1: Get top thread in schedule queue.
69 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
70 Thread* top_thread =
71 scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
72 if (top_thread != nullptr) {
73 // TODO(Blinkhawk): Implement Thread Pinning
74 } else {
75 idle_cores |= (1U << core);
76 }
77 top_threads[core] = top_thread;
78 }
79
80 while (idle_cores != 0) {
81 u32 core_id = Common::CountTrailingZeroes32(idle_cores);
82
83 if (!suggested_queue[core_id].empty()) {
84 std::array<s32, Core::Hardware::NUM_CPU_CORES> migration_candidates{};
85 std::size_t num_candidates = 0;
86 auto iter = suggested_queue[core_id].begin();
87 Thread* suggested = nullptr;
88 // Step 2: Try selecting a suggested thread.
89 while (iter != suggested_queue[core_id].end()) {
90 suggested = *iter;
91 iter++;
92 s32 suggested_core_id = suggested->GetProcessorID();
93 Thread* top_thread =
94 suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr;
95 if (top_thread != suggested) {
96 if (top_thread != nullptr &&
97 top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
98 suggested = nullptr;
99 break;
100 // The top thread's priority is too high to allow core migration; cancel
101 }
102 TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested);
103 break;
104 }
105 suggested = nullptr;
106 migration_candidates[num_candidates++] = suggested_core_id;
107 }
108 // Step 3: Select a suggested thread from another core
109 if (suggested == nullptr) {
110 for (std::size_t i = 0; i < num_candidates; i++) {
111 s32 candidate_core = migration_candidates[i];
112 suggested = top_threads[candidate_core];
113 auto it = scheduled_queue[candidate_core].begin();
114 it++;
115 Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
116 if (next != nullptr) {
117 TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
118 suggested);
119 top_threads[candidate_core] = next;
120 break;
121 } else {
122 suggested = nullptr;
123 }
124 }
125 }
126 top_threads[core_id] = suggested;
127 }
128
129 idle_cores &= ~(1U << core_id);
130 }
131 u32 cores_needing_context_switch{};
132 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
133 Scheduler& sched = kernel.Scheduler(core);
134 ASSERT(top_threads[core] == nullptr ||
135 static_cast<u32>(top_threads[core]->GetProcessorID()) == core);
136 if (update_thread(top_threads[core], sched)) {
137 cores_needing_context_switch |= (1U << core);
138 }
139 }
140 return cores_needing_context_switch;
141}
142
143bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
144 ASSERT(is_locked);
145 // Note: the caller is expected to hold the scheduler lock (critical section).
146 if (!yielding_thread->IsRunnable()) {
147 // Normally this case shouldn't happen except for SetThreadActivity.
148 is_reselection_pending.store(true, std::memory_order_release);
149 return false;
150 }
151 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
152 const u32 priority = yielding_thread->GetPriority();
153
154 // Yield the thread
155 Reschedule(priority, core_id, yielding_thread);
156 const Thread* const winner = scheduled_queue[core_id].front();
157 if (kernel.GetCurrentHostThreadID() != core_id) {
158 is_reselection_pending.store(true, std::memory_order_release);
159 }
160
161 return AskForReselectionOrMarkRedundant(yielding_thread, winner);
162}
163
164bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
165 ASSERT(is_locked);
166 // Note: the caller should check !thread.IsSchedulerOperationRedundant and hold
167 // the scheduler lock (critical section).
168 if (!yielding_thread->IsRunnable()) {
169 // Normally this case shouldn't happen except for SetThreadActivity.
170 is_reselection_pending.store(true, std::memory_order_release);
171 return false;
172 }
173 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
174 const u32 priority = yielding_thread->GetPriority();
175
176 // Yield the thread
177 Reschedule(priority, core_id, yielding_thread);
178
179 std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
180 for (std::size_t i = 0; i < current_threads.size(); i++) {
181 current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
182 }
183
184 Thread* next_thread = scheduled_queue[core_id].front(priority);
185 Thread* winner = nullptr;
186 for (auto& thread : suggested_queue[core_id]) {
187 const s32 source_core = thread->GetProcessorID();
188 if (source_core >= 0) {
189 if (current_threads[source_core] != nullptr) {
190 if (thread == current_threads[source_core] ||
191 current_threads[source_core]->GetPriority() < min_regular_priority) {
192 continue;
193 }
194 }
195 }
196 if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
197 next_thread->GetPriority() < thread->GetPriority()) {
198 if (thread->GetPriority() <= priority) {
199 winner = thread;
200 break;
201 }
202 }
203 }
204
205 if (winner != nullptr) {
206 if (winner != yielding_thread) {
207 TransferToCore(winner->GetPriority(), s32(core_id), winner);
208 }
209 } else {
210 winner = next_thread;
211 }
212
213 if (kernel.GetCurrentHostThreadID() != core_id) {
214 is_reselection_pending.store(true, std::memory_order_release);
215 }
216
217 return AskForReselectionOrMarkRedundant(yielding_thread, winner);
218}
219
220bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
221 ASSERT(is_locked);
222 // Note: the caller should check !thread.IsSchedulerOperationRedundant and hold
223 // the scheduler lock (critical section).
224 if (!yielding_thread->IsRunnable()) {
225 // Normally this case shouldn't happen except for SetThreadActivity.
226 is_reselection_pending.store(true, std::memory_order_release);
227 return false;
228 }
229 Thread* winner = nullptr;
230 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
231
232 // Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one instead
233 TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread);
234
235 // If the core is idle, perform load balancing, excluding the threads that have just used this
236 // function...
237 if (scheduled_queue[core_id].empty()) {
238 // Here, "current_threads" is calculated after the "yield", unlike yield -1
239 std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
240 for (std::size_t i = 0; i < current_threads.size(); i++) {
241 current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
242 }
243 for (auto& thread : suggested_queue[core_id]) {
244 const s32 source_core = thread->GetProcessorID();
245 if (source_core < 0 || thread == current_threads[source_core]) {
246 continue;
247 }
248 if (current_threads[source_core] == nullptr ||
249 current_threads[source_core]->GetPriority() >= min_regular_priority) {
250 winner = thread;
251 }
252 break;
253 }
254 if (winner != nullptr) {
255 if (winner != yielding_thread) {
256 TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
257 }
258 } else {
259 winner = yielding_thread;
260 }
261 } else {
262 winner = scheduled_queue[core_id].front();
263 }
264
265 if (kernel.GetCurrentHostThreadID() != core_id) {
266 is_reselection_pending.store(true, std::memory_order_release);
267 }
268
269 return AskForReselectionOrMarkRedundant(yielding_thread, winner);
270}
271
272void GlobalScheduler::PreemptThreads() {
273 ASSERT(is_locked);
274 for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
275 const u32 priority = preemption_priorities[core_id];
276
277 if (scheduled_queue[core_id].size(priority) > 0) {
278 if (scheduled_queue[core_id].size(priority) > 1) {
279 scheduled_queue[core_id].front(priority)->IncrementYieldCount();
280 }
281 scheduled_queue[core_id].yield(priority);
282 if (scheduled_queue[core_id].size(priority) > 1) {
283 scheduled_queue[core_id].front(priority)->IncrementYieldCount();
284 }
285 }
286
287 Thread* current_thread =
288 scheduled_queue[core_id].empty() ? nullptr : scheduled_queue[core_id].front();
289 Thread* winner = nullptr;
290 for (auto& thread : suggested_queue[core_id]) {
291 const s32 source_core = thread->GetProcessorID();
292 if (thread->GetPriority() != priority) {
293 continue;
294 }
295 if (source_core >= 0) {
296 Thread* next_thread = scheduled_queue[source_core].empty()
297 ? nullptr
298 : scheduled_queue[source_core].front();
299 if (next_thread != nullptr && next_thread->GetPriority() < 2) {
300 break;
301 }
302 if (next_thread == thread) {
303 continue;
304 }
305 }
306 if (current_thread != nullptr &&
307 current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
308 winner = thread;
309 break;
310 }
311 }
312
313 if (winner != nullptr) {
314 TransferToCore(winner->GetPriority(), s32(core_id), winner);
315 current_thread =
316 winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
317 }
318
319 if (current_thread != nullptr && current_thread->GetPriority() > priority) {
320 for (auto& thread : suggested_queue[core_id]) {
321 const s32 source_core = thread->GetProcessorID();
322 if (thread->GetPriority() < priority) {
323 continue;
324 }
325 if (source_core >= 0) {
326 Thread* next_thread = scheduled_queue[source_core].empty()
327 ? nullptr
328 : scheduled_queue[source_core].front();
329 if (next_thread != nullptr && next_thread->GetPriority() < 2) {
330 break;
331 }
332 if (next_thread == thread) {
333 continue;
334 }
335 }
336 if (current_thread != nullptr &&
337 current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
338 winner = thread;
339 break;
340 }
341 }
342
343 if (winner != nullptr) {
344 TransferToCore(winner->GetPriority(), s32(core_id), winner);
345 current_thread = winner;
346 }
347 }
348
349 is_reselection_pending.store(true, std::memory_order_release);
350 }
351}
352
353void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
354 Core::EmuThreadHandle global_thread) {
355 u32 current_core = global_thread.host_handle;
356 bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
357 (current_core < Core::Hardware::NUM_CPU_CORES);
358 while (cores_pending_reschedule != 0) {
359 u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
360 ASSERT(core < Core::Hardware::NUM_CPU_CORES);
361 if (!must_context_switch || core != current_core) {
362 auto& phys_core = kernel.PhysicalCore(core);
363 phys_core.Interrupt();
364 } else {
365 must_context_switch = true;
366 }
367 cores_pending_reschedule &= ~(1U << core);
368 }
369 if (must_context_switch) {
370 auto& core_scheduler = kernel.CurrentScheduler();
371 kernel.ExitSVCProfile();
372 core_scheduler.TryDoContextSwitch();
373 kernel.EnterSVCProfile();
374 }
375}
376
377void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) {
378 ASSERT(is_locked);
379 suggested_queue[core].add(thread, priority);
380}
381
382void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) {
383 ASSERT(is_locked);
384 suggested_queue[core].remove(thread, priority);
385}
386
387void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) {
388 ASSERT(is_locked);
389 ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
390 scheduled_queue[core].add(thread, priority);
391}
392
393void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) {
394 ASSERT(is_locked);
395 ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
396 scheduled_queue[core].add(thread, priority, false);
397}
398
399void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) {
400 ASSERT(is_locked);
401 scheduled_queue[core].remove(thread, priority);
402 scheduled_queue[core].add(thread, priority);
403}
404
405void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) {
406 ASSERT(is_locked);
407 scheduled_queue[core].remove(thread, priority);
408}
409
410void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
411 ASSERT(is_locked);
412 const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
413 const s32 source_core = thread->GetProcessorID();
414 if (source_core == destination_core || !schedulable) {
415 return;
416 }
417 thread->SetProcessorID(destination_core);
418 if (source_core >= 0) {
419 Unschedule(priority, static_cast<u32>(source_core), thread);
420 }
421 if (destination_core >= 0) {
422 Unsuggest(priority, static_cast<u32>(destination_core), thread);
423 Schedule(priority, static_cast<u32>(destination_core), thread);
424 }
425 if (source_core >= 0) {
426 Suggest(priority, static_cast<u32>(source_core), thread);
427 }
428}
429
430bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread,
431 const Thread* winner) {
432 if (current_thread == winner) {
433 current_thread->IncrementYieldCount();
434 return true;
435 } else {
436 is_reselection_pending.store(true, std::memory_order_release);
437 return false;
438 }
439}
440
441void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
442 if (old_flags == thread->scheduling_state) {
443 return;
444 }
445 ASSERT(is_locked);
446
447 if (old_flags == static_cast<u32>(ThreadSchedStatus::Runnable)) {
448 // In this case the thread was running; now it's pausing/exiting
449 if (thread->processor_id >= 0) {
450 Unschedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
451 }
452
453 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
454 if (core != static_cast<u32>(thread->processor_id) &&
455 thread->affinity_mask.GetAffinity(core)) {
456 Unsuggest(thread->current_priority, core, thread);
457 }
458 }
459 } else if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
460 // The thread is now set to running from being stopped
461 if (thread->processor_id >= 0) {
462 Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
463 }
464
465 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
466 if (core != static_cast<u32>(thread->processor_id) &&
467 thread->affinity_mask.GetAffinity(core)) {
468 Suggest(thread->current_priority, core, thread);
469 }
470 }
471 }
472
473 SetReselectionPending();
474}
475
476void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priority) {
477 if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable)) {
478 return;
479 }
480 ASSERT(is_locked);
481 if (thread->processor_id >= 0) {
482 Unschedule(old_priority, static_cast<u32>(thread->processor_id), thread);
483 }
484
485 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
486 if (core != static_cast<u32>(thread->processor_id) &&
487 thread->affinity_mask.GetAffinity(core)) {
488 Unsuggest(old_priority, core, thread);
489 }
490 }
491
492 if (thread->processor_id >= 0) {
493 if (thread == kernel.CurrentScheduler().GetCurrentThread()) {
494 SchedulePrepend(thread->current_priority, static_cast<u32>(thread->processor_id),
495 thread);
496 } else {
497 Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
498 }
499 }
500
501 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
502 if (core != static_cast<u32>(thread->processor_id) &&
503 thread->affinity_mask.GetAffinity(core)) {
504 Suggest(thread->current_priority, core, thread);
505 }
506 }
507 thread->IncrementYieldCount();
508 SetReselectionPending();
509}
510
511void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask,
512 s32 old_core) {
513 if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable) ||
514 thread->current_priority >= THREADPRIO_COUNT) {
515 return;
516 }
517 ASSERT(is_locked);
518
519 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
520 if (((old_affinity_mask >> core) & 1) != 0) {
521 if (core == static_cast<u32>(old_core)) {
522 Unschedule(thread->current_priority, core, thread);
523 } else {
524 Unsuggest(thread->current_priority, core, thread);
525 }
526 }
527 }
528
529 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
530 if (thread->affinity_mask.GetAffinity(core)) {
531 if (core == static_cast<u32>(thread->processor_id)) {
532 Schedule(thread->current_priority, core, thread);
533 } else {
534 Suggest(thread->current_priority, core, thread);
535 }
536 }
537 }
538
539 thread->IncrementYieldCount();
540 SetReselectionPending();
541}
542
543void GlobalScheduler::Shutdown() {
544 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
545 scheduled_queue[core].clear();
546 suggested_queue[core].clear();
547 }
548 thread_list.clear();
549}
550
551void GlobalScheduler::Lock() {
552 Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID();
553 ASSERT(!current_thread.IsInvalid());
554 if (current_thread == current_owner) {
555 ++scope_lock;
556 } else {
557 inner_lock.lock();
558 is_locked = true;
559 current_owner = current_thread;
560 ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle());
561 scope_lock = 1;
562 }
563}
564
565void GlobalScheduler::Unlock() {
566 if (--scope_lock != 0) {
567 ASSERT(scope_lock > 0);
568 return;
569 }
570 u32 cores_pending_reschedule = SelectThreads();
571 Core::EmuThreadHandle leaving_thread = current_owner;
572 current_owner = Core::EmuThreadHandle::InvalidHandle();
573 scope_lock = 1;
574 is_locked = false;
575 inner_lock.unlock();
576 EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread);
577}
578
579Scheduler::Scheduler(Core::System& system, std::size_t core_id) : system(system), core_id(core_id) {
580 switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this);
581}
582
583Scheduler::~Scheduler() = default;
584
585bool Scheduler::HaveReadyThreads() const {
586 return system.GlobalScheduler().HaveReadyThreads(core_id);
587}
588
589Thread* Scheduler::GetCurrentThread() const {
590 if (current_thread) {
591 return current_thread.get();
592 }
593 return idle_thread.get();
594}
595
596Thread* Scheduler::GetSelectedThread() const {
597 return selected_thread.get();
598}
599
600u64 Scheduler::GetLastContextSwitchTicks() const {
601 return last_context_switch_time;
602}
603
604void Scheduler::TryDoContextSwitch() {
605 auto& phys_core = system.Kernel().CurrentPhysicalCore();
606 if (phys_core.IsInterrupted()) {
607 phys_core.ClearInterrupt();
608 }
609 guard.lock();
610 if (is_context_switch_pending) {
611 SwitchContext();
612 } else {
613 guard.unlock();
614 }
615}
616
617void Scheduler::OnThreadStart() {
618 SwitchContextStep2();
619}
620
621void Scheduler::Unload(Thread* thread) {
622 if (thread) {
623 thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
624 thread->SetIsRunning(false);
625 if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) {
626 system.ArmInterface(core_id).ExceptionalExit();
627 thread->SetContinuousOnSVC(false);
628 }
629 if (!thread->IsHLEThread() && !thread->HasExited()) {
630 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
631 cpu_core.SaveContext(thread->GetContext32());
632 cpu_core.SaveContext(thread->GetContext64());
633 // Save the TPIDR_EL0 system register in case it was modified.
634 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
635 cpu_core.ClearExclusiveState();
636 }
637 thread->context_guard.unlock();
638 }
639}
640
641void Scheduler::Unload() {
642 Unload(current_thread.get());
643}
644
645void Scheduler::Reload(Thread* thread) {
646 if (thread) {
647 ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
648 "Thread must be runnable.");
649
650 // Cancel any outstanding wakeup events for this thread
651 thread->SetIsRunning(true);
652 thread->SetWasRunning(false);
653 thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
654
655 auto* const thread_owner_process = thread->GetOwnerProcess();
656 if (thread_owner_process != nullptr) {
657 system.Kernel().MakeCurrentProcess(thread_owner_process);
658 }
659 if (!thread->IsHLEThread()) {
660 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
661 cpu_core.LoadContext(thread->GetContext32());
662 cpu_core.LoadContext(thread->GetContext64());
663 cpu_core.SetTlsAddress(thread->GetTLSAddress());
664 cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
665 cpu_core.ClearExclusiveState();
666 }
667 }
668}
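Unload() and Reload() are the two halves of the guest context round-trip: the outgoing thread's CPU state is persisted first, then the incoming thread's state is pushed back into the host core. A condensed sketch of the pairing during a switch on one core (thread pointers are illustrative):

scheduler.Unload(previous_thread); // SaveContext x2 + TPIDR_EL0 captured into the Thread
scheduler.Reload(next_thread);     // LoadContext x2 + TLS/TPIDR_EL0 restored to the core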
669
670void Scheduler::Reload() {
671 Reload(current_thread.get());
672}
673
674void Scheduler::SwitchContextStep2() {
675 // Load context of new thread
676 Reload(selected_thread.get());
677
678 TryDoContextSwitch();
679}
680
681void Scheduler::SwitchContext() {
682 current_thread_prev = current_thread;
683 selected_thread = selected_thread_set;
684 Thread* previous_thread = current_thread_prev.get();
685 Thread* new_thread = selected_thread.get();
686 current_thread = selected_thread;
687
688 is_context_switch_pending = false;
689
690 if (new_thread == previous_thread) {
691 guard.unlock();
692 return;
693 }
694
695 Process* const previous_process = system.Kernel().CurrentProcess();
696
697 UpdateLastContextSwitchTime(previous_thread, previous_process);
698
699 // Save context for previous thread
700 Unload(previous_thread);
701
702 std::shared_ptr<Common::Fiber>* old_context;
703 if (previous_thread != nullptr) {
704 old_context = &previous_thread->GetHostContext();
705 } else {
706 old_context = &idle_thread->GetHostContext();
707 }
708 guard.unlock();
709
710 Common::Fiber::YieldTo(*old_context, switch_fiber);
711 // When a thread wakes up, it may resume under a different core's scheduler.
712 auto& next_scheduler = system.Kernel().CurrentScheduler();
713 next_scheduler.SwitchContextStep2();
714}
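The switch itself is a fiber ping-pong: the outgoing guest context yields to switch_fiber, which runs OnSwitch()/SwitchToCurrent() and yields onward to whichever host context was selected. A condensed sketch of the two hops, using Common::Fiber::YieldTo exactly as above (thread names are illustrative):

// Hop 1 - guest to scheduler: suspend the outgoing host fiber and run the
// switch fiber (OnSwitch -> SwitchToCurrent).
Common::Fiber::YieldTo(previous_thread->GetHostContext(), switch_fiber);
// Hop 2 - scheduler to guest: resume the selected thread's host fiber (or
// the idle thread's when nothing is runnable on this core).
Common::Fiber::YieldTo(switch_fiber, current_thread->GetHostContext());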
715
716void Scheduler::OnSwitch(void* this_scheduler) {
717 Scheduler* sched = static_cast<Scheduler*>(this_scheduler);
718 sched->SwitchToCurrent();
719}
720
721void Scheduler::SwitchToCurrent() {
722 while (true) {
723 {
724 std::scoped_lock lock{guard};
725 selected_thread = selected_thread_set;
726 current_thread = selected_thread;
727 is_context_switch_pending = false;
728 }
729 const auto is_switch_pending = [this] {
730 std::scoped_lock lock{guard};
731 return is_context_switch_pending;
732 };
733 do {
734 if (current_thread != nullptr && !current_thread->IsHLEThread()) {
735 current_thread->context_guard.lock();
736 if (!current_thread->IsRunnable()) {
737 current_thread->context_guard.unlock();
738 break;
739 }
740 if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
741 current_thread->context_guard.unlock();
742 break;
743 }
744 }
745 std::shared_ptr<Common::Fiber>* next_context;
746 if (current_thread != nullptr) {
747 next_context = &current_thread->GetHostContext();
748 } else {
749 next_context = &idle_thread->GetHostContext();
750 }
751 Common::Fiber::YieldTo(switch_fiber, *next_context);
752 } while (!is_switch_pending());
753 }
754}
755
756void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
757 const u64 prev_switch_ticks = last_context_switch_time;
758 const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
759 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
760
761 if (thread != nullptr) {
762 thread->UpdateCPUTimeTicks(update_ticks);
763 }
764
765 if (process != nullptr) {
766 process->UpdateCPUTimeTicks(update_ticks);
767 }
768
769 last_context_switch_time = most_recent_switch_ticks;
770}
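A worked example of the accounting above, with illustrative tick values:

// last_context_switch_time == 1000, CoreTiming now reports 1250:
const u64 update_ticks = 1250 - 1000;      // 250 ticks elapsed on this core
thread->UpdateCPUTimeTicks(update_ticks);  // credited to the outgoing thread
process->UpdateCPUTimeTicks(update_ticks); // and to its owning process
// last_context_switch_time becomes 1250 for the next interval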
771
772void Scheduler::Initialize() {
773 std::string name = "Idle Thread Id:" + std::to_string(core_id);
774 std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
775 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
776 ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
777 auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
778 nullptr, std::move(init_func), init_func_parameter);
779 idle_thread = std::move(thread_res).Unwrap();
780}
781
782void Scheduler::Shutdown() {
783 current_thread = nullptr;
784 selected_thread = nullptr;
785}
786
787SchedulerLock::SchedulerLock(KernelCore& kernel) : kernel{kernel} {
788 kernel.GlobalScheduler().Lock();
789}
790
791SchedulerLock::~SchedulerLock() {
792 kernel.GlobalScheduler().Unlock();
793}
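SchedulerLock is the RAII form used throughout the HLE kernel (see the svc.cpp hunks below): the global scheduler stays locked for the scope, and the deferred rescheduling pass runs when the last holder releases. Typical usage (the status change is an example taken from SendSyncRequest):

{
    SchedulerLock lock(kernel);
    thread->SetStatus(ThreadStatus::WaitIPC); // state changes happen under the lock
} // ~SchedulerLock -> GlobalScheduler::Unlock() -> SelectThreads() + reschedule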
794
795SchedulerLockAndSleep::SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle,
796 Thread* time_task, s64 nanoseconds)
797 : SchedulerLock{kernel}, event_handle{event_handle}, time_task{time_task}, nanoseconds{
798 nanoseconds} {
799 event_handle = InvalidHandle;
800}
801
802SchedulerLockAndSleep::~SchedulerLockAndSleep() {
803 if (sleep_cancelled) {
804 return;
805 }
806 auto& time_manager = kernel.TimeManager();
807 time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
808}
809
810void SchedulerLockAndSleep::Release() {
811 if (sleep_cancelled) {
812 return;
813 }
814 auto& time_manager = kernel.TimeManager();
815 time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
816 sleep_cancelled = true;
817}
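SchedulerLockAndSleep couples the scheduler lock with arming a wakeup timer, as in Synchronization::WaitFor below; unless sleep_cancelled is set before the scope ends, the destructor schedules the timeout event. A usage sketch (variable names follow the call sites below):

Handle event_handle = InvalidHandle;
{
    SchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
    // ... queue the thread on its wait objects under the lock ...
} // destructor arms the timeout event unless the sleep was cancelled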
818
819} // namespace Kernel
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index 8c19f2534..bf2c90028 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -14,9 +14,9 @@
 #include "core/hle/kernel/client_session.h"
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/hle_ipc.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/server_session.h"
 #include "core/hle/kernel/session.h"
 #include "core/hle/kernel/thread.h"
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 9742aaf4c..2612a6b0d 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -24,6 +24,7 @@
 #include "core/hle/kernel/client_session.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/memory/memory_block.h"
 #include "core/hle/kernel/memory/page_table.h"
@@ -32,7 +33,6 @@
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/readable_event.h"
 #include "core/hle/kernel/resource_limit.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/shared_memory.h"
 #include "core/hle/kernel/svc.h"
 #include "core/hle/kernel/svc_types.h"
@@ -332,7 +332,8 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
 
 /// Makes a blocking IPC call to an OS service.
 static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
-    const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
+    auto& kernel = system.Kernel();
+    const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
     std::shared_ptr<ClientSession> session = handle_table.Get<ClientSession>(handle);
     if (!session) {
         LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle);
@@ -341,9 +342,9 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
 
     LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
 
-    auto thread = system.CurrentScheduler().GetCurrentThread();
+    auto thread = kernel.CurrentScheduler()->GetCurrentThread();
     {
-        SchedulerLock lock(system.Kernel());
+        SchedulerLock lock(kernel);
         thread->InvalidateHLECallback();
         thread->SetStatus(ThreadStatus::WaitIPC);
         session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
@@ -352,12 +353,12 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
     if (thread->HasHLECallback()) {
         Handle event_handle = thread->GetHLETimeEvent();
         if (event_handle != InvalidHandle) {
-            auto& time_manager = system.Kernel().TimeManager();
+            auto& time_manager = kernel.TimeManager();
             time_manager.UnscheduleTimeEvent(event_handle);
         }
 
         {
-            SchedulerLock lock(system.Kernel());
+            SchedulerLock lock(kernel);
             auto* sync_object = thread->GetHLESyncObject();
             sync_object->RemoveWaitingThread(SharedFrom(thread));
         }
@@ -665,7 +666,7 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
 
         handle_debug_buffer(info1, info2);
 
-        auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
+        auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
         const auto thread_processor_id = current_thread->GetProcessorID();
         system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
     }
@@ -917,7 +918,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
         }
 
         const auto& core_timing = system.CoreTiming();
-        const auto& scheduler = system.CurrentScheduler();
+        const auto& scheduler = *system.Kernel().CurrentScheduler();
         const auto* const current_thread = scheduler.GetCurrentThread();
         const bool same_thread = current_thread == thread.get();
 
@@ -1085,7 +1086,7 @@ static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act
         return ERR_INVALID_HANDLE;
     }
 
-    if (thread.get() == system.CurrentScheduler().GetCurrentThread()) {
+    if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) {
         LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
         return ERR_BUSY;
     }
@@ -1118,7 +1119,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H
         return ERR_INVALID_HANDLE;
     }
 
-    if (thread.get() == system.CurrentScheduler().GetCurrentThread()) {
+    if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) {
         LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
         return ERR_BUSY;
     }
@@ -1475,7 +1476,7 @@ static void ExitProcess(Core::System& system) {
     current_process->PrepareForTermination();
 
     // Kill the current thread
-    system.CurrentScheduler().GetCurrentThread()->Stop();
+    system.Kernel().CurrentScheduler()->GetCurrentThread()->Stop();
 }
 
 static void ExitProcess32(Core::System& system) {
@@ -1576,8 +1577,8 @@ static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
 static void ExitThread(Core::System& system) {
     LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
 
-    auto* const current_thread = system.CurrentScheduler().GetCurrentThread();
-    system.GlobalScheduler().RemoveThread(SharedFrom(current_thread));
+    auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
+    system.GlobalSchedulerContext().RemoveThread(SharedFrom(current_thread));
     current_thread->Stop();
 }
 
@@ -1590,37 +1591,31 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
     LOG_DEBUG(Kernel_SVC, "called nanoseconds={}", nanoseconds);
 
     enum class SleepType : s64 {
-        YieldWithoutLoadBalancing = 0,
-        YieldWithLoadBalancing = -1,
+        YieldWithoutCoreMigration = 0,
+        YieldWithCoreMigration = -1,
         YieldAndWaitForLoadBalancing = -2,
     };
 
-    auto& scheduler = system.CurrentScheduler();
-    auto* const current_thread = scheduler.GetCurrentThread();
-    bool is_redundant = false;
-
+    auto& scheduler = *system.Kernel().CurrentScheduler();
     if (nanoseconds <= 0) {
         switch (static_cast<SleepType>(nanoseconds)) {
-        case SleepType::YieldWithoutLoadBalancing: {
-            auto pair = current_thread->YieldSimple();
-            is_redundant = pair.second;
+        case SleepType::YieldWithoutCoreMigration: {
+            scheduler.YieldWithoutCoreMigration();
             break;
         }
-        case SleepType::YieldWithLoadBalancing: {
-            auto pair = current_thread->YieldAndBalanceLoad();
-            is_redundant = pair.second;
+        case SleepType::YieldWithCoreMigration: {
+            scheduler.YieldWithCoreMigration();
             break;
         }
         case SleepType::YieldAndWaitForLoadBalancing: {
-            auto pair = current_thread->YieldAndWaitForLoadBalancing();
-            is_redundant = pair.second;
+            scheduler.YieldToAnyThread();
             break;
         }
         default:
             UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
         }
     } else {
-        current_thread->Sleep(nanoseconds);
+        scheduler.GetCurrentThread()->Sleep(nanoseconds);
     }
 }
 
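With this change a guest svcSleepThread with a non-positive duration is a yield rather than a timed sleep, dispatched straight to the new KScheduler entry points:

scheduler.YieldWithoutCoreMigration(); // nanoseconds == 0
scheduler.YieldWithCoreMigration();    // nanoseconds == -1
scheduler.YieldToAnyThread();          // nanoseconds == -2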
@@ -1656,8 +1651,8 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
     ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
     auto& kernel = system.Kernel();
     Handle event_handle;
-    Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
-    auto* const current_process = system.Kernel().CurrentProcess();
+    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
+    auto* const current_process = kernel.CurrentProcess();
     {
         SchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
         const auto& handle_table = current_process->GetHandleTable();
@@ -2627,7 +2622,7 @@ void Call(Core::System& system, u32 immediate) {
     auto& kernel = system.Kernel();
     kernel.EnterSVCProfile();
 
-    auto* thread = system.CurrentScheduler().GetCurrentThread();
+    auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
     thread->SetContinuousOnSVC(true);
 
     const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)
diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp
index 8b875d853..342fb4516 100644
--- a/src/core/hle/kernel/synchronization.cpp
+++ b/src/core/hle/kernel/synchronization.cpp
@@ -5,8 +5,8 @@
 #include "core/core.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/synchronization.h"
 #include "core/hle/kernel/synchronization_object.h"
 #include "core/hle/kernel/thread.h"
@@ -37,7 +37,7 @@ void Synchronization::SignalObject(SynchronizationObject& obj) const {
 std::pair<ResultCode, Handle> Synchronization::WaitFor(
     std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) {
     auto& kernel = system.Kernel();
-    auto* const thread = system.CurrentScheduler().GetCurrentThread();
+    auto* const thread = kernel.CurrentScheduler()->GetCurrentThread();
     Handle event_handle = InvalidHandle;
     {
         SchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 38b4a0987..804e07f2b 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -17,10 +17,10 @@
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/thread.h"
 #include "core/hle/kernel/time_manager.h"
 #include "core/hle/result.h"
@@ -186,9 +186,11 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
     thread->status = ThreadStatus::Dormant;
     thread->entry_point = entry_point;
     thread->stack_top = stack_top;
+    thread->disable_count = 1;
     thread->tpidr_el0 = 0;
     thread->nominal_priority = thread->current_priority = priority;
-    thread->last_running_ticks = 0;
+    thread->schedule_count = -1;
+    thread->last_scheduled_tick = 0;
     thread->processor_id = processor_id;
     thread->ideal_core = processor_id;
     thread->affinity_mask.SetAffinity(processor_id, true);
@@ -201,7 +203,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
     thread->owner_process = owner_process;
     thread->type = type_flags;
     if ((type_flags & THREADTYPE_IDLE) == 0) {
-        auto& scheduler = kernel.GlobalScheduler();
+        auto& scheduler = kernel.GlobalSchedulerContext();
         scheduler.AddThread(thread);
     }
     if (owner_process) {
@@ -402,39 +404,12 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
     return RESULT_SUCCESS;
 }
 
-std::pair<ResultCode, bool> Thread::YieldSimple() {
-    bool is_redundant = false;
-    {
-        SchedulerLock lock(kernel);
-        is_redundant = kernel.GlobalScheduler().YieldThread(this);
-    }
-    return {RESULT_SUCCESS, is_redundant};
-}
-
-std::pair<ResultCode, bool> Thread::YieldAndBalanceLoad() {
-    bool is_redundant = false;
-    {
-        SchedulerLock lock(kernel);
-        is_redundant = kernel.GlobalScheduler().YieldThreadAndBalanceLoad(this);
-    }
-    return {RESULT_SUCCESS, is_redundant};
-}
-
-std::pair<ResultCode, bool> Thread::YieldAndWaitForLoadBalancing() {
-    bool is_redundant = false;
-    {
-        SchedulerLock lock(kernel);
-        is_redundant = kernel.GlobalScheduler().YieldThreadAndWaitForLoadBalancing(this);
-    }
-    return {RESULT_SUCCESS, is_redundant};
-}
-
 void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
     const u32 old_state = scheduling_state;
     pausing_state |= static_cast<u32>(flag);
     const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
     scheduling_state = base_scheduling | pausing_state;
-    kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
+    KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
 void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
@@ -442,19 +417,20 @@ void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
     pausing_state &= ~static_cast<u32>(flag);
     const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
     scheduling_state = base_scheduling | pausing_state;
-    kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
+    KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
 void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
     const u32 old_state = scheduling_state;
     scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
                        static_cast<u32>(new_status);
-    kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
+    KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
 void Thread::SetCurrentPriority(u32 new_priority) {
     const u32 old_priority = std::exchange(current_priority, new_priority);
-    kernel.GlobalScheduler().AdjustSchedulingOnPriority(this, old_priority);
+    KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(),
+                                        old_priority);
 }
 
 ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
@@ -480,10 +456,10 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
         if (use_override) {
             ideal_core_override = new_core;
         } else {
-            const auto old_affinity_mask = affinity_mask.GetAffinityMask();
+            const auto old_affinity_mask = affinity_mask;
             affinity_mask.SetAffinityMask(new_affinity_mask);
             ideal_core = new_core;
-            if (old_affinity_mask != new_affinity_mask) {
+            if (old_affinity_mask.GetAffinityMask() != new_affinity_mask) {
                 const s32 old_core = processor_id;
                 if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) {
                     if (static_cast<s32>(ideal_core) < 0) {
@@ -493,7 +469,7 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
                     processor_id = ideal_core;
                 }
             }
-            kernel.GlobalScheduler().AdjustSchedulingOnAffinity(this, old_affinity_mask, old_core);
+            KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_affinity_mask, old_core);
         }
     }
     return RESULT_SUCCESS;
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 5192ecff1..f1aa358a4 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -28,10 +28,10 @@ class System;
 
 namespace Kernel {
 
-class GlobalScheduler;
+class GlobalSchedulerContext;
 class KernelCore;
 class Process;
-class Scheduler;
+class KScheduler;
 
 enum ThreadPriority : u32 {
     THREADPRIO_HIGHEST = 0, ///< Highest thread priority
@@ -346,8 +346,11 @@ public:
 
     void SetStatus(ThreadStatus new_status);
 
-    u64 GetLastRunningTicks() const {
-        return last_running_ticks;
+    constexpr s64 GetLastScheduledTick() const {
+        return this->last_scheduled_tick;
+    }
+    constexpr void SetLastScheduledTick(s64 tick) {
+        this->last_scheduled_tick = tick;
     }
 
     u64 GetTotalCPUTimeTicks() const {
@@ -362,10 +365,18 @@ public:
         return processor_id;
     }
 
+    s32 GetActiveCore() const {
+        return GetProcessorID();
+    }
+
     void SetProcessorID(s32 new_core) {
         processor_id = new_core;
     }
 
+    void SetActiveCore(s32 new_core) {
+        processor_id = new_core;
+    }
+
     Process* GetOwnerProcess() {
         return owner_process;
     }
@@ -479,21 +490,11 @@ public:
     /// Sleeps this thread for the given amount of nanoseconds.
     ResultCode Sleep(s64 nanoseconds);
 
-    /// Yields this thread without rebalancing loads.
-    std::pair<ResultCode, bool> YieldSimple();
-
-    /// Yields this thread and does a load rebalancing.
-    std::pair<ResultCode, bool> YieldAndBalanceLoad();
-
-    /// Yields this thread and if the core is left idle, loads are rebalanced
-    std::pair<ResultCode, bool> YieldAndWaitForLoadBalancing();
-
-    void IncrementYieldCount() {
-        yield_count++;
+    constexpr s64 GetYieldScheduleCount() const {
+        return this->schedule_count;
     }
-
-    u64 GetYieldCount() const {
-        return yield_count;
+    constexpr void SetYieldScheduleCount(s64 count) {
+        this->schedule_count = count;
     }
 
     ThreadSchedStatus GetSchedulingStatus() const {
@@ -569,9 +570,62 @@ public:
         return has_exited;
     }
 
+    struct QueueEntry {
+    private:
+        Thread* prev;
+        Thread* next;
+
+    public:
+        constexpr QueueEntry() : prev(nullptr), next(nullptr) { /* ... */
+        }
+
+        constexpr void Initialize() {
+            this->prev = nullptr;
+            this->next = nullptr;
+        }
+
+        constexpr Thread* GetPrev() const {
+            return this->prev;
+        }
+        constexpr Thread* GetNext() const {
+            return this->next;
+        }
+        constexpr void SetPrev(Thread* t) {
+            this->prev = t;
+        }
+        constexpr void SetNext(Thread* t) {
+            this->next = t;
+        }
+    };
+
+    constexpr QueueEntry& GetPriorityQueueEntry(s32 core) {
+        return this->per_core_priority_queue_entry[core];
+    }
+    constexpr const QueueEntry& GetPriorityQueueEntry(s32 core) const {
+        return this->per_core_priority_queue_entry[core];
+    }
+
+    s32 GetDisableDispatchCount() const {
+        return disable_count;
+    }
+
+    void DisableDispatch() {
+        ASSERT(GetDisableDispatchCount() >= 0);
+        disable_count++;
+    }
+
+    void EnableDispatch() {
+        ASSERT(GetDisableDispatchCount() > 0);
+        disable_count--;
+    }
+
+    ThreadStatus status = ThreadStatus::Dormant;
+    u32 scheduling_state = 0;
+
 private:
-    friend class GlobalScheduler;
-    friend class Scheduler;
+    friend class GlobalSchedulerContext;
+    friend class KScheduler;
+    friend class Process;
 
     void SetSchedulingStatus(ThreadSchedStatus new_status);
     void AddSchedulingFlag(ThreadSchedFlags flag);
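QueueEntry embeds one pair of doubly-linked-list links per core directly in each Thread, so a thread can sit in every core's priority queue without any heap allocation. An illustrative traversal of one core's queue (the head pointer is hypothetical; the accessors are the ones added above):

// Walk core 0's intrusive queue from an assumed head thread to the tail.
for (Thread* t = head; t != nullptr; t = t->GetPriorityQueueEntry(0).GetNext()) {
    // visit t in priority order
}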
@@ -586,10 +640,9 @@ private:
 
     u64 thread_id = 0;
 
-    ThreadStatus status = ThreadStatus::Dormant;
-
     VAddr entry_point = 0;
     VAddr stack_top = 0;
+    std::atomic_int disable_count = 0;
 
     ThreadType type;
 
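disable_count backs the DisableDispatch()/EnableDispatch() pair added above: dispatch disables nest like a lock count, and Thread::Create seeds it to 1 so a fresh thread cannot be dispatched until the scheduler explicitly enables it. A sketch of the pairing (the call site is illustrative):

thread->DisableDispatch(); // disable_count: n -> n + 1, core must not reschedule
// ... scheduling-sensitive work ...
thread->EnableDispatch();  // disable_count: n + 1 -> n (asserts it was > 0)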
@@ -603,9 +656,8 @@ private:
     u32 current_priority = 0;
 
     u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
-    u64 last_running_ticks = 0;   ///< CPU tick when thread was last running
-    u64 yield_count = 0;          ///< Number of redundant yields carried by this thread.
-                                  ///< a redundant yield is one where no scheduling is changed
+    s64 schedule_count{};
+    s64 last_scheduled_tick{};
 
     s32 processor_id = 0;
 
@@ -647,7 +699,9 @@ private:
     Handle hle_time_event;
     SynchronizationObject* hle_object;
 
-    Scheduler* scheduler = nullptr;
+    KScheduler* scheduler = nullptr;
+
+    QueueEntry per_core_priority_queue_entry[Core::Hardware::NUM_CPU_CORES]{};
 
     u32 ideal_core{0xFFFFFFFF};
     KAffinityMask affinity_mask{};
@@ -655,7 +709,6 @@ private:
     s32 ideal_core_override = -1;
     u32 affinity_override_count = 0;
 
-    u32 scheduling_state = 0;
     u32 pausing_state = 0;
     bool is_running = false;
     bool is_waiting_on_sync = false;
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index caf329bfb..8e4769694 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -7,8 +7,8 @@
 #include "core/core_timing.h"
 #include "core/core_timing_util.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/thread.h"
 #include "core/hle/kernel/time_manager.h"
 
diff --git a/src/core/hle/service/time/time.cpp b/src/core/hle/service/time/time.cpp
index 7b7ac282d..abc753d5d 100644
--- a/src/core/hle/service/time/time.cpp
+++ b/src/core/hle/service/time/time.cpp
@@ -10,8 +10,8 @@
 #include "core/hle/ipc_helpers.h"
 #include "core/hle/kernel/client_port.h"
 #include "core/hle/kernel/client_session.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/service/time/interface.h"
 #include "core/hle/service/time/time.h"
 #include "core/hle/service/time/time_sharedmemory.h"
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index c4ae1d61f..546a2cd4d 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -13,10 +13,10 @@
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/mutex.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/readable_event.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/synchronization_object.h"
 #include "core/hle/kernel/thread.h"
 #include "core/memory.h"
@@ -101,7 +101,7 @@ std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList()
     };
 
     const auto& system = Core::System::GetInstance();
-    add_threads(system.GlobalScheduler().GetThreadList());
+    add_threads(system.GlobalSchedulerContext().GetThreadList());
 
     return item_list;
 }
@@ -356,7 +356,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
         .arg(thread.GetPriority())
         .arg(thread.GetNominalPriority())));
     list.push_back(std::make_unique<WaitTreeText>(
-        tr("last running ticks = %1").arg(thread.GetLastRunningTicks())));
+        tr("last running ticks = %1").arg(thread.GetLastScheduledTick())));
 
     const VAddr mutex_wait_address = thread.GetMutexWaitAddress();
     if (mutex_wait_address != 0) {