| author | 2022-07-25 12:00:31 -0400 |
|---|---|
| committer | 2022-07-25 12:00:31 -0400 |
| commit | 591d1f1b09d2af6e432d4fb27af3321919758c0c |
| tree | 5b0efec541b5db56b7d32e6c90f1b2587c71611d |
| parent | Merge pull request #8484 from german77/irs_release |
| parent | kernel: Ensure all uses of disable_count are balanced |
Merge pull request #8549 from liamwhite/kscheduler-sc
kernel: use KScheduler from Mesosphere
| File | Lines changed |
|---|---|
| src/core/arm/arm_interface.cpp | 3 |
| src/core/cpu_manager.cpp | 129 |
| src/core/cpu_manager.h | 10 |
| src/core/hle/kernel/global_scheduler_context.cpp | 7 |
| src/core/hle/kernel/k_interrupt_manager.cpp | 7 |
| src/core/hle/kernel/k_scheduler.cpp | 735 |
| src/core/hle/kernel/k_scheduler.h | 223 |
| src/core/hle/kernel/k_scheduler_lock.h | 2 |
| src/core/hle/kernel/k_thread.cpp | 30 |
| src/core/hle/kernel/k_thread.h | 26 |
| src/core/hle/kernel/kernel.cpp | 27 |
| src/core/hle/kernel/physical_core.cpp | 1 |
| src/core/hle/kernel/svc.cpp | 7 |

13 files changed, 605 insertions(+), 602 deletions(-)
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp
index e72b250be..953d96439 100644
--- a/src/core/arm/arm_interface.cpp
+++ b/src/core/arm/arm_interface.cpp
| @@ -154,9 +154,10 @@ void ARM_Interface::Run() { | |||
| 154 | break; | 154 | break; |
| 155 | } | 155 | } |
| 156 | 156 | ||
| 157 | // Handle syscalls and scheduling (this may change the current thread) | 157 | // Handle syscalls and scheduling (this may change the current thread/core) |
| 158 | if (Has(hr, svc_call)) { | 158 | if (Has(hr, svc_call)) { |
| 159 | Kernel::Svc::Call(system, GetSvcNumber()); | 159 | Kernel::Svc::Call(system, GetSvcNumber()); |
| 160 | break; | ||
| 160 | } | 161 | } |
| 161 | if (Has(hr, break_loop) || !uses_wall_clock) { | 162 | if (Has(hr, break_loop) || !uses_wall_clock) { |
| 162 | break; | 163 | break; |
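
The hunk above adds a `break` after `Kernel::Svc::Call`, so the run loop always returns to the CPU manager once a supervisor call has been serviced, since the call may have changed the current thread or core. Below is a minimal sketch of that loop shape; `HaltReason`, `StepGuest`, and `HandleSupervisorCall` are illustrative stand-ins, not yuzu's actual interface.

```cpp
#include <cstdint>

enum class HaltReason : std::uint32_t {
    None      = 0,
    SvcCall   = 1u << 0,
    BreakLoop = 1u << 1,
};

constexpr bool Has(HaltReason value, HaltReason flag) {
    return (static_cast<std::uint32_t>(value) & static_cast<std::uint32_t>(flag)) != 0;
}

static HaltReason StepGuest() {
    return HaltReason::SvcCall;  // stub: pretend the guest immediately issued an SVC
}

static void HandleSupervisorCall() {
    // stub: the real handler may block, reschedule, or migrate the thread
}

void RunCoreLoop(bool uses_wall_clock) {
    while (true) {
        const HaltReason hr = StepGuest();

        if (Has(hr, HaltReason::SvcCall)) {
            HandleSupervisorCall();
            break;  // the current thread/core may have changed; let the caller re-evaluate
        }
        if (Has(hr, HaltReason::BreakLoop) || !uses_wall_clock) {
            break;
        }
    }
}
```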
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 37d3d83b9..9b1565ae1 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
| @@ -8,6 +8,7 @@ | |||
| 8 | #include "core/core.h" | 8 | #include "core/core.h" |
| 9 | #include "core/core_timing.h" | 9 | #include "core/core_timing.h" |
| 10 | #include "core/cpu_manager.h" | 10 | #include "core/cpu_manager.h" |
| 11 | #include "core/hle/kernel/k_interrupt_manager.h" | ||
| 11 | #include "core/hle/kernel/k_scheduler.h" | 12 | #include "core/hle/kernel/k_scheduler.h" |
| 12 | #include "core/hle/kernel/k_thread.h" | 13 | #include "core/hle/kernel/k_thread.h" |
| 13 | #include "core/hle/kernel/kernel.h" | 14 | #include "core/hle/kernel/kernel.h" |
| @@ -49,14 +50,6 @@ void CpuManager::GuestThreadFunction() { | |||
| 49 | } | 50 | } |
| 50 | } | 51 | } |
| 51 | 52 | ||
| 52 | void CpuManager::GuestRewindFunction() { | ||
| 53 | if (is_multicore) { | ||
| 54 | MultiCoreRunGuestLoop(); | ||
| 55 | } else { | ||
| 56 | SingleCoreRunGuestLoop(); | ||
| 57 | } | ||
| 58 | } | ||
| 59 | |||
| 60 | void CpuManager::IdleThreadFunction() { | 53 | void CpuManager::IdleThreadFunction() { |
| 61 | if (is_multicore) { | 54 | if (is_multicore) { |
| 62 | MultiCoreRunIdleThread(); | 55 | MultiCoreRunIdleThread(); |
| @@ -69,21 +62,21 @@ void CpuManager::ShutdownThreadFunction() { | |||
| 69 | ShutdownThread(); | 62 | ShutdownThread(); |
| 70 | } | 63 | } |
| 71 | 64 | ||
| 65 | void CpuManager::HandleInterrupt() { | ||
| 66 | auto& kernel = system.Kernel(); | ||
| 67 | auto core_index = kernel.CurrentPhysicalCoreIndex(); | ||
| 68 | |||
| 69 | Kernel::KInterruptManager::HandleInterrupt(kernel, static_cast<s32>(core_index)); | ||
| 70 | } | ||
| 71 | |||
| 72 | /////////////////////////////////////////////////////////////////////////////// | 72 | /////////////////////////////////////////////////////////////////////////////// |
| 73 | /// MultiCore /// | 73 | /// MultiCore /// |
| 74 | /////////////////////////////////////////////////////////////////////////////// | 74 | /////////////////////////////////////////////////////////////////////////////// |
| 75 | 75 | ||
| 76 | void CpuManager::MultiCoreRunGuestThread() { | 76 | void CpuManager::MultiCoreRunGuestThread() { |
| 77 | // Similar to UserModeThreadStarter in HOS | ||
| 77 | auto& kernel = system.Kernel(); | 78 | auto& kernel = system.Kernel(); |
| 78 | kernel.CurrentScheduler()->OnThreadStart(); | 79 | kernel.CurrentScheduler()->OnThreadStart(); |
| 79 | auto* thread = kernel.CurrentScheduler()->GetSchedulerCurrentThread(); | ||
| 80 | auto& host_context = thread->GetHostContext(); | ||
| 81 | host_context->SetRewindPoint([this] { GuestRewindFunction(); }); | ||
| 82 | MultiCoreRunGuestLoop(); | ||
| 83 | } | ||
| 84 | |||
| 85 | void CpuManager::MultiCoreRunGuestLoop() { | ||
| 86 | auto& kernel = system.Kernel(); | ||
| 87 | 80 | ||
| 88 | while (true) { | 81 | while (true) { |
| 89 | auto* physical_core = &kernel.CurrentPhysicalCore(); | 82 | auto* physical_core = &kernel.CurrentPhysicalCore(); |
| @@ -91,18 +84,26 @@ void CpuManager::MultiCoreRunGuestLoop() { | |||
| 91 | physical_core->Run(); | 84 | physical_core->Run(); |
| 92 | physical_core = &kernel.CurrentPhysicalCore(); | 85 | physical_core = &kernel.CurrentPhysicalCore(); |
| 93 | } | 86 | } |
| 94 | { | 87 | |
| 95 | Kernel::KScopedDisableDispatch dd(kernel); | 88 | HandleInterrupt(); |
| 96 | physical_core->ArmInterface().ClearExclusiveState(); | ||
| 97 | } | ||
| 98 | } | 89 | } |
| 99 | } | 90 | } |
| 100 | 91 | ||
| 101 | void CpuManager::MultiCoreRunIdleThread() { | 92 | void CpuManager::MultiCoreRunIdleThread() { |
| 93 | // Not accurate to HOS. Remove this entire method when singlecore is removed. | ||
| 94 | // See notes in KScheduler::ScheduleImpl for more information about why this | ||
| 95 | // is inaccurate. | ||
| 96 | |||
| 102 | auto& kernel = system.Kernel(); | 97 | auto& kernel = system.Kernel(); |
| 98 | kernel.CurrentScheduler()->OnThreadStart(); | ||
| 99 | |||
| 103 | while (true) { | 100 | while (true) { |
| 104 | Kernel::KScopedDisableDispatch dd(kernel); | 101 | auto& physical_core = kernel.CurrentPhysicalCore(); |
| 105 | kernel.CurrentPhysicalCore().Idle(); | 102 | if (!physical_core.IsInterrupted()) { |
| 103 | physical_core.Idle(); | ||
| 104 | } | ||
| 105 | |||
| 106 | HandleInterrupt(); | ||
| 106 | } | 107 | } |
| 107 | } | 108 | } |
| 108 | 109 | ||
| @@ -113,80 +114,73 @@ void CpuManager::MultiCoreRunIdleThread() { | |||
| 113 | void CpuManager::SingleCoreRunGuestThread() { | 114 | void CpuManager::SingleCoreRunGuestThread() { |
| 114 | auto& kernel = system.Kernel(); | 115 | auto& kernel = system.Kernel(); |
| 115 | kernel.CurrentScheduler()->OnThreadStart(); | 116 | kernel.CurrentScheduler()->OnThreadStart(); |
| 116 | auto* thread = kernel.CurrentScheduler()->GetSchedulerCurrentThread(); | ||
| 117 | auto& host_context = thread->GetHostContext(); | ||
| 118 | host_context->SetRewindPoint([this] { GuestRewindFunction(); }); | ||
| 119 | SingleCoreRunGuestLoop(); | ||
| 120 | } | ||
| 121 | 117 | ||
| 122 | void CpuManager::SingleCoreRunGuestLoop() { | ||
| 123 | auto& kernel = system.Kernel(); | ||
| 124 | while (true) { | 118 | while (true) { |
| 125 | auto* physical_core = &kernel.CurrentPhysicalCore(); | 119 | auto* physical_core = &kernel.CurrentPhysicalCore(); |
| 126 | if (!physical_core->IsInterrupted()) { | 120 | if (!physical_core->IsInterrupted()) { |
| 127 | physical_core->Run(); | 121 | physical_core->Run(); |
| 128 | physical_core = &kernel.CurrentPhysicalCore(); | 122 | physical_core = &kernel.CurrentPhysicalCore(); |
| 129 | } | 123 | } |
| 124 | |||
| 130 | kernel.SetIsPhantomModeForSingleCore(true); | 125 | kernel.SetIsPhantomModeForSingleCore(true); |
| 131 | system.CoreTiming().Advance(); | 126 | system.CoreTiming().Advance(); |
| 132 | kernel.SetIsPhantomModeForSingleCore(false); | 127 | kernel.SetIsPhantomModeForSingleCore(false); |
| 133 | physical_core->ArmInterface().ClearExclusiveState(); | 128 | |
| 134 | PreemptSingleCore(); | 129 | PreemptSingleCore(); |
| 135 | auto& scheduler = kernel.Scheduler(current_core); | 130 | HandleInterrupt(); |
| 136 | scheduler.RescheduleCurrentCore(); | ||
| 137 | } | 131 | } |
| 138 | } | 132 | } |
| 139 | 133 | ||
| 140 | void CpuManager::SingleCoreRunIdleThread() { | 134 | void CpuManager::SingleCoreRunIdleThread() { |
| 141 | auto& kernel = system.Kernel(); | 135 | auto& kernel = system.Kernel(); |
| 136 | kernel.CurrentScheduler()->OnThreadStart(); | ||
| 137 | |||
| 142 | while (true) { | 138 | while (true) { |
| 143 | auto& physical_core = kernel.CurrentPhysicalCore(); | ||
| 144 | PreemptSingleCore(false); | 139 | PreemptSingleCore(false); |
| 145 | system.CoreTiming().AddTicks(1000U); | 140 | system.CoreTiming().AddTicks(1000U); |
| 146 | idle_count++; | 141 | idle_count++; |
| 147 | auto& scheduler = physical_core.Scheduler(); | 142 | HandleInterrupt(); |
| 148 | scheduler.RescheduleCurrentCore(); | ||
| 149 | } | 143 | } |
| 150 | } | 144 | } |
| 151 | 145 | ||
| 152 | void CpuManager::PreemptSingleCore(bool from_running_enviroment) { | 146 | void CpuManager::PreemptSingleCore(bool from_running_environment) { |
| 153 | { | 147 | auto& kernel = system.Kernel(); |
| 154 | auto& kernel = system.Kernel(); | ||
| 155 | auto& scheduler = kernel.Scheduler(current_core); | ||
| 156 | Kernel::KThread* current_thread = scheduler.GetSchedulerCurrentThread(); | ||
| 157 | if (idle_count >= 4 || from_running_enviroment) { | ||
| 158 | if (!from_running_enviroment) { | ||
| 159 | system.CoreTiming().Idle(); | ||
| 160 | idle_count = 0; | ||
| 161 | } | ||
| 162 | kernel.SetIsPhantomModeForSingleCore(true); | ||
| 163 | system.CoreTiming().Advance(); | ||
| 164 | kernel.SetIsPhantomModeForSingleCore(false); | ||
| 165 | } | ||
| 166 | current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES); | ||
| 167 | system.CoreTiming().ResetTicks(); | ||
| 168 | scheduler.Unload(scheduler.GetSchedulerCurrentThread()); | ||
| 169 | |||
| 170 | auto& next_scheduler = kernel.Scheduler(current_core); | ||
| 171 | Common::Fiber::YieldTo(current_thread->GetHostContext(), *next_scheduler.ControlContext()); | ||
| 172 | } | ||
| 173 | 148 | ||
| 174 | // May have changed scheduler | 149 | if (idle_count >= 4 || from_running_environment) { |
| 175 | { | 150 | if (!from_running_environment) { |
| 176 | auto& scheduler = system.Kernel().Scheduler(current_core); | 151 | system.CoreTiming().Idle(); |
| 177 | scheduler.Reload(scheduler.GetSchedulerCurrentThread()); | ||
| 178 | if (!scheduler.IsIdle()) { | ||
| 179 | idle_count = 0; | 152 | idle_count = 0; |
| 180 | } | 153 | } |
| 154 | kernel.SetIsPhantomModeForSingleCore(true); | ||
| 155 | system.CoreTiming().Advance(); | ||
| 156 | kernel.SetIsPhantomModeForSingleCore(false); | ||
| 157 | } | ||
| 158 | current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES); | ||
| 159 | system.CoreTiming().ResetTicks(); | ||
| 160 | kernel.Scheduler(current_core).PreemptSingleCore(); | ||
| 161 | |||
| 162 | // We've now been scheduled again, and we may have exchanged schedulers. | ||
| 163 | // Reload the scheduler in case it's different. | ||
| 164 | if (!kernel.Scheduler(current_core).IsIdle()) { | ||
| 165 | idle_count = 0; | ||
| 181 | } | 166 | } |
| 182 | } | 167 | } |
| 183 | 168 | ||
| 169 | void CpuManager::GuestActivate() { | ||
| 170 | // Similar to the HorizonKernelMain callback in HOS | ||
| 171 | auto& kernel = system.Kernel(); | ||
| 172 | auto* scheduler = kernel.CurrentScheduler(); | ||
| 173 | |||
| 174 | scheduler->Activate(); | ||
| 175 | UNREACHABLE(); | ||
| 176 | } | ||
| 177 | |||
| 184 | void CpuManager::ShutdownThread() { | 178 | void CpuManager::ShutdownThread() { |
| 185 | auto& kernel = system.Kernel(); | 179 | auto& kernel = system.Kernel(); |
| 180 | auto* thread = kernel.GetCurrentEmuThread(); | ||
| 186 | auto core = is_multicore ? kernel.CurrentPhysicalCoreIndex() : 0; | 181 | auto core = is_multicore ? kernel.CurrentPhysicalCoreIndex() : 0; |
| 187 | auto* current_thread = kernel.GetCurrentEmuThread(); | ||
| 188 | 182 | ||
| 189 | Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context); | 183 | Common::Fiber::YieldTo(thread->GetHostContext(), *core_data[core].host_context); |
| 190 | UNREACHABLE(); | 184 | UNREACHABLE(); |
| 191 | } | 185 | } |
| 192 | 186 | ||
| @@ -218,9 +212,12 @@ void CpuManager::RunThread(std::size_t core) { | |||
| 218 | system.GPU().ObtainContext(); | 212 | system.GPU().ObtainContext(); |
| 219 | } | 213 | } |
| 220 | 214 | ||
| 221 | auto* current_thread = system.Kernel().CurrentScheduler()->GetIdleThread(); | 215 | auto& kernel = system.Kernel(); |
| 222 | Kernel::SetCurrentThread(system.Kernel(), current_thread); | 216 | auto& scheduler = *kernel.CurrentScheduler(); |
| 223 | Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext()); | 217 | auto* thread = scheduler.GetSchedulerCurrentThread(); |
| 218 | Kernel::SetCurrentThread(kernel, thread); | ||
| 219 | |||
| 220 | Common::Fiber::YieldTo(data.host_context, *thread->GetHostContext()); | ||
| 224 | } | 221 | } |
| 225 | 222 | ||
| 226 | } // namespace Core | 223 | } // namespace Core |
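
The cpu_manager.cpp changes above drop the rewind-fiber machinery and funnel both the guest and idle loops through a shared `HandleInterrupt()`. A rough sketch of that loop shape follows, using simplified stand-in types rather than yuzu's classes.

```cpp
// Simplified stand-ins for yuzu's PhysicalCore / scheduler objects.
struct PhysicalCoreSketch {
    bool interrupt_pending = false;

    bool IsInterrupted() const { return interrupt_pending; }
    void Run()  { /* execute guest instructions until the next halt */ }
    void Idle() { /* wait until an interrupt arrives */ }
};

struct CoreLoopSketch {
    PhysicalCoreSketch core;

    void HandleInterrupt() {
        core.interrupt_pending = false;  // acknowledge; the real code then reschedules
    }

    // Compare MultiCoreRunGuestThread(): run guest code, then handle the interrupt.
    void GuestLoop() {
        for (;;) {
            while (!core.IsInterrupted()) {
                core.Run();
            }
            HandleInterrupt();
        }
    }

    // Compare MultiCoreRunIdleThread(): idle until interrupted, then handle it.
    void IdleLoop() {
        for (;;) {
            if (!core.IsInterrupted()) {
                core.Idle();
            }
            HandleInterrupt();
        }
    }
};
```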
diff --git a/src/core/cpu_manager.h b/src/core/cpu_manager.h
index 76dc58ee1..95ea3ef39 100644
--- a/src/core/cpu_manager.h
+++ b/src/core/cpu_manager.h
| @@ -50,7 +50,10 @@ public: | |||
| 50 | void Initialize(); | 50 | void Initialize(); |
| 51 | void Shutdown(); | 51 | void Shutdown(); |
| 52 | 52 | ||
| 53 | std::function<void()> GetGuestThreadStartFunc() { | 53 | std::function<void()> GetGuestActivateFunc() { |
| 54 | return [this] { GuestActivate(); }; | ||
| 55 | } | ||
| 56 | std::function<void()> GetGuestThreadFunc() { | ||
| 54 | return [this] { GuestThreadFunction(); }; | 57 | return [this] { GuestThreadFunction(); }; |
| 55 | } | 58 | } |
| 56 | std::function<void()> GetIdleThreadStartFunc() { | 59 | std::function<void()> GetIdleThreadStartFunc() { |
| @@ -68,20 +71,19 @@ public: | |||
| 68 | 71 | ||
| 69 | private: | 72 | private: |
| 70 | void GuestThreadFunction(); | 73 | void GuestThreadFunction(); |
| 71 | void GuestRewindFunction(); | ||
| 72 | void IdleThreadFunction(); | 74 | void IdleThreadFunction(); |
| 73 | void ShutdownThreadFunction(); | 75 | void ShutdownThreadFunction(); |
| 74 | 76 | ||
| 75 | void MultiCoreRunGuestThread(); | 77 | void MultiCoreRunGuestThread(); |
| 76 | void MultiCoreRunGuestLoop(); | ||
| 77 | void MultiCoreRunIdleThread(); | 78 | void MultiCoreRunIdleThread(); |
| 78 | 79 | ||
| 79 | void SingleCoreRunGuestThread(); | 80 | void SingleCoreRunGuestThread(); |
| 80 | void SingleCoreRunGuestLoop(); | ||
| 81 | void SingleCoreRunIdleThread(); | 81 | void SingleCoreRunIdleThread(); |
| 82 | 82 | ||
| 83 | static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core); | 83 | static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core); |
| 84 | 84 | ||
| 85 | void GuestActivate(); | ||
| 86 | void HandleInterrupt(); | ||
| 85 | void ShutdownThread(); | 87 | void ShutdownThread(); |
| 86 | void RunThread(std::size_t core); | 88 | void RunThread(std::size_t core); |
| 87 | 89 | ||
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index 164436b26..65576b8c4 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
| @@ -41,12 +41,7 @@ void GlobalSchedulerContext::PreemptThreads() { | |||
| 41 | ASSERT(IsLocked()); | 41 | ASSERT(IsLocked()); |
| 42 | for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | 42 | for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { |
| 43 | const u32 priority = preemption_priorities[core_id]; | 43 | const u32 priority = preemption_priorities[core_id]; |
| 44 | kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority); | 44 | KScheduler::RotateScheduledQueue(kernel, core_id, priority); |
| 45 | |||
| 46 | // Signal an interrupt occurred. For core 3, this is a certainty, as preemption will result | ||
| 47 | // in the rotator thread being scheduled. For cores 0-2, this is to simulate or system | ||
| 48 | // interrupts that may have occurred. | ||
| 49 | kernel.PhysicalCore(core_id).Interrupt(); | ||
| 50 | } | 45 | } |
| 51 | } | 46 | } |
| 52 | 47 | ||
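
With this change, `PreemptThreads` only rotates each core's queue at its preemption priority and no longer interrupts the cores directly; rescheduling is requested by the scheduler itself. A simplified sketch of the rotation step, using plain standard containers in place of `KPriorityQueue`:

```cpp
#include <array>
#include <cstddef>
#include <deque>

// One queue of runnable threads per core, already narrowed to that core's
// preemption priority. ThreadRef and NumCores are illustrative stand-ins.
constexpr std::size_t NumCores = 4;

struct ThreadRef {
    int id = 0;
};

using RunQueue = std::deque<ThreadRef>;

void RotateScheduledQueue(RunQueue& queue) {
    // Move the front thread to the back so same-priority threads share the core.
    if (queue.size() > 1) {
        ThreadRef front = queue.front();
        queue.pop_front();
        queue.push_back(front);
    }
}

void PreemptThreads(std::array<RunQueue, NumCores>& per_core_queues) {
    // Assumed to run with the global scheduler lock held.
    for (auto& queue : per_core_queues) {
        RotateScheduledQueue(queue);
    }
    // Note: cores are no longer interrupted here; the scheduler itself requests
    // rescheduling when the rotation changes the highest-priority thread.
}
```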
diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp
index d606a7f86..1b577a5b3 100644
--- a/src/core/hle/kernel/k_interrupt_manager.cpp
+++ b/src/core/hle/kernel/k_interrupt_manager.cpp
| @@ -6,6 +6,7 @@ | |||
| 6 | #include "core/hle/kernel/k_scheduler.h" | 6 | #include "core/hle/kernel/k_scheduler.h" |
| 7 | #include "core/hle/kernel/k_thread.h" | 7 | #include "core/hle/kernel/k_thread.h" |
| 8 | #include "core/hle/kernel/kernel.h" | 8 | #include "core/hle/kernel/kernel.h" |
| 9 | #include "core/hle/kernel/physical_core.h" | ||
| 9 | 10 | ||
| 10 | namespace Kernel::KInterruptManager { | 11 | namespace Kernel::KInterruptManager { |
| 11 | 12 | ||
| @@ -15,6 +16,9 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) { | |||
| 15 | return; | 16 | return; |
| 16 | } | 17 | } |
| 17 | 18 | ||
| 19 | // Acknowledge the interrupt. | ||
| 20 | kernel.PhysicalCore(core_id).ClearInterrupt(); | ||
| 21 | |||
| 18 | auto& current_thread = GetCurrentThread(kernel); | 22 | auto& current_thread = GetCurrentThread(kernel); |
| 19 | 23 | ||
| 20 | // If the user disable count is set, we may need to pin the current thread. | 24 | // If the user disable count is set, we may need to pin the current thread. |
| @@ -27,6 +31,9 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) { | |||
| 27 | // Set the interrupt flag for the thread. | 31 | // Set the interrupt flag for the thread. |
| 28 | GetCurrentThread(kernel).SetInterruptFlag(); | 32 | GetCurrentThread(kernel).SetInterruptFlag(); |
| 29 | } | 33 | } |
| 34 | |||
| 35 | // Request interrupt scheduling. | ||
| 36 | kernel.CurrentScheduler()->RequestScheduleOnInterrupt(); | ||
| 30 | } | 37 | } |
| 31 | 38 | ||
| 32 | } // namespace Kernel::KInterruptManager | 39 | } // namespace Kernel::KInterruptManager |
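
A sketch of the interrupt-handling order this file now follows: acknowledge the interrupt, pin the current thread if its user disable count is set, then ask the current core's scheduler to reschedule. The types below are simplified stand-ins for the kernel objects involved.

```cpp
#include <cstdint>

struct Core {
    bool interrupt_pending = false;
    void ClearInterrupt() { interrupt_pending = false; }
};

struct Thread {
    std::uint32_t user_disable_count = 0;  // set by guest code around critical sections
    bool interrupt_flag = false;
    bool pinned = false;
    void SetInterruptFlag() { interrupt_flag = true; }
};

struct Scheduler {
    bool needs_scheduling = false;
    void RequestScheduleOnInterrupt() { needs_scheduling = true; }
};

void HandleInterrupt(Core& core, Thread& current, Scheduler& scheduler) {
    // 1. Acknowledge the interrupt on the physical core.
    core.ClearInterrupt();

    // 2. If the guest asked not to be interrupted (user disable count set) and the
    //    thread is not already pinned, pin it via its owning process and flag it.
    if (current.user_disable_count != 0 && !current.pinned) {
        current.pinned = true;
        current.SetInterruptFlag();
    }

    // 3. Ask the current core's scheduler to reschedule.
    scheduler.RequestScheduleOnInterrupt();
}
```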
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index d599d2bcb..c34ce7a17 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
| @@ -27,69 +27,185 @@ static void IncrementScheduledCount(Kernel::KThread* thread) { | |||
| 27 | } | 27 | } |
| 28 | } | 28 | } |
| 29 | 29 | ||
| 30 | void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule) { | 30 | KScheduler::KScheduler(KernelCore& kernel_) : kernel{kernel_} { |
| 31 | auto scheduler = kernel.CurrentScheduler(); | 31 | m_switch_fiber = std::make_shared<Common::Fiber>([this] { |
| 32 | 32 | while (true) { | |
| 33 | u32 current_core{0xF}; | 33 | ScheduleImplFiber(); |
| 34 | bool must_context_switch{}; | ||
| 35 | if (scheduler) { | ||
| 36 | current_core = scheduler->core_id; | ||
| 37 | // TODO(bunnei): Should be set to true when we deprecate single core | ||
| 38 | must_context_switch = !kernel.IsPhantomModeForSingleCore(); | ||
| 39 | } | ||
| 40 | |||
| 41 | while (cores_pending_reschedule != 0) { | ||
| 42 | const auto core = static_cast<u32>(std::countr_zero(cores_pending_reschedule)); | ||
| 43 | ASSERT(core < Core::Hardware::NUM_CPU_CORES); | ||
| 44 | if (!must_context_switch || core != current_core) { | ||
| 45 | auto& phys_core = kernel.PhysicalCore(core); | ||
| 46 | phys_core.Interrupt(); | ||
| 47 | } | 34 | } |
| 48 | cores_pending_reschedule &= ~(1ULL << core); | 35 | }); |
| 36 | |||
| 37 | m_state.needs_scheduling = true; | ||
| 38 | } | ||
| 39 | |||
| 40 | KScheduler::~KScheduler() = default; | ||
| 41 | |||
| 42 | void KScheduler::SetInterruptTaskRunnable() { | ||
| 43 | m_state.interrupt_task_runnable = true; | ||
| 44 | m_state.needs_scheduling = true; | ||
| 45 | } | ||
| 46 | |||
| 47 | void KScheduler::RequestScheduleOnInterrupt() { | ||
| 48 | m_state.needs_scheduling = true; | ||
| 49 | |||
| 50 | if (CanSchedule(kernel)) { | ||
| 51 | ScheduleOnInterrupt(); | ||
| 49 | } | 52 | } |
| 53 | } | ||
| 50 | 54 | ||
| 51 | for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) { | 55 | void KScheduler::DisableScheduling(KernelCore& kernel) { |
| 52 | if (kernel.PhysicalCore(core_id).IsInterrupted()) { | 56 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); |
| 53 | KInterruptManager::HandleInterrupt(kernel, static_cast<s32>(core_id)); | 57 | GetCurrentThread(kernel).DisableDispatch(); |
| 54 | } | 58 | } |
| 59 | |||
| 60 | void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) { | ||
| 61 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 1); | ||
| 62 | |||
| 63 | auto* scheduler{kernel.CurrentScheduler()}; | ||
| 64 | |||
| 65 | if (!scheduler || kernel.IsPhantomModeForSingleCore()) { | ||
| 66 | KScheduler::RescheduleCores(kernel, cores_needing_scheduling); | ||
| 67 | KScheduler::RescheduleCurrentHLEThread(kernel); | ||
| 68 | return; | ||
| 69 | } | ||
| 70 | |||
| 71 | scheduler->RescheduleOtherCores(cores_needing_scheduling); | ||
| 72 | |||
| 73 | if (GetCurrentThread(kernel).GetDisableDispatchCount() > 1) { | ||
| 74 | GetCurrentThread(kernel).EnableDispatch(); | ||
| 75 | } else { | ||
| 76 | scheduler->RescheduleCurrentCore(); | ||
| 77 | } | ||
| 78 | } | ||
| 79 | |||
| 80 | void KScheduler::RescheduleCurrentHLEThread(KernelCore& kernel) { | ||
| 81 | // HACK: we cannot schedule from this thread, it is not a core thread | ||
| 82 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); | ||
| 83 | |||
| 84 | // Special case to ensure dummy threads that are waiting block | ||
| 85 | GetCurrentThread(kernel).IfDummyThreadTryWait(); | ||
| 86 | |||
| 87 | ASSERT(GetCurrentThread(kernel).GetState() != ThreadState::Waiting); | ||
| 88 | GetCurrentThread(kernel).EnableDispatch(); | ||
| 89 | } | ||
| 90 | |||
| 91 | u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { | ||
| 92 | if (IsSchedulerUpdateNeeded(kernel)) { | ||
| 93 | return UpdateHighestPriorityThreadsImpl(kernel); | ||
| 94 | } else { | ||
| 95 | return 0; | ||
| 96 | } | ||
| 97 | } | ||
| 98 | |||
| 99 | void KScheduler::Schedule() { | ||
| 100 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); | ||
| 101 | ASSERT(m_core_id == GetCurrentCoreId(kernel)); | ||
| 102 | |||
| 103 | ScheduleImpl(); | ||
| 104 | } | ||
| 105 | |||
| 106 | void KScheduler::ScheduleOnInterrupt() { | ||
| 107 | GetCurrentThread(kernel).DisableDispatch(); | ||
| 108 | Schedule(); | ||
| 109 | GetCurrentThread(kernel).EnableDispatch(); | ||
| 110 | } | ||
| 111 | |||
| 112 | void KScheduler::PreemptSingleCore() { | ||
| 113 | GetCurrentThread(kernel).DisableDispatch(); | ||
| 114 | |||
| 115 | auto* thread = GetCurrentThreadPointer(kernel); | ||
| 116 | auto& previous_scheduler = kernel.Scheduler(thread->GetCurrentCore()); | ||
| 117 | previous_scheduler.Unload(thread); | ||
| 118 | |||
| 119 | Common::Fiber::YieldTo(thread->GetHostContext(), *m_switch_fiber); | ||
| 120 | |||
| 121 | GetCurrentThread(kernel).EnableDispatch(); | ||
| 122 | } | ||
| 123 | |||
| 124 | void KScheduler::RescheduleCurrentCore() { | ||
| 125 | ASSERT(!kernel.IsPhantomModeForSingleCore()); | ||
| 126 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); | ||
| 127 | |||
| 128 | GetCurrentThread(kernel).EnableDispatch(); | ||
| 129 | |||
| 130 | if (m_state.needs_scheduling.load()) { | ||
| 131 | // Disable interrupts, and then check again if rescheduling is needed. | ||
| 132 | // KScopedInterruptDisable intr_disable; | ||
| 133 | |||
| 134 | kernel.CurrentScheduler()->RescheduleCurrentCoreImpl(); | ||
| 55 | } | 135 | } |
| 136 | } | ||
| 56 | 137 | ||
| 57 | if (must_context_switch) { | 138 | void KScheduler::RescheduleCurrentCoreImpl() { |
| 58 | auto core_scheduler = kernel.CurrentScheduler(); | 139 | // Check that scheduling is needed. |
| 59 | kernel.ExitSVCProfile(); | 140 | if (m_state.needs_scheduling.load()) [[likely]] { |
| 60 | core_scheduler->RescheduleCurrentCore(); | 141 | GetCurrentThread(kernel).DisableDispatch(); |
| 61 | kernel.EnterSVCProfile(); | 142 | Schedule(); |
| 143 | GetCurrentThread(kernel).EnableDispatch(); | ||
| 62 | } | 144 | } |
| 63 | } | 145 | } |
| 64 | 146 | ||
| 147 | void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id) { | ||
| 148 | // Set core ID/idle thread/interrupt task manager. | ||
| 149 | m_core_id = core_id; | ||
| 150 | m_idle_thread = idle_thread; | ||
| 151 | // m_state.idle_thread_stack = m_idle_thread->GetStackTop(); | ||
| 152 | // m_state.interrupt_task_manager = &kernel.GetInterruptTaskManager(); | ||
| 153 | |||
| 154 | // Insert the main thread into the priority queue. | ||
| 155 | // { | ||
| 156 | // KScopedSchedulerLock lk{kernel}; | ||
| 157 | // GetPriorityQueue(kernel).PushBack(GetCurrentThreadPointer(kernel)); | ||
| 158 | // SetSchedulerUpdateNeeded(kernel); | ||
| 159 | // } | ||
| 160 | |||
| 161 | // Bind interrupt handler. | ||
| 162 | // kernel.GetInterruptManager().BindHandler( | ||
| 163 | // GetSchedulerInterruptHandler(kernel), KInterruptName::Scheduler, m_core_id, | ||
| 164 | // KInterruptController::PriorityLevel::Scheduler, false, false); | ||
| 165 | |||
| 166 | // Set the current thread. | ||
| 167 | m_current_thread = main_thread; | ||
| 168 | } | ||
| 169 | |||
| 170 | void KScheduler::Activate() { | ||
| 171 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); | ||
| 172 | |||
| 173 | // m_state.should_count_idle = KTargetSystem::IsDebugMode(); | ||
| 174 | m_is_active = true; | ||
| 175 | RescheduleCurrentCore(); | ||
| 176 | } | ||
| 177 | |||
| 178 | void KScheduler::OnThreadStart() { | ||
| 179 | GetCurrentThread(kernel).EnableDispatch(); | ||
| 180 | } | ||
| 181 | |||
| 65 | u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) { | 182 | u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) { |
| 66 | KScopedSpinLock lk{guard}; | 183 | if (KThread* prev_highest_thread = m_state.highest_priority_thread; |
| 67 | if (KThread* prev_highest_thread = state.highest_priority_thread; | 184 | prev_highest_thread != highest_thread) [[likely]] { |
| 68 | prev_highest_thread != highest_thread) { | 185 | if (prev_highest_thread != nullptr) [[likely]] { |
| 69 | if (prev_highest_thread != nullptr) { | ||
| 70 | IncrementScheduledCount(prev_highest_thread); | 186 | IncrementScheduledCount(prev_highest_thread); |
| 71 | prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks()); | 187 | prev_highest_thread->SetLastScheduledTick(kernel.System().CoreTiming().GetCPUTicks()); |
| 72 | } | 188 | } |
| 73 | if (state.should_count_idle) { | 189 | if (m_state.should_count_idle) { |
| 74 | if (highest_thread != nullptr) { | 190 | if (highest_thread != nullptr) [[likely]] { |
| 75 | if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) { | 191 | if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) { |
| 76 | process->SetRunningThread(core_id, highest_thread, state.idle_count); | 192 | process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count); |
| 77 | } | 193 | } |
| 78 | } else { | 194 | } else { |
| 79 | state.idle_count++; | 195 | m_state.idle_count++; |
| 80 | } | 196 | } |
| 81 | } | 197 | } |
| 82 | 198 | ||
| 83 | state.highest_priority_thread = highest_thread; | 199 | m_state.highest_priority_thread = highest_thread; |
| 84 | state.needs_scheduling.store(true); | 200 | m_state.needs_scheduling = true; |
| 85 | return (1ULL << core_id); | 201 | return (1ULL << m_core_id); |
| 86 | } else { | 202 | } else { |
| 87 | return 0; | 203 | return 0; |
| 88 | } | 204 | } |
| 89 | } | 205 | } |
| 90 | 206 | ||
| 91 | u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | 207 | u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { |
| 92 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | 208 | ASSERT(IsSchedulerLockedByCurrentThread(kernel)); |
| 93 | 209 | ||
| 94 | // Clear that we need to update. | 210 | // Clear that we need to update. |
| 95 | ClearSchedulerUpdateNeeded(kernel); | 211 | ClearSchedulerUpdateNeeded(kernel); |
| @@ -98,18 +214,20 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | |||
| 98 | KThread* top_threads[Core::Hardware::NUM_CPU_CORES]; | 214 | KThread* top_threads[Core::Hardware::NUM_CPU_CORES]; |
| 99 | auto& priority_queue = GetPriorityQueue(kernel); | 215 | auto& priority_queue = GetPriorityQueue(kernel); |
| 100 | 216 | ||
| 101 | /// We want to go over all cores, finding the highest priority thread and determining if | 217 | // We want to go over all cores, finding the highest priority thread and determining if |
| 102 | /// scheduling is needed for that core. | 218 | // scheduling is needed for that core. |
| 103 | for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | 219 | for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { |
| 104 | KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id)); | 220 | KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id)); |
| 105 | if (top_thread != nullptr) { | 221 | if (top_thread != nullptr) { |
| 106 | // If the thread has no waiters, we need to check if the process has a thread pinned. | 222 | // We need to check if the thread's process has a pinned thread. |
| 107 | if (top_thread->GetNumKernelWaiters() == 0) { | 223 | if (KProcess* parent = top_thread->GetOwnerProcess()) { |
| 108 | if (KProcess* parent = top_thread->GetOwnerProcess(); parent != nullptr) { | 224 | // Check that there's a pinned thread other than the current top thread. |
| 109 | if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id)); | 225 | if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id)); |
| 110 | pinned != nullptr && pinned != top_thread) { | 226 | pinned != nullptr && pinned != top_thread) { |
| 111 | // We prefer our parent's pinned thread if possible. However, we also don't | 227 | // We need to prefer threads with kernel waiters to the pinned thread. |
| 112 | // want to schedule un-runnable threads. | 228 | if (top_thread->GetNumKernelWaiters() == |
| 229 | 0 /* && top_thread != parent->GetExceptionThread() */) { | ||
| 230 | // If the pinned thread is runnable, use it. | ||
| 113 | if (pinned->GetRawState() == ThreadState::Runnable) { | 231 | if (pinned->GetRawState() == ThreadState::Runnable) { |
| 114 | top_thread = pinned; | 232 | top_thread = pinned; |
| 115 | } else { | 233 | } else { |
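
The selection logic in the hunk above prefers a process's pinned thread over the queue front, but only when the front thread holds no kernel waiters, and it never schedules a non-runnable pinned thread. A condensed sketch of that decision, with simplified stand-in types:

```cpp
#include <cstdint>

// Simplified stand-ins; the real logic operates on KThread/KProcess.
enum class ThreadState { Runnable, Waiting };

struct Thread;

struct Process {
    Thread* pinned_thread = nullptr;  // tracked per core in the real kernel
};

struct Thread {
    ThreadState state = ThreadState::Runnable;
    std::int32_t num_kernel_waiters = 0;
    Process* owner = nullptr;
};

// Decide which thread a core should run, given the front of its priority queue.
Thread* SelectTopThread(Thread* queue_front) {
    Thread* top = queue_front;
    if (top == nullptr || top->owner == nullptr) {
        return top;
    }

    if (Thread* pinned = top->owner->pinned_thread; pinned != nullptr && pinned != top) {
        // A thread holding kernel waiters keeps priority over the pinned thread.
        if (top->num_kernel_waiters == 0) {
            // Prefer the pinned thread, but never schedule one that is not runnable.
            top = (pinned->state == ThreadState::Runnable) ? pinned : nullptr;
        }
    }
    return top;
}
```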
| @@ -129,7 +247,8 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | |||
| 129 | 247 | ||
| 130 | // Idle cores are bad. We're going to try to migrate threads to each idle core in turn. | 248 | // Idle cores are bad. We're going to try to migrate threads to each idle core in turn. |
| 131 | while (idle_cores != 0) { | 249 | while (idle_cores != 0) { |
| 132 | const auto core_id = static_cast<u32>(std::countr_zero(idle_cores)); | 250 | const s32 core_id = static_cast<s32>(std::countr_zero(idle_cores)); |
| 251 | |||
| 133 | if (KThread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) { | 252 | if (KThread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) { |
| 134 | s32 migration_candidates[Core::Hardware::NUM_CPU_CORES]; | 253 | s32 migration_candidates[Core::Hardware::NUM_CPU_CORES]; |
| 135 | size_t num_candidates = 0; | 254 | size_t num_candidates = 0; |
| @@ -150,7 +269,6 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | |||
| 150 | // The suggested thread isn't bound to its core, so we can migrate it! | 269 | // The suggested thread isn't bound to its core, so we can migrate it! |
| 151 | suggested->SetActiveCore(core_id); | 270 | suggested->SetActiveCore(core_id); |
| 152 | priority_queue.ChangeCore(suggested_core, suggested); | 271 | priority_queue.ChangeCore(suggested_core, suggested); |
| 153 | |||
| 154 | top_threads[core_id] = suggested; | 272 | top_threads[core_id] = suggested; |
| 155 | cores_needing_scheduling |= | 273 | cores_needing_scheduling |= |
| 156 | kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]); | 274 | kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]); |
| @@ -183,7 +301,6 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | |||
| 183 | // Perform the migration. | 301 | // Perform the migration. |
| 184 | suggested->SetActiveCore(core_id); | 302 | suggested->SetActiveCore(core_id); |
| 185 | priority_queue.ChangeCore(candidate_core, suggested); | 303 | priority_queue.ChangeCore(candidate_core, suggested); |
| 186 | |||
| 187 | top_threads[core_id] = suggested; | 304 | top_threads[core_id] = suggested; |
| 188 | cores_needing_scheduling |= | 305 | cores_needing_scheduling |= |
| 189 | kernel.Scheduler(core_id).UpdateHighestPriorityThread( | 306 | kernel.Scheduler(core_id).UpdateHighestPriorityThread( |
| @@ -200,24 +317,210 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | |||
| 200 | return cores_needing_scheduling; | 317 | return cores_needing_scheduling; |
| 201 | } | 318 | } |
| 202 | 319 | ||
| 320 | void KScheduler::SwitchThread(KThread* next_thread) { | ||
| 321 | KProcess* const cur_process = kernel.CurrentProcess(); | ||
| 322 | KThread* const cur_thread = GetCurrentThreadPointer(kernel); | ||
| 323 | |||
| 324 | // We never want to schedule a null thread, so use the idle thread if we don't have a next. | ||
| 325 | if (next_thread == nullptr) { | ||
| 326 | next_thread = m_idle_thread; | ||
| 327 | } | ||
| 328 | |||
| 329 | if (next_thread->GetCurrentCore() != m_core_id) { | ||
| 330 | next_thread->SetCurrentCore(m_core_id); | ||
| 331 | } | ||
| 332 | |||
| 333 | // If we're not actually switching thread, there's nothing to do. | ||
| 334 | if (next_thread == cur_thread) { | ||
| 335 | return; | ||
| 336 | } | ||
| 337 | |||
| 338 | // Next thread is now known not to be nullptr, and must not be dispatchable. | ||
| 339 | ASSERT(next_thread->GetDisableDispatchCount() == 1); | ||
| 340 | ASSERT(!next_thread->IsDummyThread()); | ||
| 341 | |||
| 342 | // Update the CPU time tracking variables. | ||
| 343 | const s64 prev_tick = m_last_context_switch_time; | ||
| 344 | const s64 cur_tick = kernel.System().CoreTiming().GetCPUTicks(); | ||
| 345 | const s64 tick_diff = cur_tick - prev_tick; | ||
| 346 | cur_thread->AddCpuTime(m_core_id, tick_diff); | ||
| 347 | if (cur_process != nullptr) { | ||
| 348 | cur_process->UpdateCPUTimeTicks(tick_diff); | ||
| 349 | } | ||
| 350 | m_last_context_switch_time = cur_tick; | ||
| 351 | |||
| 352 | // Update our previous thread. | ||
| 353 | if (cur_process != nullptr) { | ||
| 354 | if (!cur_thread->IsTerminationRequested() && cur_thread->GetActiveCore() == m_core_id) | ||
| 355 | [[likely]] { | ||
| 356 | m_state.prev_thread = cur_thread; | ||
| 357 | } else { | ||
| 358 | m_state.prev_thread = nullptr; | ||
| 359 | } | ||
| 360 | } | ||
| 361 | |||
| 362 | // Switch the current process, if we're switching processes. | ||
| 363 | // if (KProcess *next_process = next_thread->GetOwnerProcess(); next_process != cur_process) { | ||
| 364 | // KProcess::Switch(cur_process, next_process); | ||
| 365 | // } | ||
| 366 | |||
| 367 | // Set the new thread. | ||
| 368 | SetCurrentThread(kernel, next_thread); | ||
| 369 | m_current_thread = next_thread; | ||
| 370 | |||
| 371 | // Set the new Thread Local region. | ||
| 372 | // cpu::SwitchThreadLocalRegion(GetInteger(next_thread->GetThreadLocalRegionAddress())); | ||
| 373 | } | ||
| 374 | |||
| 375 | void KScheduler::ScheduleImpl() { | ||
| 376 | // First, clear the needs scheduling bool. | ||
| 377 | m_state.needs_scheduling.store(false, std::memory_order_seq_cst); | ||
| 378 | |||
| 379 | // Load the appropriate thread pointers for scheduling. | ||
| 380 | KThread* const cur_thread{GetCurrentThreadPointer(kernel)}; | ||
| 381 | KThread* highest_priority_thread{m_state.highest_priority_thread}; | ||
| 382 | |||
| 383 | // Check whether there are runnable interrupt tasks. | ||
| 384 | if (m_state.interrupt_task_runnable) { | ||
| 385 | // The interrupt task is runnable. | ||
| 386 | // We want to switch to the interrupt task/idle thread. | ||
| 387 | highest_priority_thread = nullptr; | ||
| 388 | } | ||
| 389 | |||
| 390 | // If there aren't, we want to check if the highest priority thread is the same as the current | ||
| 391 | // thread. | ||
| 392 | if (highest_priority_thread == cur_thread) { | ||
| 393 | // If they're the same, then we can just return. | ||
| 394 | return; | ||
| 395 | } | ||
| 396 | |||
| 397 | // The highest priority thread is not the same as the current thread. | ||
| 398 | // Jump to the switcher and continue executing from there. | ||
| 399 | m_switch_cur_thread = cur_thread; | ||
| 400 | m_switch_highest_priority_thread = highest_priority_thread; | ||
| 401 | m_switch_from_schedule = true; | ||
| 402 | Common::Fiber::YieldTo(cur_thread->host_context, *m_switch_fiber); | ||
| 403 | |||
| 404 | // Returning from ScheduleImpl occurs after this thread has been scheduled again. | ||
| 405 | } | ||
| 406 | |||
| 407 | void KScheduler::ScheduleImplFiber() { | ||
| 408 | KThread* const cur_thread{m_switch_cur_thread}; | ||
| 409 | KThread* highest_priority_thread{m_switch_highest_priority_thread}; | ||
| 410 | |||
| 411 | // If we're not coming from scheduling (i.e., we came from SC preemption), | ||
| 412 | // we should restart the scheduling loop directly. Not accurate to HOS. | ||
| 413 | if (!m_switch_from_schedule) { | ||
| 414 | goto retry; | ||
| 415 | } | ||
| 416 | |||
| 417 | // Mark that we are not coming from scheduling anymore. | ||
| 418 | m_switch_from_schedule = false; | ||
| 419 | |||
| 420 | // Save the original thread context. | ||
| 421 | Unload(cur_thread); | ||
| 422 | |||
| 423 | // The current thread's context has been entirely taken care of. | ||
| 424 | // Now we want to loop until we successfully switch the thread context. | ||
| 425 | while (true) { | ||
| 426 | // We're starting to try to do the context switch. | ||
| 427 | // Check if the highest priority thread is null. | ||
| 428 | if (!highest_priority_thread) { | ||
| 429 | // The next thread is nullptr! | ||
| 430 | |||
| 431 | // Switch to the idle thread. Note: HOS treats idling as a special case for | ||
| 432 | // performance. This is not *required* for yuzu's purposes, and for singlecore | ||
| 433 | // compatibility, we can just move the logic that would go here into the execution | ||
| 434 | // of the idle thread. If we ever remove singlecore, we should implement this | ||
| 435 | // accurately to HOS. | ||
| 436 | highest_priority_thread = m_idle_thread; | ||
| 437 | } | ||
| 438 | |||
| 439 | // We want to try to lock the highest priority thread's context. | ||
| 440 | // Try to take it. | ||
| 441 | while (!highest_priority_thread->context_guard.try_lock()) { | ||
| 442 | // The highest priority thread's context is already locked. | ||
| 443 | // Check if we need scheduling. If we don't, we can retry directly. | ||
| 444 | if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { | ||
| 445 | // If we do, another core is interfering, and we must start again. | ||
| 446 | goto retry; | ||
| 447 | } | ||
| 448 | } | ||
| 449 | |||
| 450 | // It's time to switch the thread. | ||
| 451 | // Switch to the highest priority thread. | ||
| 452 | SwitchThread(highest_priority_thread); | ||
| 453 | |||
| 454 | // Check if we need scheduling. If we do, then we can't complete the switch and should | ||
| 455 | // retry. | ||
| 456 | if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { | ||
| 457 | // Our switch failed. | ||
| 458 | // We should unlock the thread context, and then retry. | ||
| 459 | highest_priority_thread->context_guard.unlock(); | ||
| 460 | goto retry; | ||
| 461 | } else { | ||
| 462 | break; | ||
| 463 | } | ||
| 464 | |||
| 465 | retry: | ||
| 466 | |||
| 467 | // We failed to successfully do the context switch, and need to retry. | ||
| 468 | // Clear needs_scheduling. | ||
| 469 | m_state.needs_scheduling.store(false, std::memory_order_seq_cst); | ||
| 470 | |||
| 471 | // Refresh the highest priority thread. | ||
| 472 | highest_priority_thread = m_state.highest_priority_thread; | ||
| 473 | } | ||
| 474 | |||
| 475 | // Reload the guest thread context. | ||
| 476 | Reload(highest_priority_thread); | ||
| 477 | |||
| 478 | // Reload the host thread. | ||
| 479 | Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->host_context); | ||
| 480 | } | ||
| 481 | |||
| 482 | void KScheduler::Unload(KThread* thread) { | ||
| 483 | auto& cpu_core = kernel.System().ArmInterface(m_core_id); | ||
| 484 | cpu_core.SaveContext(thread->GetContext32()); | ||
| 485 | cpu_core.SaveContext(thread->GetContext64()); | ||
| 486 | // Save the TPIDR_EL0 system register in case it was modified. | ||
| 487 | thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); | ||
| 488 | cpu_core.ClearExclusiveState(); | ||
| 489 | |||
| 490 | // Check if the thread is terminated by checking the DPC flags. | ||
| 491 | if ((thread->GetStackParameters().dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) { | ||
| 492 | // The thread isn't terminated, so we want to unlock it. | ||
| 493 | thread->context_guard.unlock(); | ||
| 494 | } | ||
| 495 | } | ||
| 496 | |||
| 497 | void KScheduler::Reload(KThread* thread) { | ||
| 498 | auto& cpu_core = kernel.System().ArmInterface(m_core_id); | ||
| 499 | cpu_core.LoadContext(thread->GetContext32()); | ||
| 500 | cpu_core.LoadContext(thread->GetContext64()); | ||
| 501 | cpu_core.SetTlsAddress(thread->GetTLSAddress()); | ||
| 502 | cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0()); | ||
| 503 | cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints()); | ||
| 504 | cpu_core.ClearExclusiveState(); | ||
| 505 | } | ||
| 506 | |||
| 203 | void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) { | 507 | void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) { |
| 204 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | 508 | ASSERT(IsSchedulerLockedByCurrentThread(kernel)); |
| 205 | for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) { | 509 | for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) { |
| 206 | // Get an atomic reference to the core scheduler's previous thread. | 510 | // Get an atomic reference to the core scheduler's previous thread. |
| 207 | std::atomic_ref<KThread*> prev_thread(kernel.Scheduler(static_cast<s32>(i)).prev_thread); | 511 | auto& prev_thread{kernel.Scheduler(i).m_state.prev_thread}; |
| 208 | static_assert(std::atomic_ref<KThread*>::is_always_lock_free); | ||
| 209 | 512 | ||
| 210 | // Atomically clear the previous thread if it's our target. | 513 | // Atomically clear the previous thread if it's our target. |
| 211 | KThread* compare = thread; | 514 | KThread* compare = thread; |
| 212 | prev_thread.compare_exchange_strong(compare, nullptr); | 515 | prev_thread.compare_exchange_strong(compare, nullptr, std::memory_order_seq_cst); |
| 213 | } | 516 | } |
| 214 | } | 517 | } |
| 215 | 518 | ||
| 216 | void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) { | 519 | void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) { |
| 217 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | 520 | ASSERT(IsSchedulerLockedByCurrentThread(kernel)); |
| 218 | 521 | ||
| 219 | // Check if the state has changed, because if it hasn't there's nothing to do. | 522 | // Check if the state has changed, because if it hasn't there's nothing to do. |
| 220 | const auto cur_state = thread->GetRawState(); | 523 | const ThreadState cur_state = thread->GetRawState(); |
| 221 | if (cur_state == old_state) { | 524 | if (cur_state == old_state) { |
| 222 | return; | 525 | return; |
| 223 | } | 526 | } |
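
The hunk above introduces the switch fiber: `ScheduleImpl` publishes the target thread and yields to a dedicated fiber, which unloads the outgoing context, locks the incoming thread's context (retrying if another core flags rescheduling), and resumes the chosen thread's host fiber. Below is a high-level sketch of that handshake; `Fiber` and `YieldTo` are placeholders for a real cooperative context-switch primitive (yuzu uses `Common::Fiber`), and the stub here does not actually switch stacks.

```cpp
#include <atomic>
#include <mutex>

struct Fiber {};

void YieldTo(Fiber& /*from*/, Fiber& /*to*/) {
    // A real implementation suspends `from` and resumes `to`.
}

struct Thread {
    Fiber host_context;
    std::mutex context_guard;  // protects the thread's saved CPU context
};

struct SchedulerSketch {
    Fiber switch_fiber;                      // runs SwitchFiberBody()
    Thread* current = nullptr;
    Thread* next = nullptr;
    std::atomic<bool> needs_scheduling{false};

    // Runs on the outgoing thread (compare ScheduleImpl): publish the target and
    // hand control to the switch fiber; execution resumes here only after this
    // thread is scheduled again.
    void Schedule(Thread* highest_priority) {
        needs_scheduling.store(false);
        if (highest_priority == current) {
            return;
        }
        next = highest_priority;
        YieldTo(current->host_context, switch_fiber);
    }

    // Runs on the dedicated switch fiber (compare ScheduleImplFiber).
    void SwitchFiberBody() {
        while (true) {
            Thread* target = next;
            if (target == nullptr) {
                // The real code substitutes the idle thread here.
                continue;
            }

            // Take the incoming thread's context lock. If another core flags
            // needs_scheduling while we wait, start over and pick a fresh target.
            bool retry = false;
            while (!target->context_guard.try_lock()) {
                if (needs_scheduling.exchange(false)) {
                    retry = true;
                    break;
                }
            }
            if (retry) {
                continue;
            }

            // Resume the chosen thread's host fiber. Its context_guard stays held
            // until the thread is later switched out (compare KScheduler::Unload).
            current = target;
            YieldTo(switch_fiber, target->host_context);
        }
    }
};
```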
| @@ -237,12 +540,12 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, Threa | |||
| 237 | } | 540 | } |
| 238 | 541 | ||
| 239 | void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority) { | 542 | void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority) { |
| 240 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | 543 | ASSERT(IsSchedulerLockedByCurrentThread(kernel)); |
| 241 | 544 | ||
| 242 | // If the thread is runnable, we want to change its priority in the queue. | 545 | // If the thread is runnable, we want to change its priority in the queue. |
| 243 | if (thread->GetRawState() == ThreadState::Runnable) { | 546 | if (thread->GetRawState() == ThreadState::Runnable) { |
| 244 | GetPriorityQueue(kernel).ChangePriority(old_priority, | 547 | GetPriorityQueue(kernel).ChangePriority(old_priority, |
| 245 | thread == kernel.GetCurrentEmuThread(), thread); | 548 | thread == GetCurrentThreadPointer(kernel), thread); |
| 246 | IncrementScheduledCount(thread); | 549 | IncrementScheduledCount(thread); |
| 247 | SetSchedulerUpdateNeeded(kernel); | 550 | SetSchedulerUpdateNeeded(kernel); |
| 248 | } | 551 | } |
| @@ -250,7 +553,7 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s3 | |||
| 250 | 553 | ||
| 251 | void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread, | 554 | void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread, |
| 252 | const KAffinityMask& old_affinity, s32 old_core) { | 555 | const KAffinityMask& old_affinity, s32 old_core) { |
| 253 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | 556 | ASSERT(IsSchedulerLockedByCurrentThread(kernel)); |
| 254 | 557 | ||
| 255 | // If the thread is runnable, we want to change its affinity in the queue. | 558 | // If the thread is runnable, we want to change its affinity in the queue. |
| 256 | if (thread->GetRawState() == ThreadState::Runnable) { | 559 | if (thread->GetRawState() == ThreadState::Runnable) { |
| @@ -260,15 +563,14 @@ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread | |||
| 260 | } | 563 | } |
| 261 | } | 564 | } |
| 262 | 565 | ||
| 263 | void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | 566 | void KScheduler::RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 priority) { |
| 264 | ASSERT(system.GlobalSchedulerContext().IsLocked()); | 567 | ASSERT(IsSchedulerLockedByCurrentThread(kernel)); |
| 265 | 568 | ||
| 266 | // Get a reference to the priority queue. | 569 | // Get a reference to the priority queue. |
| 267 | auto& kernel = system.Kernel(); | ||
| 268 | auto& priority_queue = GetPriorityQueue(kernel); | 570 | auto& priority_queue = GetPriorityQueue(kernel); |
| 269 | 571 | ||
| 270 | // Rotate the front of the queue to the end. | 572 | // Rotate the front of the queue to the end. |
| 271 | KThread* top_thread = priority_queue.GetScheduledFront(cpu_core_id, priority); | 573 | KThread* top_thread = priority_queue.GetScheduledFront(core_id, priority); |
| 272 | KThread* next_thread = nullptr; | 574 | KThread* next_thread = nullptr; |
| 273 | if (top_thread != nullptr) { | 575 | if (top_thread != nullptr) { |
| 274 | next_thread = priority_queue.MoveToScheduledBack(top_thread); | 576 | next_thread = priority_queue.MoveToScheduledBack(top_thread); |
| @@ -280,7 +582,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | |||
| 280 | 582 | ||
| 281 | // While we have a suggested thread, try to migrate it! | 583 | // While we have a suggested thread, try to migrate it! |
| 282 | { | 584 | { |
| 283 | KThread* suggested = priority_queue.GetSuggestedFront(cpu_core_id, priority); | 585 | KThread* suggested = priority_queue.GetSuggestedFront(core_id, priority); |
| 284 | while (suggested != nullptr) { | 586 | while (suggested != nullptr) { |
| 285 | // Check if the suggested thread is the top thread on its core. | 587 | // Check if the suggested thread is the top thread on its core. |
| 286 | const s32 suggested_core = suggested->GetActiveCore(); | 588 | const s32 suggested_core = suggested->GetActiveCore(); |
| @@ -301,7 +603,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | |||
| 301 | // to the front of the queue. | 603 | // to the front of the queue. |
| 302 | if (top_on_suggested_core == nullptr || | 604 | if (top_on_suggested_core == nullptr || |
| 303 | top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { | 605 | top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { |
| 304 | suggested->SetActiveCore(cpu_core_id); | 606 | suggested->SetActiveCore(core_id); |
| 305 | priority_queue.ChangeCore(suggested_core, suggested, true); | 607 | priority_queue.ChangeCore(suggested_core, suggested, true); |
| 306 | IncrementScheduledCount(suggested); | 608 | IncrementScheduledCount(suggested); |
| 307 | break; | 609 | break; |
| @@ -309,22 +611,21 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | |||
| 309 | } | 611 | } |
| 310 | 612 | ||
| 311 | // Get the next suggestion. | 613 | // Get the next suggestion. |
| 312 | suggested = priority_queue.GetSamePriorityNext(cpu_core_id, suggested); | 614 | suggested = priority_queue.GetSamePriorityNext(core_id, suggested); |
| 313 | } | 615 | } |
| 314 | } | 616 | } |
| 315 | 617 | ||
| 316 | // Now that we might have migrated a thread with the same priority, check if we can do better. | 618 | // Now that we might have migrated a thread with the same priority, check if we can do better. |
| 317 | |||
| 318 | { | 619 | { |
| 319 | KThread* best_thread = priority_queue.GetScheduledFront(cpu_core_id); | 620 | KThread* best_thread = priority_queue.GetScheduledFront(core_id); |
| 320 | if (best_thread == GetCurrentThreadPointer(kernel)) { | 621 | if (best_thread == GetCurrentThreadPointer(kernel)) { |
| 321 | best_thread = priority_queue.GetScheduledNext(cpu_core_id, best_thread); | 622 | best_thread = priority_queue.GetScheduledNext(core_id, best_thread); |
| 322 | } | 623 | } |
| 323 | 624 | ||
| 324 | // If the best thread we can choose has a priority the same or worse than ours, try to | 625 | // If the best thread we can choose has a priority the same or worse than ours, try to |
| 325 | // migrate a higher priority thread. | 626 | // migrate a higher priority thread. |
| 326 | if (best_thread != nullptr && best_thread->GetPriority() >= priority) { | 627 | if (best_thread != nullptr && best_thread->GetPriority() >= priority) { |
| 327 | KThread* suggested = priority_queue.GetSuggestedFront(cpu_core_id); | 628 | KThread* suggested = priority_queue.GetSuggestedFront(core_id); |
| 328 | while (suggested != nullptr) { | 629 | while (suggested != nullptr) { |
| 329 | // If the suggestion's priority is the same as ours, don't bother. | 630 | // If the suggestion's priority is the same as ours, don't bother. |
| 330 | if (suggested->GetPriority() >= best_thread->GetPriority()) { | 631 | if (suggested->GetPriority() >= best_thread->GetPriority()) { |
| @@ -343,7 +644,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | |||
| 343 | if (top_on_suggested_core == nullptr || | 644 | if (top_on_suggested_core == nullptr || |
| 344 | top_on_suggested_core->GetPriority() >= | 645 | top_on_suggested_core->GetPriority() >= |
| 345 | HighestCoreMigrationAllowedPriority) { | 646 | HighestCoreMigrationAllowedPriority) { |
| 346 | suggested->SetActiveCore(cpu_core_id); | 647 | suggested->SetActiveCore(core_id); |
| 347 | priority_queue.ChangeCore(suggested_core, suggested, true); | 648 | priority_queue.ChangeCore(suggested_core, suggested, true); |
| 348 | IncrementScheduledCount(suggested); | 649 | IncrementScheduledCount(suggested); |
| 349 | break; | 650 | break; |
| @@ -351,7 +652,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | |||
| 351 | } | 652 | } |
| 352 | 653 | ||
| 353 | // Get the next suggestion. | 654 | // Get the next suggestion. |
| 354 | suggested = priority_queue.GetSuggestedNext(cpu_core_id, suggested); | 655 | suggested = priority_queue.GetSuggestedNext(core_id, suggested); |
| 355 | } | 656 | } |
| 356 | } | 657 | } |
| 357 | } | 658 | } |
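
`RotateScheduledQueue` also tries to pull a *suggested* thread over from another core, but only when that core does not need it. A condensed sketch of the migration check follows; the types and the priority cutoff value are stand-ins (smaller numbers mean higher priority, as in Horizon).

```cpp
struct Thread {
    int priority = 0;
    int active_core = 0;
};

constexpr int HighestCoreMigrationAllowedPriority = 2;

// Returns the migrated thread, or nullptr if the suggestion must stay put.
Thread* TryMigrateSuggestion(Thread* suggested, int this_core,
                             Thread* top_on_suggested_core) {
    if (suggested == nullptr) {
        return nullptr;
    }
    // Migrate only if the home core has nothing scheduled, or its scheduled front
    // is at or below the migration-allowed priority cutoff (numerically >=).
    if (top_on_suggested_core == nullptr ||
        top_on_suggested_core->priority >= HighestCoreMigrationAllowedPriority) {
        suggested->active_core = this_core;
        return suggested;
    }
    return nullptr;
}
```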
| @@ -360,64 +661,6 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | |||
| 360 | SetSchedulerUpdateNeeded(kernel); | 661 | SetSchedulerUpdateNeeded(kernel); |
| 361 | } | 662 | } |
| 362 | 663 | ||
| 363 | bool KScheduler::CanSchedule(KernelCore& kernel) { | ||
| 364 | return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() <= 1; | ||
| 365 | } | ||
| 366 | |||
| 367 | bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) { | ||
| 368 | return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire); | ||
| 369 | } | ||
| 370 | |||
| 371 | void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) { | ||
| 372 | kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release); | ||
| 373 | } | ||
| 374 | |||
| 375 | void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) { | ||
| 376 | kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release); | ||
| 377 | } | ||
| 378 | |||
| 379 | void KScheduler::DisableScheduling(KernelCore& kernel) { | ||
| 380 | // If we are shutting down the kernel, none of this is relevant anymore. | ||
| 381 | if (kernel.IsShuttingDown()) { | ||
| 382 | return; | ||
| 383 | } | ||
| 384 | |||
| 385 | ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0); | ||
| 386 | GetCurrentThreadPointer(kernel)->DisableDispatch(); | ||
| 387 | } | ||
| 388 | |||
| 389 | void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) { | ||
| 390 | // If we are shutting down the kernel, none of this is relevant anymore. | ||
| 391 | if (kernel.IsShuttingDown()) { | ||
| 392 | return; | ||
| 393 | } | ||
| 394 | |||
| 395 | auto* current_thread = GetCurrentThreadPointer(kernel); | ||
| 396 | |||
| 397 | ASSERT(current_thread->GetDisableDispatchCount() >= 1); | ||
| 398 | |||
| 399 | if (current_thread->GetDisableDispatchCount() > 1) { | ||
| 400 | current_thread->EnableDispatch(); | ||
| 401 | } else { | ||
| 402 | RescheduleCores(kernel, cores_needing_scheduling); | ||
| 403 | } | ||
| 404 | |||
| 405 | // Special case to ensure dummy threads that are waiting block. | ||
| 406 | current_thread->IfDummyThreadTryWait(); | ||
| 407 | } | ||
| 408 | |||
| 409 | u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { | ||
| 410 | if (IsSchedulerUpdateNeeded(kernel)) { | ||
| 411 | return UpdateHighestPriorityThreadsImpl(kernel); | ||
| 412 | } else { | ||
| 413 | return 0; | ||
| 414 | } | ||
| 415 | } | ||
| 416 | |||
| 417 | KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) { | ||
| 418 | return kernel.GlobalSchedulerContext().priority_queue; | ||
| 419 | } | ||
| 420 | |||
| 421 | void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) { | 664 | void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) { |
| 422 | // Validate preconditions. | 665 | // Validate preconditions. |
| 423 | ASSERT(CanSchedule(kernel)); | 666 | ASSERT(CanSchedule(kernel)); |
| @@ -437,7 +680,7 @@ void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) { | |||
| 437 | 680 | ||
| 438 | // Perform the yield. | 681 | // Perform the yield. |
| 439 | { | 682 | { |
| 440 | KScopedSchedulerLock lock(kernel); | 683 | KScopedSchedulerLock sl{kernel}; |
| 441 | 684 | ||
| 442 | const auto cur_state = cur_thread.GetRawState(); | 685 | const auto cur_state = cur_thread.GetRawState(); |
| 443 | if (cur_state == ThreadState::Runnable) { | 686 | if (cur_state == ThreadState::Runnable) { |
| @@ -476,7 +719,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) { | |||
| 476 | 719 | ||
| 477 | // Perform the yield. | 720 | // Perform the yield. |
| 478 | { | 721 | { |
| 479 | KScopedSchedulerLock lock(kernel); | 722 | KScopedSchedulerLock sl{kernel}; |
| 480 | 723 | ||
| 481 | const auto cur_state = cur_thread.GetRawState(); | 724 | const auto cur_state = cur_thread.GetRawState(); |
| 482 | if (cur_state == ThreadState::Runnable) { | 725 | if (cur_state == ThreadState::Runnable) { |
| @@ -496,7 +739,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) { | |||
| 496 | 739 | ||
| 497 | if (KThread* running_on_suggested_core = | 740 | if (KThread* running_on_suggested_core = |
| 498 | (suggested_core >= 0) | 741 | (suggested_core >= 0) |
| 499 | ? kernel.Scheduler(suggested_core).state.highest_priority_thread | 742 | ? kernel.Scheduler(suggested_core).m_state.highest_priority_thread |
| 500 | : nullptr; | 743 | : nullptr; |
| 501 | running_on_suggested_core != suggested) { | 744 | running_on_suggested_core != suggested) { |
| 502 | // If the current thread's priority is higher than our suggestion's we prefer | 745 | // If the current thread's priority is higher than our suggestion's we prefer |
| @@ -564,7 +807,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) { | |||
| 564 | 807 | ||
| 565 | // Perform the yield. | 808 | // Perform the yield. |
| 566 | { | 809 | { |
| 567 | KScopedSchedulerLock lock(kernel); | 810 | KScopedSchedulerLock sl{kernel}; |
| 568 | 811 | ||
| 569 | const auto cur_state = cur_thread.GetRawState(); | 812 | const auto cur_state = cur_thread.GetRawState(); |
| 570 | if (cur_state == ThreadState::Runnable) { | 813 | if (cur_state == ThreadState::Runnable) { |
| @@ -621,223 +864,19 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) { | |||
| 621 | } | 864 | } |
| 622 | } | 865 | } |
| 623 | 866 | ||
| 624 | KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, core_id{core_id_} { | 867 | void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) { |
| 625 | switch_fiber = std::make_shared<Common::Fiber>([this] { SwitchToCurrent(); }); | 868 | if (const u64 core_mask = cores_needing_scheduling & ~(1ULL << m_core_id); core_mask != 0) { |
| 626 | state.needs_scheduling.store(true); | 869 | RescheduleCores(kernel, core_mask); |
| 627 | state.interrupt_task_thread_runnable = false; | ||
| 628 | state.should_count_idle = false; | ||
| 629 | state.idle_count = 0; | ||
| 630 | state.idle_thread_stack = nullptr; | ||
| 631 | state.highest_priority_thread = nullptr; | ||
| 632 | } | ||
| 633 | |||
| 634 | void KScheduler::Finalize() { | ||
| 635 | if (idle_thread) { | ||
| 636 | idle_thread->Close(); | ||
| 637 | idle_thread = nullptr; | ||
| 638 | } | ||
| 639 | } | ||
| 640 | |||
| 641 | KScheduler::~KScheduler() { | ||
| 642 | ASSERT(!idle_thread); | ||
| 643 | } | ||
| 644 | |||
| 645 | KThread* KScheduler::GetSchedulerCurrentThread() const { | ||
| 646 | if (auto result = current_thread.load(); result) { | ||
| 647 | return result; | ||
| 648 | } | 870 | } |
| 649 | return idle_thread; | ||
| 650 | } | ||
| 651 | |||
| 652 | u64 KScheduler::GetLastContextSwitchTicks() const { | ||
| 653 | return last_context_switch_time; | ||
| 654 | } | 871 | } |
| 655 | 872 | ||
| 656 | void KScheduler::RescheduleCurrentCore() { | 873 | void KScheduler::RescheduleCores(KernelCore& kernel, u64 core_mask) { |
| 657 | ASSERT(GetCurrentThread(system.Kernel()).GetDisableDispatchCount() == 1); | 874 | // Send IPI |
| 658 | 875 | for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { | |
| 659 | auto& phys_core = system.Kernel().PhysicalCore(core_id); | 876 | if (core_mask & (1ULL << i)) { |
| 660 | if (phys_core.IsInterrupted()) { | 877 | kernel.PhysicalCore(i).Interrupt(); |
| 661 | phys_core.ClearInterrupt(); | ||
| 662 | } | ||
| 663 | |||
| 664 | guard.Lock(); | ||
| 665 | if (state.needs_scheduling.load()) { | ||
| 666 | Schedule(); | ||
| 667 | } else { | ||
| 668 | GetCurrentThread(system.Kernel()).EnableDispatch(); | ||
| 669 | guard.Unlock(); | ||
| 670 | } | ||
| 671 | } | ||
| 672 | |||
| 673 | void KScheduler::OnThreadStart() { | ||
| 674 | SwitchContextStep2(); | ||
| 675 | } | ||
| 676 | |||
| 677 | void KScheduler::Unload(KThread* thread) { | ||
| 678 | ASSERT(thread); | ||
| 679 | |||
| 680 | LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr"); | ||
| 681 | |||
| 682 | if (thread->IsCallingSvc()) { | ||
| 683 | thread->ClearIsCallingSvc(); | ||
| 684 | } | ||
| 685 | |||
| 686 | auto& physical_core = system.Kernel().PhysicalCore(core_id); | ||
| 687 | if (!physical_core.IsInitialized()) { | ||
| 688 | return; | ||
| 689 | } | ||
| 690 | |||
| 691 | Core::ARM_Interface& cpu_core = physical_core.ArmInterface(); | ||
| 692 | cpu_core.SaveContext(thread->GetContext32()); | ||
| 693 | cpu_core.SaveContext(thread->GetContext64()); | ||
| 694 | // Save the TPIDR_EL0 system register in case it was modified. | ||
| 695 | thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); | ||
| 696 | cpu_core.ClearExclusiveState(); | ||
| 697 | |||
| 698 | if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) { | ||
| 699 | prev_thread = thread; | ||
| 700 | } else { | ||
| 701 | prev_thread = nullptr; | ||
| 702 | } | ||
| 703 | |||
| 704 | thread->context_guard.unlock(); | ||
| 705 | } | ||
| 706 | |||
| 707 | void KScheduler::Reload(KThread* thread) { | ||
| 708 | LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread->GetName()); | ||
| 709 | |||
| 710 | Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); | ||
| 711 | cpu_core.LoadContext(thread->GetContext32()); | ||
| 712 | cpu_core.LoadContext(thread->GetContext64()); | ||
| 713 | cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints()); | ||
| 714 | cpu_core.SetTlsAddress(thread->GetTLSAddress()); | ||
| 715 | cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0()); | ||
| 716 | cpu_core.ClearExclusiveState(); | ||
| 717 | } | ||
| 718 | |||
| 719 | void KScheduler::SwitchContextStep2() { | ||
| 720 | // Load context of new thread | ||
| 721 | Reload(GetCurrentThreadPointer(system.Kernel())); | ||
| 722 | |||
| 723 | RescheduleCurrentCore(); | ||
| 724 | } | ||
| 725 | |||
| 726 | void KScheduler::Schedule() { | ||
| 727 | ASSERT(GetCurrentThread(system.Kernel()).GetDisableDispatchCount() == 1); | ||
| 728 | this->ScheduleImpl(); | ||
| 729 | } | ||
| 730 | |||
| 731 | void KScheduler::ScheduleImpl() { | ||
| 732 | KThread* previous_thread = GetCurrentThreadPointer(system.Kernel()); | ||
| 733 | KThread* next_thread = state.highest_priority_thread; | ||
| 734 | |||
| 735 | state.needs_scheduling.store(false); | ||
| 736 | |||
| 737 | // We never want to schedule a null thread, so use the idle thread if we don't have a next. | ||
| 738 | if (next_thread == nullptr) { | ||
| 739 | next_thread = idle_thread; | ||
| 740 | } | ||
| 741 | |||
| 742 | if (next_thread->GetCurrentCore() != core_id) { | ||
| 743 | next_thread->SetCurrentCore(core_id); | ||
| 744 | } | ||
| 745 | |||
| 746 | // We never want to schedule a dummy thread, as these are only used by host threads for locking. | ||
| 747 | if (next_thread->GetThreadType() == ThreadType::Dummy) { | ||
| 748 | ASSERT_MSG(false, "Dummy threads should never be scheduled!"); | ||
| 749 | next_thread = idle_thread; | ||
| 750 | } | ||
| 751 | |||
| 752 | // If we're not actually switching thread, there's nothing to do. | ||
| 753 | if (next_thread == current_thread.load()) { | ||
| 754 | previous_thread->EnableDispatch(); | ||
| 755 | guard.Unlock(); | ||
| 756 | return; | ||
| 757 | } | ||
| 758 | |||
| 759 | // Update the CPU time tracking variables. | ||
| 760 | KProcess* const previous_process = system.Kernel().CurrentProcess(); | ||
| 761 | UpdateLastContextSwitchTime(previous_thread, previous_process); | ||
| 762 | |||
| 763 | // Save context for previous thread | ||
| 764 | Unload(previous_thread); | ||
| 765 | |||
| 766 | std::shared_ptr<Common::Fiber>* old_context; | ||
| 767 | old_context = &previous_thread->GetHostContext(); | ||
| 768 | |||
| 769 | // Set the new thread. | ||
| 770 | SetCurrentThread(system.Kernel(), next_thread); | ||
| 771 | current_thread.store(next_thread); | ||
| 772 | |||
| 773 | guard.Unlock(); | ||
| 774 | |||
| 775 | Common::Fiber::YieldTo(*old_context, *switch_fiber); | ||
| 776 | /// When a thread wakes up, the scheduler may have changed to a different core's scheduler. | ||
| 777 | auto& next_scheduler = *system.Kernel().CurrentScheduler(); | ||
| 778 | next_scheduler.SwitchContextStep2(); | ||
| 779 | } | ||
| 780 | |||
| 781 | void KScheduler::SwitchToCurrent() { | ||
| 782 | while (true) { | ||
| 783 | { | ||
| 784 | KScopedSpinLock lk{guard}; | ||
| 785 | current_thread.store(state.highest_priority_thread); | ||
| 786 | state.needs_scheduling.store(false); | ||
| 787 | } | 878 | } |
| 788 | const auto is_switch_pending = [this] { | ||
| 789 | KScopedSpinLock lk{guard}; | ||
| 790 | return state.needs_scheduling.load(); | ||
| 791 | }; | ||
| 792 | do { | ||
| 793 | auto next_thread = current_thread.load(); | ||
| 794 | if (next_thread != nullptr) { | ||
| 795 | const auto locked = next_thread->context_guard.try_lock(); | ||
| 796 | if (state.needs_scheduling.load()) { | ||
| 797 | next_thread->context_guard.unlock(); | ||
| 798 | break; | ||
| 799 | } | ||
| 800 | if (next_thread->GetActiveCore() != core_id) { | ||
| 801 | next_thread->context_guard.unlock(); | ||
| 802 | break; | ||
| 803 | } | ||
| 804 | if (!locked) { | ||
| 805 | continue; | ||
| 806 | } | ||
| 807 | } | ||
| 808 | auto thread = next_thread ? next_thread : idle_thread; | ||
| 809 | SetCurrentThread(system.Kernel(), thread); | ||
| 810 | Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext()); | ||
| 811 | } while (!is_switch_pending()); | ||
| 812 | } | 879 | } |
| 813 | } | 880 | } |
| 814 | 881 | ||
| 815 | void KScheduler::UpdateLastContextSwitchTime(KThread* thread, KProcess* process) { | ||
| 816 | const u64 prev_switch_ticks = last_context_switch_time; | ||
| 817 | const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks(); | ||
| 818 | const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; | ||
| 819 | |||
| 820 | if (thread != nullptr) { | ||
| 821 | thread->AddCpuTime(core_id, update_ticks); | ||
| 822 | } | ||
| 823 | |||
| 824 | if (process != nullptr) { | ||
| 825 | process->UpdateCPUTimeTicks(update_ticks); | ||
| 826 | } | ||
| 827 | |||
| 828 | last_context_switch_time = most_recent_switch_ticks; | ||
| 829 | } | ||
| 830 | |||
| 831 | void KScheduler::Initialize() { | ||
| 832 | idle_thread = KThread::Create(system.Kernel()); | ||
| 833 | ASSERT(KThread::InitializeIdleThread(system, idle_thread, core_id).IsSuccess()); | ||
| 834 | idle_thread->SetName(fmt::format("IdleThread:{}", core_id)); | ||
| 835 | idle_thread->EnableDispatch(); | ||
| 836 | } | ||
| 837 | |||
| 838 | KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel) | ||
| 839 | : KScopedLock(kernel.GlobalSchedulerContext().SchedulerLock()) {} | ||
| 840 | |||
| 841 | KScopedSchedulerLock::~KScopedSchedulerLock() = default; | ||
| 842 | |||
| 843 | } // namespace Kernel | 882 | } // namespace Kernel |
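Editorial note: the new RescheduleCores/RescheduleOtherCores pair in this file treats the set of cores that need scheduling as a plain bitmask, masks out the caller's own core, and sends an inter-processor interrupt to every remaining set core. A minimal standalone sketch of that pattern follows; PhysicalCoreStub, g_cores and the printf body are stand-ins, not the emulator's real classes.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr std::size_t NUM_CPU_CORES = 4; // stand-in for Core::Hardware::NUM_CPU_CORES

    struct PhysicalCoreStub {
        void Interrupt() { std::printf("IPI\n"); } // stand-in for the real inter-processor interrupt
    };

    PhysicalCoreStub g_cores[NUM_CPU_CORES];

    // Interrupt every core whose bit is set in the mask.
    void RescheduleCores(std::uint64_t core_mask) {
        for (std::size_t i = 0; i < NUM_CPU_CORES; ++i) {
            if (core_mask & (1ULL << i)) {
                g_cores[i].Interrupt();
            }
        }
    }

    // Interrupt every other core that needs scheduling; the caller's own core is masked out
    // first, so a core never sends an IPI to itself.
    void RescheduleOtherCores(std::uint64_t cores_needing_scheduling, std::int32_t my_core_id) {
        if (const std::uint64_t mask = cores_needing_scheduling & ~(1ULL << my_core_id); mask != 0) {
            RescheduleCores(mask);
        }
    }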
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index 6a4760eca..534321d8d 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include "core/hle/kernel/k_scheduler_lock.h" | 11 | #include "core/hle/kernel/k_scheduler_lock.h" |
| 12 | #include "core/hle/kernel/k_scoped_lock.h" | 12 | #include "core/hle/kernel/k_scoped_lock.h" |
| 13 | #include "core/hle/kernel/k_spin_lock.h" | 13 | #include "core/hle/kernel/k_spin_lock.h" |
| 14 | #include "core/hle/kernel/k_thread.h" | ||
| 14 | 15 | ||
| 15 | namespace Common { | 16 | namespace Common { |
| 16 | class Fiber; | 17 | class Fiber; |
| @@ -23,184 +24,150 @@ class System; | |||
| 23 | namespace Kernel { | 24 | namespace Kernel { |
| 24 | 25 | ||
| 25 | class KernelCore; | 26 | class KernelCore; |
| 27 | class KInterruptTaskManager; | ||
| 26 | class KProcess; | 28 | class KProcess; |
| 27 | class SchedulerLock; | ||
| 28 | class KThread; | 29 | class KThread; |
| 30 | class KScopedDisableDispatch; | ||
| 31 | class KScopedSchedulerLock; | ||
| 32 | class KScopedSchedulerLockAndSleep; | ||
| 29 | 33 | ||
| 30 | class KScheduler final { | 34 | class KScheduler final { |
| 31 | public: | 35 | public: |
| 32 | explicit KScheduler(Core::System& system_, s32 core_id_); | 36 | YUZU_NON_COPYABLE(KScheduler); |
| 33 | ~KScheduler(); | 37 | YUZU_NON_MOVEABLE(KScheduler); |
| 34 | |||
| 35 | void Finalize(); | ||
| 36 | 38 | ||
| 37 | /// Reschedules to the next available thread (call after current thread is suspended) | 39 | using LockType = KAbstractSchedulerLock<KScheduler>; |
| 38 | void RescheduleCurrentCore(); | ||
| 39 | 40 | ||
| 40 | /// Reschedules cores pending reschedule, to be called on EnableScheduling. | 41 | explicit KScheduler(KernelCore& kernel); |
| 41 | static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule); | 42 | ~KScheduler(); |
| 42 | 43 | ||
| 43 | /// The next two are for SingleCore Only. | 44 | void Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id); |
| 44 | /// Unload current thread before preempting core. | 45 | void Activate(); |
| 46 | void OnThreadStart(); | ||
| 45 | void Unload(KThread* thread); | 47 | void Unload(KThread* thread); |
| 46 | |||
| 47 | /// Reload current thread after core preemption. | ||
| 48 | void Reload(KThread* thread); | 48 | void Reload(KThread* thread); |
| 49 | 49 | ||
| 50 | /// Gets the current running thread | 50 | void SetInterruptTaskRunnable(); |
| 51 | [[nodiscard]] KThread* GetSchedulerCurrentThread() const; | 51 | void RequestScheduleOnInterrupt(); |
| 52 | void PreemptSingleCore(); | ||
| 52 | 53 | ||
| 53 | /// Gets the idle thread | 54 | u64 GetIdleCount() { |
| 54 | [[nodiscard]] KThread* GetIdleThread() const { | 55 | return m_state.idle_count; |
| 55 | return idle_thread; | ||
| 56 | } | 56 | } |
| 57 | 57 | ||
| 58 | /// Returns true if the scheduler is idle | 58 | KThread* GetIdleThread() const { |
| 59 | [[nodiscard]] bool IsIdle() const { | 59 | return m_idle_thread; |
| 60 | return GetSchedulerCurrentThread() == idle_thread; | ||
| 61 | } | 60 | } |
| 62 | 61 | ||
| 63 | /// Gets the timestamp for the last context switch in ticks. | 62 | bool IsIdle() const { |
| 64 | [[nodiscard]] u64 GetLastContextSwitchTicks() const; | 63 | return m_current_thread.load() == m_idle_thread; |
| 65 | |||
| 66 | [[nodiscard]] bool ContextSwitchPending() const { | ||
| 67 | return state.needs_scheduling.load(std::memory_order_relaxed); | ||
| 68 | } | 64 | } |
| 69 | 65 | ||
| 70 | void Initialize(); | 66 | KThread* GetPreviousThread() const { |
| 67 | return m_state.prev_thread; | ||
| 68 | } | ||
| 71 | 69 | ||
| 72 | void OnThreadStart(); | 70 | KThread* GetSchedulerCurrentThread() const { |
| 71 | return m_current_thread.load(); | ||
| 72 | } | ||
| 73 | 73 | ||
| 74 | [[nodiscard]] std::shared_ptr<Common::Fiber>& ControlContext() { | 74 | s64 GetLastContextSwitchTime() const { |
| 75 | return switch_fiber; | 75 | return m_last_context_switch_time; |
| 76 | } | 76 | } |
| 77 | 77 | ||
| 78 | [[nodiscard]] const std::shared_ptr<Common::Fiber>& ControlContext() const { | 78 | // Static public API. |
| 79 | return switch_fiber; | 79 | static bool CanSchedule(KernelCore& kernel) { |
| 80 | return GetCurrentThread(kernel).GetDisableDispatchCount() == 0; | ||
| 81 | } | ||
| 82 | static bool IsSchedulerLockedByCurrentThread(KernelCore& kernel) { | ||
| 83 | return kernel.GlobalSchedulerContext().scheduler_lock.IsLockedByCurrentThread(); | ||
| 80 | } | 84 | } |
| 81 | 85 | ||
| 82 | [[nodiscard]] u64 UpdateHighestPriorityThread(KThread* highest_thread); | 86 | static bool IsSchedulerUpdateNeeded(KernelCore& kernel) { |
| 87 | return kernel.GlobalSchedulerContext().scheduler_update_needed; | ||
| 88 | } | ||
| 89 | static void SetSchedulerUpdateNeeded(KernelCore& kernel) { | ||
| 90 | kernel.GlobalSchedulerContext().scheduler_update_needed = true; | ||
| 91 | } | ||
| 92 | static void ClearSchedulerUpdateNeeded(KernelCore& kernel) { | ||
| 93 | kernel.GlobalSchedulerContext().scheduler_update_needed = false; | ||
| 94 | } | ||
| 83 | 95 | ||
| 84 | /** | 96 | static void DisableScheduling(KernelCore& kernel); |
| 85 | * Takes a thread and moves it to the back of the it's priority list. | 97 | static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling); |
| 86 | * | ||
| 87 | * @note This operation can be redundant and no scheduling is changed if marked as so. | ||
| 88 | */ | ||
| 89 | static void YieldWithoutCoreMigration(KernelCore& kernel); | ||
| 90 | 98 | ||
| 91 | /** | 99 | static u64 UpdateHighestPriorityThreads(KernelCore& kernel); |
| 92 | * Takes a thread and moves it to the back of its priority list. | ||
| 93 | * Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or | ||
| 94 | * a better priority than the next thread in the core. | ||
| 95 | * | ||
| 96 | * @note This operation can be redundant and no scheduling is changed if marked as so. | ||
| 97 | */ | ||
| 98 | static void YieldWithCoreMigration(KernelCore& kernel); | ||
| 99 | |||
| 100 | /** | ||
| 101 | * Takes a thread and moves it out of the scheduling queue | ||
| 102 | * and into the suggested queue. If no thread can be scheduled afterwards on that core, | ||
| 103 | * a suggested thread is obtained instead. | ||
| 104 | * | ||
| 105 | * @note This operation can be redundant and no scheduling is changed if marked as so. | ||
| 106 | */ | ||
| 107 | static void YieldToAnyThread(KernelCore& kernel); | ||
| 108 | 100 | ||
| 109 | static void ClearPreviousThread(KernelCore& kernel, KThread* thread); | 101 | static void ClearPreviousThread(KernelCore& kernel, KThread* thread); |
| 110 | 102 | ||
| 111 | /// Notify the scheduler a thread's status has changed. | ||
| 112 | static void OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state); | 103 | static void OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state); |
| 113 | |||
| 114 | /// Notify the scheduler a thread's priority has changed. | ||
| 115 | static void OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority); | 104 | static void OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority); |
| 116 | |||
| 117 | /// Notify the scheduler a thread's core and/or affinity mask has changed. | ||
| 118 | static void OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread, | 105 | static void OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread, |
| 119 | const KAffinityMask& old_affinity, s32 old_core); | 106 | const KAffinityMask& old_affinity, s32 old_core); |
| 120 | 107 | ||
| 121 | static bool CanSchedule(KernelCore& kernel); | 108 | static void RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 priority); |
| 122 | static bool IsSchedulerUpdateNeeded(const KernelCore& kernel); | 109 | static void RescheduleCores(KernelCore& kernel, u64 cores_needing_scheduling); |
| 123 | static void SetSchedulerUpdateNeeded(KernelCore& kernel); | 110 | |
| 124 | static void ClearSchedulerUpdateNeeded(KernelCore& kernel); | 111 | static void YieldWithoutCoreMigration(KernelCore& kernel); |
| 125 | static void DisableScheduling(KernelCore& kernel); | 112 | static void YieldWithCoreMigration(KernelCore& kernel); |
| 126 | static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling); | 113 | static void YieldToAnyThread(KernelCore& kernel); |
| 127 | [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel); | ||
| 128 | 114 | ||
| 129 | private: | 115 | private: |
| 130 | friend class GlobalSchedulerContext; | 116 | // Static private API. |
| 131 | 117 | static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel) { | |
| 132 | /** | 118 | return kernel.GlobalSchedulerContext().priority_queue; |
| 133 | * Takes care of selecting the new scheduled threads in three steps: | 119 | } |
| 134 | * | 120 | static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel); |
| 135 | * 1. First a thread is selected from the top of the priority queue. If no thread | ||
| 136 | * is obtained then we move to step two, else we are done. | ||
| 137 | * | ||
| 138 | * 2. Second we try to get a suggested thread that's not assigned to any core or | ||
| 139 | * that is not the top thread in that core. | ||
| 140 | * | ||
| 141 | * 3. Third, if no suggested thread is found, we do a second pass and pick a running | ||
| 142 | * thread in another core and swap it with its current thread. | ||
| 143 | * | ||
| 144 | * returns the cores needing scheduling. | ||
| 145 | */ | ||
| 146 | [[nodiscard]] static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel); | ||
| 147 | |||
| 148 | [[nodiscard]] static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel); | ||
| 149 | |||
| 150 | void RotateScheduledQueue(s32 cpu_core_id, s32 priority); | ||
| 151 | 121 | ||
| 152 | void Schedule(); | 122 | static void RescheduleCurrentHLEThread(KernelCore& kernel); |
| 153 | 123 | ||
| 154 | /// Switches the CPU's active thread context to that of the specified thread | 124 | // Instanced private API. |
| 155 | void ScheduleImpl(); | 125 | void ScheduleImpl(); |
| 126 | void ScheduleImplFiber(); | ||
| 127 | void SwitchThread(KThread* next_thread); | ||
| 156 | 128 | ||
| 157 | /// When a thread wakes up, it must run this through its new scheduler | 129 | void Schedule(); |
| 158 | void SwitchContextStep2(); | 130 | void ScheduleOnInterrupt(); |
| 159 | |||
| 160 | /** | ||
| 161 | * Called on every context switch to update the internal timestamp | ||
| 162 | * This also updates the running time ticks for the given thread and | ||
| 163 | * process using the following difference: | ||
| 164 | * | ||
| 165 | * ticks += most_recent_ticks - last_context_switch_ticks | ||
| 166 | * | ||
| 167 | * The internal tick timestamp for the scheduler is simply the | ||
| 168 | * most recent tick count retrieved. No special arithmetic is | ||
| 169 | * applied to it. | ||
| 170 | */ | ||
| 171 | void UpdateLastContextSwitchTime(KThread* thread, KProcess* process); | ||
| 172 | |||
| 173 | void SwitchToCurrent(); | ||
| 174 | 131 | ||
| 175 | KThread* prev_thread{}; | 132 | void RescheduleOtherCores(u64 cores_needing_scheduling); |
| 176 | std::atomic<KThread*> current_thread{}; | 133 | void RescheduleCurrentCore(); |
| 134 | void RescheduleCurrentCoreImpl(); | ||
| 177 | 135 | ||
| 178 | KThread* idle_thread{}; | 136 | u64 UpdateHighestPriorityThread(KThread* thread); |
| 179 | 137 | ||
| 180 | std::shared_ptr<Common::Fiber> switch_fiber{}; | 138 | private: |
| 139 | friend class KScopedDisableDispatch; | ||
| 181 | 140 | ||
| 182 | struct SchedulingState { | 141 | struct SchedulingState { |
| 183 | std::atomic<bool> needs_scheduling{}; | 142 | std::atomic<bool> needs_scheduling{false}; |
| 184 | bool interrupt_task_thread_runnable{}; | 143 | bool interrupt_task_runnable{false}; |
| 185 | bool should_count_idle{}; | 144 | bool should_count_idle{false}; |
| 186 | u64 idle_count{}; | 145 | u64 idle_count{0}; |
| 187 | KThread* highest_priority_thread{}; | 146 | KThread* highest_priority_thread{nullptr}; |
| 188 | void* idle_thread_stack{}; | 147 | void* idle_thread_stack{nullptr}; |
| 148 | std::atomic<KThread*> prev_thread{nullptr}; | ||
| 149 | KInterruptTaskManager* interrupt_task_manager{nullptr}; | ||
| 189 | }; | 150 | }; |
| 190 | 151 | ||
| 191 | SchedulingState state; | 152 | KernelCore& kernel; |
| 192 | 153 | SchedulingState m_state; | |
| 193 | Core::System& system; | 154 | bool m_is_active{false}; |
| 194 | u64 last_context_switch_time{}; | 155 | s32 m_core_id{0}; |
| 195 | const s32 core_id; | 156 | s64 m_last_context_switch_time{0}; |
| 196 | 157 | KThread* m_idle_thread{nullptr}; | |
| 197 | KSpinLock guard{}; | 158 | std::atomic<KThread*> m_current_thread{nullptr}; |
| 159 | |||
| 160 | std::shared_ptr<Common::Fiber> m_switch_fiber{}; | ||
| 161 | KThread* m_switch_cur_thread{}; | ||
| 162 | KThread* m_switch_highest_priority_thread{}; | ||
| 163 | bool m_switch_from_schedule{}; | ||
| 198 | }; | 164 | }; |
| 199 | 165 | ||
| 200 | class [[nodiscard]] KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> { | 166 | class KScopedSchedulerLock : public KScopedLock<KScheduler::LockType> { |
| 201 | public: | 167 | public: |
| 202 | explicit KScopedSchedulerLock(KernelCore& kernel); | 168 | explicit KScopedSchedulerLock(KernelCore& kernel) |
| 203 | ~KScopedSchedulerLock(); | 169 | : KScopedLock(kernel.GlobalSchedulerContext().scheduler_lock) {} |
| 170 | ~KScopedSchedulerLock() = default; | ||
| 204 | }; | 171 | }; |
| 205 | 172 | ||
| 206 | } // namespace Kernel | 173 | } // namespace Kernel |
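Editorial note: after this header change, KScopedSchedulerLock is just a header-only KScopedLock over the single scheduler_lock held in the global scheduler context, so every `KScopedSchedulerLock sl{kernel};` site in the .cpp acquires it on construction and releases it when the scope closes. A simplified model of that RAII shape follows; SchedulerLock here is a plain recursive mutex standing in for KAbstractSchedulerLock<KScheduler>, and GlobalContext is a placeholder, not the emulator's API.

    #include <mutex>

    // Stand-in for KAbstractSchedulerLock<KScheduler>: the real lock also tracks
    // the owning thread and a nesting count.
    struct SchedulerLock {
        void Lock() { m.lock(); }
        void Unlock() { m.unlock(); }
        std::recursive_mutex m;
    };

    // Generic scope guard, analogous to KScopedLock<T>.
    template <typename T>
    class ScopedLock {
    public:
        explicit ScopedLock(T& lock_) : lock{lock_} { lock.Lock(); }
        ~ScopedLock() { lock.Unlock(); }
        ScopedLock(const ScopedLock&) = delete;
        ScopedLock& operator=(const ScopedLock&) = delete;

    private:
        T& lock;
    };

    struct GlobalContext {
        SchedulerLock scheduler_lock; // one lock shared by all cores
    };

    // Usage mirrors the yield paths in the diff: take the guard, mutate shared
    // scheduler state, and the lock drops automatically at the end of the scope.
    void YieldExample(GlobalContext& ctx) {
        ScopedLock<SchedulerLock> sl{ctx.scheduler_lock};
        // ... requeue the current thread, pick successors, request reschedules ...
    }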
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h index 4fa256970..73314b45e 100644 --- a/src/core/hle/kernel/k_scheduler_lock.h +++ b/src/core/hle/kernel/k_scheduler_lock.h | |||
| @@ -5,9 +5,11 @@ | |||
| 5 | 5 | ||
| 6 | #include <atomic> | 6 | #include <atomic> |
| 7 | #include "common/assert.h" | 7 | #include "common/assert.h" |
| 8 | #include "core/hle/kernel/k_interrupt_manager.h" | ||
| 8 | #include "core/hle/kernel/k_spin_lock.h" | 9 | #include "core/hle/kernel/k_spin_lock.h" |
| 9 | #include "core/hle/kernel/k_thread.h" | 10 | #include "core/hle/kernel/k_thread.h" |
| 10 | #include "core/hle/kernel/kernel.h" | 11 | #include "core/hle/kernel/kernel.h" |
| 12 | #include "core/hle/kernel/physical_core.h" | ||
| 11 | 13 | ||
| 12 | namespace Kernel { | 14 | namespace Kernel { |
| 13 | 15 | ||
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index 90de86770..174afc80d 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp | |||
| @@ -258,7 +258,18 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_ | |||
| 258 | } | 258 | } |
| 259 | 259 | ||
| 260 | Result KThread::InitializeDummyThread(KThread* thread) { | 260 | Result KThread::InitializeDummyThread(KThread* thread) { |
| 261 | return thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy); | 261 | // Initialize the thread. |
| 262 | R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy)); | ||
| 263 | |||
| 264 | // Initialize emulation parameters. | ||
| 265 | thread->stack_parameters.disable_count = 0; | ||
| 266 | |||
| 267 | return ResultSuccess; | ||
| 268 | } | ||
| 269 | |||
| 270 | Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) { | ||
| 271 | return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, | ||
| 272 | system.GetCpuManager().GetGuestActivateFunc()); | ||
| 262 | } | 273 | } |
| 263 | 274 | ||
| 264 | Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { | 275 | Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { |
| @@ -277,7 +288,7 @@ Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThr | |||
| 277 | KProcess* owner) { | 288 | KProcess* owner) { |
| 278 | system.Kernel().GlobalSchedulerContext().AddThread(thread); | 289 | system.Kernel().GlobalSchedulerContext().AddThread(thread); |
| 279 | return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, | 290 | return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, |
| 280 | ThreadType::User, system.GetCpuManager().GetGuestThreadStartFunc()); | 291 | ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()); |
| 281 | } | 292 | } |
| 282 | 293 | ||
| 283 | void KThread::PostDestroy(uintptr_t arg) { | 294 | void KThread::PostDestroy(uintptr_t arg) { |
| @@ -1058,6 +1069,8 @@ void KThread::Exit() { | |||
| 1058 | // Register the thread as a work task. | 1069 | // Register the thread as a work task. |
| 1059 | KWorkerTaskManager::AddTask(kernel, KWorkerTaskManager::WorkerType::Exit, this); | 1070 | KWorkerTaskManager::AddTask(kernel, KWorkerTaskManager::WorkerType::Exit, this); |
| 1060 | } | 1071 | } |
| 1072 | |||
| 1073 | UNREACHABLE_MSG("KThread::Exit() would return"); | ||
| 1061 | } | 1074 | } |
| 1062 | 1075 | ||
| 1063 | Result KThread::Sleep(s64 timeout) { | 1076 | Result KThread::Sleep(s64 timeout) { |
| @@ -1093,6 +1106,8 @@ void KThread::IfDummyThreadTryWait() { | |||
| 1093 | return; | 1106 | return; |
| 1094 | } | 1107 | } |
| 1095 | 1108 | ||
| 1109 | ASSERT(!kernel.IsPhantomModeForSingleCore()); | ||
| 1110 | |||
| 1096 | // Block until we are no longer waiting. | 1111 | // Block until we are no longer waiting. |
| 1097 | std::unique_lock lk(dummy_wait_lock); | 1112 | std::unique_lock lk(dummy_wait_lock); |
| 1098 | dummy_wait_cv.wait( | 1113 | dummy_wait_cv.wait( |
| @@ -1197,16 +1212,13 @@ KScopedDisableDispatch::~KScopedDisableDispatch() { | |||
| 1197 | return; | 1212 | return; |
| 1198 | } | 1213 | } |
| 1199 | 1214 | ||
| 1200 | // Skip the reschedule if single-core, as dispatch tracking is disabled here. | ||
| 1201 | if (!Settings::values.use_multi_core.GetValue()) { | ||
| 1202 | return; | ||
| 1203 | } | ||
| 1204 | |||
| 1205 | if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) { | 1215 | if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) { |
| 1206 | auto scheduler = kernel.CurrentScheduler(); | 1216 | auto* scheduler = kernel.CurrentScheduler(); |
| 1207 | 1217 | ||
| 1208 | if (scheduler) { | 1218 | if (scheduler && !kernel.IsPhantomModeForSingleCore()) { |
| 1209 | scheduler->RescheduleCurrentCore(); | 1219 | scheduler->RescheduleCurrentCore(); |
| 1220 | } else { | ||
| 1221 | KScheduler::RescheduleCurrentHLEThread(kernel); | ||
| 1210 | } | 1222 | } |
| 1211 | } else { | 1223 | } else { |
| 1212 | GetCurrentThread(kernel).EnableDispatch(); | 1224 | GetCurrentThread(kernel).EnableDispatch(); |
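Editorial note: with the single-core early-out removed, the KScopedDisableDispatch destructor always participates in dispatch tracking. When the guard drops the last disable reference it triggers a reschedule of the current core (or the HLE fallback when no scheduler is attached); otherwise it simply decrements the count. A self-contained sketch of that balancing follows, with a thread-local integer standing in for the thread's disable_count and a free function standing in for the scheduler call.

    #include <cassert>

    namespace {

    thread_local int disable_count = 0; // stand-in for StackParameters::disable_count

    void RescheduleCurrentCore() {
        // Model only: pick and switch to the highest-priority thread for this core.
    }

    class ScopedDisableDispatch {
    public:
        ScopedDisableDispatch() {
            assert(disable_count >= 0);
            ++disable_count; // DisableDispatch()
        }
        ~ScopedDisableDispatch() {
            assert(disable_count > 0);
            if (disable_count <= 1) {
                // Dropping the last reference: let the scheduler run. In the real
                // destructor the scheduler re-enables dispatch as part of the switch;
                // the decrement here models that step.
                RescheduleCurrentCore();
                --disable_count;
            } else {
                --disable_count; // plain EnableDispatch()
            }
        }
    };

    } // namespace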
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index 28cd7ecb0..9ee20208e 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h | |||
| @@ -413,6 +413,9 @@ public: | |||
| 413 | 413 | ||
| 414 | [[nodiscard]] static Result InitializeDummyThread(KThread* thread); | 414 | [[nodiscard]] static Result InitializeDummyThread(KThread* thread); |
| 415 | 415 | ||
| 416 | [[nodiscard]] static Result InitializeMainThread(Core::System& system, KThread* thread, | ||
| 417 | s32 virt_core); | ||
| 418 | |||
| 416 | [[nodiscard]] static Result InitializeIdleThread(Core::System& system, KThread* thread, | 419 | [[nodiscard]] static Result InitializeIdleThread(Core::System& system, KThread* thread, |
| 417 | s32 virt_core); | 420 | s32 virt_core); |
| 418 | 421 | ||
| @@ -480,39 +483,16 @@ public: | |||
| 480 | return per_core_priority_queue_entry[core]; | 483 | return per_core_priority_queue_entry[core]; |
| 481 | } | 484 | } |
| 482 | 485 | ||
| 483 | [[nodiscard]] bool IsKernelThread() const { | ||
| 484 | return GetActiveCore() == 3; | ||
| 485 | } | ||
| 486 | |||
| 487 | [[nodiscard]] bool IsDispatchTrackingDisabled() const { | ||
| 488 | return is_single_core || IsKernelThread(); | ||
| 489 | } | ||
| 490 | |||
| 491 | [[nodiscard]] s32 GetDisableDispatchCount() const { | 486 | [[nodiscard]] s32 GetDisableDispatchCount() const { |
| 492 | if (IsDispatchTrackingDisabled()) { | ||
| 493 | // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch. | ||
| 494 | return 1; | ||
| 495 | } | ||
| 496 | |||
| 497 | return this->GetStackParameters().disable_count; | 487 | return this->GetStackParameters().disable_count; |
| 498 | } | 488 | } |
| 499 | 489 | ||
| 500 | void DisableDispatch() { | 490 | void DisableDispatch() { |
| 501 | if (IsDispatchTrackingDisabled()) { | ||
| 502 | // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch. | ||
| 503 | return; | ||
| 504 | } | ||
| 505 | |||
| 506 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); | 491 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); |
| 507 | this->GetStackParameters().disable_count++; | 492 | this->GetStackParameters().disable_count++; |
| 508 | } | 493 | } |
| 509 | 494 | ||
| 510 | void EnableDispatch() { | 495 | void EnableDispatch() { |
| 511 | if (IsDispatchTrackingDisabled()) { | ||
| 512 | // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch. | ||
| 513 | return; | ||
| 514 | } | ||
| 515 | |||
| 516 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0); | 496 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0); |
| 517 | this->GetStackParameters().disable_count--; | 497 | this->GetStackParameters().disable_count--; |
| 518 | } | 498 | } |
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index f23c629dc..f4072e1c3 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -64,8 +64,6 @@ struct KernelCore::Impl { | |||
| 64 | 64 | ||
| 65 | is_phantom_mode_for_singlecore = false; | 65 | is_phantom_mode_for_singlecore = false; |
| 66 | 66 | ||
| 67 | InitializePhysicalCores(); | ||
| 68 | |||
| 69 | // Derive the initial memory layout from the emulated board | 67 | // Derive the initial memory layout from the emulated board |
| 70 | Init::InitializeSlabResourceCounts(kernel); | 68 | Init::InitializeSlabResourceCounts(kernel); |
| 71 | DeriveInitialMemoryLayout(); | 69 | DeriveInitialMemoryLayout(); |
| @@ -75,9 +73,9 @@ struct KernelCore::Impl { | |||
| 75 | InitializeSystemResourceLimit(kernel, system.CoreTiming()); | 73 | InitializeSystemResourceLimit(kernel, system.CoreTiming()); |
| 76 | InitializeMemoryLayout(); | 74 | InitializeMemoryLayout(); |
| 77 | Init::InitializeKPageBufferSlabHeap(system); | 75 | Init::InitializeKPageBufferSlabHeap(system); |
| 78 | InitializeSchedulers(); | ||
| 79 | InitializeShutdownThreads(); | 76 | InitializeShutdownThreads(); |
| 80 | InitializePreemption(kernel); | 77 | InitializePreemption(kernel); |
| 78 | InitializePhysicalCores(); | ||
| 81 | 79 | ||
| 82 | RegisterHostThread(); | 80 | RegisterHostThread(); |
| 83 | } | 81 | } |
| @@ -136,7 +134,6 @@ struct KernelCore::Impl { | |||
| 136 | shutdown_threads[core_id] = nullptr; | 134 | shutdown_threads[core_id] = nullptr; |
| 137 | } | 135 | } |
| 138 | 136 | ||
| 139 | schedulers[core_id]->Finalize(); | ||
| 140 | schedulers[core_id].reset(); | 137 | schedulers[core_id].reset(); |
| 141 | } | 138 | } |
| 142 | 139 | ||
| @@ -199,14 +196,21 @@ struct KernelCore::Impl { | |||
| 199 | exclusive_monitor = | 196 | exclusive_monitor = |
| 200 | Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES); | 197 | Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES); |
| 201 | for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { | 198 | for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { |
| 202 | schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i); | 199 | const s32 core{static_cast<s32>(i)}; |
| 200 | |||
| 201 | schedulers[i] = std::make_unique<Kernel::KScheduler>(system.Kernel()); | ||
| 203 | cores.emplace_back(i, system, *schedulers[i], interrupts); | 202 | cores.emplace_back(i, system, *schedulers[i], interrupts); |
| 204 | } | ||
| 205 | } | ||
| 206 | 203 | ||
| 207 | void InitializeSchedulers() { | 204 | auto* main_thread{Kernel::KThread::Create(system.Kernel())}; |
| 208 | for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { | 205 | main_thread->SetName(fmt::format("MainThread:{}", core)); |
| 209 | cores[i].Scheduler().Initialize(); | 206 | main_thread->SetCurrentCore(core); |
| 207 | ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess()); | ||
| 208 | |||
| 209 | auto* idle_thread{Kernel::KThread::Create(system.Kernel())}; | ||
| 210 | idle_thread->SetCurrentCore(core); | ||
| 211 | ASSERT(Kernel::KThread::InitializeIdleThread(system, idle_thread, core).IsSuccess()); | ||
| 212 | |||
| 213 | schedulers[i]->Initialize(main_thread, idle_thread, core); | ||
| 210 | } | 214 | } |
| 211 | } | 215 | } |
| 212 | 216 | ||
| @@ -1109,10 +1113,11 @@ void KernelCore::Suspend(bool suspended) { | |||
| 1109 | } | 1113 | } |
| 1110 | 1114 | ||
| 1111 | void KernelCore::ShutdownCores() { | 1115 | void KernelCore::ShutdownCores() { |
| 1116 | KScopedSchedulerLock lk{*this}; | ||
| 1117 | |||
| 1112 | for (auto* thread : impl->shutdown_threads) { | 1118 | for (auto* thread : impl->shutdown_threads) { |
| 1113 | void(thread->Run()); | 1119 | void(thread->Run()); |
| 1114 | } | 1120 | } |
| 1115 | InterruptAllPhysicalCores(); | ||
| 1116 | } | 1121 | } |
| 1117 | 1122 | ||
| 1118 | bool KernelCore::IsMulticore() const { | 1123 | bool KernelCore::IsMulticore() const { |
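Editorial note: scheduler setup now happens in the same loop that creates the physical cores. Each core gets a freshly created main thread and idle thread, both pinned to that core, and the pair is handed to the scheduler in a single Initialize call. A condensed model of that per-core loop follows; Thread, Scheduler and the unique_ptr ownership are placeholders, since the real KThread objects are ref-counted and created through KThread::Create.

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <string>

    constexpr std::size_t NUM_CPU_CORES = 4; // stand-in

    struct Thread {
        std::string name;
        std::int32_t core{-1};
    };

    struct Scheduler {
        // Shape mirrors KScheduler::Initialize(main_thread, idle_thread, core_id).
        void Initialize(std::unique_ptr<Thread> main, std::unique_ptr<Thread> idle, std::int32_t id) {
            main_thread = std::move(main);
            idle_thread = std::move(idle);
            core_id = id;
        }
        std::unique_ptr<Thread> main_thread;
        std::unique_ptr<Thread> idle_thread;
        std::int32_t core_id{-1};
    };

    std::array<std::unique_ptr<Scheduler>, NUM_CPU_CORES> schedulers;

    void InitializeSchedulers() {
        for (std::size_t i = 0; i < NUM_CPU_CORES; ++i) {
            const auto core = static_cast<std::int32_t>(i);
            schedulers[i] = std::make_unique<Scheduler>();

            // One main thread and one idle thread per core, both pinned to that core.
            auto main_thread = std::make_unique<Thread>(Thread{"MainThread:" + std::to_string(core), core});
            auto idle_thread = std::make_unique<Thread>(Thread{"IdleThread:" + std::to_string(core), core});

            schedulers[i]->Initialize(std::move(main_thread), std::move(idle_thread), core);
        }
    }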
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp index a5b16ae2e..6e7dacf97 100644 --- a/src/core/hle/kernel/physical_core.cpp +++ b/src/core/hle/kernel/physical_core.cpp | |||
| @@ -43,6 +43,7 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) { | |||
| 43 | 43 | ||
| 44 | void PhysicalCore::Run() { | 44 | void PhysicalCore::Run() { |
| 45 | arm_interface->Run(); | 45 | arm_interface->Run(); |
| 46 | arm_interface->ClearExclusiveState(); | ||
| 46 | } | 47 | } |
| 47 | 48 | ||
| 48 | void PhysicalCore::Idle() { | 49 | void PhysicalCore::Idle() { |
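Editorial note: PhysicalCore::Run now clears the ARM interface's exclusive-monitor state as soon as a run slice ends, so a load-exclusive reservation left behind by an interrupted guest thread cannot leak into whichever thread the scheduler puts on the core next. A minimal illustration of that ordering follows; ArmInterfaceStub is a stub with a boolean reservation flag, not the emulator's ARM_Interface.

    struct ArmInterfaceStub {
        void Run() { /* execute guest code until a break condition or interrupt */ }
        void ClearExclusiveState() { has_reservation = false; } // drop any LDREX/LDAXR reservation
        bool has_reservation{false};
    };

    struct PhysicalCoreModel {
        void Run() {
            arm.Run();
            // Drop exclusive state as soon as the slice ends so the next thread
            // scheduled onto this core cannot inherit a stale reservation.
            arm.ClearExclusiveState();
        }
        ArmInterfaceStub arm;
    };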
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 8655506b0..27e5a805d 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp | |||
| @@ -887,7 +887,7 @@ static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle han | |||
| 887 | const auto* const current_thread = GetCurrentThreadPointer(system.Kernel()); | 887 | const auto* const current_thread = GetCurrentThreadPointer(system.Kernel()); |
| 888 | const bool same_thread = current_thread == thread.GetPointerUnsafe(); | 888 | const bool same_thread = current_thread == thread.GetPointerUnsafe(); |
| 889 | 889 | ||
| 890 | const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks(); | 890 | const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTime(); |
| 891 | u64 out_ticks = 0; | 891 | u64 out_ticks = 0; |
| 892 | if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) { | 892 | if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) { |
| 893 | const u64 thread_ticks = current_thread->GetCpuTime(); | 893 | const u64 thread_ticks = current_thread->GetCpuTime(); |
| @@ -3026,11 +3026,6 @@ void Call(Core::System& system, u32 immediate) { | |||
| 3026 | } | 3026 | } |
| 3027 | 3027 | ||
| 3028 | kernel.ExitSVCProfile(); | 3028 | kernel.ExitSVCProfile(); |
| 3029 | |||
| 3030 | if (!thread->IsCallingSvc()) { | ||
| 3031 | auto* host_context = thread->GetHostContext().get(); | ||
| 3032 | host_context->Rewind(); | ||
| 3033 | } | ||
| 3034 | } | 3029 | } |
| 3035 | 3030 | ||
| 3036 | } // namespace Kernel::Svc | 3031 | } // namespace Kernel::Svc |
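Editorial note: the GetInfo hunk above reads the scheduler's last-context-switch timestamp alongside the calling thread's accumulated CPU time. A thread's accumulated ticks only advance at context switches, so a query for the thread's own time plausibly has to add the slice that is still in progress. The sketch below is a rough model of that accounting under that assumption; the TickSource struct and function name are illustrative, not the emulator's API.

    #include <cstdint>

    struct TickSource {
        std::uint64_t thread_cpu_ticks{};    // ticks accumulated for the thread at its last switch
        std::uint64_t last_context_switch{}; // tick timestamp of that switch on this core
        std::uint64_t now{};                 // current CPU tick counter
    };

    // CPU time for the calling thread, including the slice still in progress.
    std::uint64_t QueryOwnCpuTimeTicks(const TickSource& t) {
        return t.thread_cpu_ticks + (t.now - t.last_context_switch);
    }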