| author | 2022-06-26 18:52:16 -0400 |
|---|---|
| committer | 2022-07-14 22:47:18 -0400 |
| commit | 0624c880bd5af45ae9095465e079fa55458515f6 (patch) |
| tree | a8b9f3adf516af30cad021fc32f8669426946cd5 |
| parent | Merge pull request #8540 from lat9nq/copy-nv-ffmpeg (diff) |
| download | yuzu-0624c880bd5af45ae9095465e079fa55458515f6.tar.gz, yuzu-0624c880bd5af45ae9095465e079fa55458515f6.tar.xz, yuzu-0624c880bd5af45ae9095465e079fa55458515f6.zip |
kernel: use KScheduler from mesosphere
Diffstat

| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | src/core/arm/arm_interface.cpp | 3 |
| -rw-r--r-- | src/core/cpu_manager.cpp | 161 |
| -rw-r--r-- | src/core/cpu_manager.h | 14 |
| -rw-r--r-- | src/core/hle/kernel/global_scheduler_context.cpp | 2 |
| -rw-r--r-- | src/core/hle/kernel/k_interrupt_manager.cpp | 7 |
| -rw-r--r-- | src/core/hle/kernel/k_scheduler.cpp | 723 |
| -rw-r--r-- | src/core/hle/kernel/k_scheduler.h | 220 |
| -rw-r--r-- | src/core/hle/kernel/k_scheduler_lock.h | 2 |
| -rw-r--r-- | src/core/hle/kernel/k_thread.cpp | 11 |
| -rw-r--r-- | src/core/hle/kernel/k_thread.h | 5 |
| -rw-r--r-- | src/core/hle/kernel/kernel.cpp | 10 |
| -rw-r--r-- | src/core/hle/kernel/svc.cpp | 7 |

12 files changed, 563 insertions, 602 deletions
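The bulk of the change is in k_scheduler.cpp, which adopts mesosphere's scheduling model: each core's KScheduler owns a dedicated idle-stack fiber, and a context switch first hops onto that stack, saves the outgoing guest context there, and then resumes the incoming thread's host fiber. A condensed sketch of that flow, using names from the hunks below but omitting the interrupt-task check and the locking/retry details that the real code performs:

```cpp
// Condensed sketch of the new scheduling flow (simplified; see the real
// ScheduleImpl/ScheduleImplOffStack in the k_scheduler.cpp hunks below).
void KScheduler::ScheduleImpl() {
    m_state.needs_scheduling = false;
    KThread* const cur = GetCurrentThreadPointer(kernel);
    KThread* const next = m_state.highest_priority_thread;
    if (next == cur) {
        return;  // Nothing to switch to.
    }
    // Hand over to the per-core idle-stack fiber; it saves cur's CPU context,
    // locks and switches to the chosen thread, and yields to its host fiber.
    // Execution resumes here once cur is scheduled again.
    m_idle_cur_thread = cur;
    m_idle_highest_priority_thread = next;
    Common::Fiber::YieldTo(cur->host_context, *m_idle_stack);
}
```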
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp
index cef79b245..cdf388fb9 100644
--- a/src/core/arm/arm_interface.cpp
+++ b/src/core/arm/arm_interface.cpp
| @@ -155,9 +155,10 @@ void ARM_Interface::Run() { | |||
| 155 | break; | 155 | break; |
| 156 | } | 156 | } |
| 157 | 157 | ||
| 158 | // Handle syscalls and scheduling (this may change the current thread) | 158 | // Handle syscalls and scheduling (this may change the current thread/core) |
| 159 | if (Has(hr, svc_call)) { | 159 | if (Has(hr, svc_call)) { |
| 160 | Kernel::Svc::Call(system, GetSvcNumber()); | 160 | Kernel::Svc::Call(system, GetSvcNumber()); |
| 161 | break; | ||
| 161 | } | 162 | } |
| 162 | if (Has(hr, break_loop) || !uses_wall_clock) { | 163 | if (Has(hr, break_loop) || !uses_wall_clock) { |
| 163 | break; | 164 | break; |
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 37d3d83b9..428194129 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
| @@ -8,6 +8,7 @@ | |||
| 8 | #include "core/core.h" | 8 | #include "core/core.h" |
| 9 | #include "core/core_timing.h" | 9 | #include "core/core_timing.h" |
| 10 | #include "core/cpu_manager.h" | 10 | #include "core/cpu_manager.h" |
| 11 | #include "core/hle/kernel/k_interrupt_manager.h" | ||
| 11 | #include "core/hle/kernel/k_scheduler.h" | 12 | #include "core/hle/kernel/k_scheduler.h" |
| 12 | #include "core/hle/kernel/k_thread.h" | 13 | #include "core/hle/kernel/k_thread.h" |
| 13 | #include "core/hle/kernel/kernel.h" | 14 | #include "core/hle/kernel/kernel.h" |
| @@ -41,44 +42,65 @@ void CpuManager::Shutdown() { | |||
| 41 | } | 42 | } |
| 42 | } | 43 | } |
| 43 | 44 | ||
| 44 | void CpuManager::GuestThreadFunction() { | 45 | void CpuManager::GuestActivateFunction() { |
| 45 | if (is_multicore) { | 46 | if (is_multicore) { |
| 46 | MultiCoreRunGuestThread(); | 47 | MultiCoreGuestActivate(); |
| 47 | } else { | 48 | } else { |
| 48 | SingleCoreRunGuestThread(); | 49 | SingleCoreGuestActivate(); |
| 49 | } | 50 | } |
| 50 | } | 51 | } |
| 51 | 52 | ||
| 52 | void CpuManager::GuestRewindFunction() { | 53 | void CpuManager::GuestThreadFunction() { |
| 53 | if (is_multicore) { | 54 | if (is_multicore) { |
| 54 | MultiCoreRunGuestLoop(); | 55 | MultiCoreRunGuestThread(); |
| 55 | } else { | 56 | } else { |
| 56 | SingleCoreRunGuestLoop(); | 57 | SingleCoreRunGuestThread(); |
| 57 | } | 58 | } |
| 58 | } | 59 | } |
| 59 | 60 | ||
| 60 | void CpuManager::IdleThreadFunction() { | 61 | void CpuManager::ShutdownThreadFunction() { |
| 61 | if (is_multicore) { | 62 | ShutdownThread(); |
| 62 | MultiCoreRunIdleThread(); | 63 | } |
| 63 | } else { | 64 | |
| 64 | SingleCoreRunIdleThread(); | 65 | void CpuManager::WaitForAndHandleInterrupt() { |
| 66 | auto& kernel = system.Kernel(); | ||
| 67 | auto& physical_core = kernel.CurrentPhysicalCore(); | ||
| 68 | |||
| 69 | ASSERT(Kernel::GetCurrentThread(kernel).GetDisableDispatchCount() == 1); | ||
| 70 | |||
| 71 | if (!physical_core.IsInterrupted()) { | ||
| 72 | physical_core.Idle(); | ||
| 65 | } | 73 | } |
| 74 | |||
| 75 | HandleInterrupt(); | ||
| 66 | } | 76 | } |
| 67 | 77 | ||
| 68 | void CpuManager::ShutdownThreadFunction() { | 78 | void CpuManager::HandleInterrupt() { |
| 69 | ShutdownThread(); | 79 | auto& kernel = system.Kernel(); |
| 80 | auto core_index = kernel.CurrentPhysicalCoreIndex(); | ||
| 81 | |||
| 82 | Kernel::KInterruptManager::HandleInterrupt(kernel, static_cast<s32>(core_index)); | ||
| 70 | } | 83 | } |
| 71 | 84 | ||
| 72 | /////////////////////////////////////////////////////////////////////////////// | 85 | /////////////////////////////////////////////////////////////////////////////// |
| 73 | /// MultiCore /// | 86 | /// MultiCore /// |
| 74 | /////////////////////////////////////////////////////////////////////////////// | 87 | /////////////////////////////////////////////////////////////////////////////// |
| 75 | 88 | ||
| 89 | void CpuManager::MultiCoreGuestActivate() { | ||
| 90 | // Similar to the HorizonKernelMain callback in HOS | ||
| 91 | auto& kernel = system.Kernel(); | ||
| 92 | auto* scheduler = kernel.CurrentScheduler(); | ||
| 93 | |||
| 94 | scheduler->Activate(); | ||
| 95 | UNREACHABLE(); | ||
| 96 | } | ||
| 97 | |||
| 76 | void CpuManager::MultiCoreRunGuestThread() { | 98 | void CpuManager::MultiCoreRunGuestThread() { |
| 99 | // Similar to UserModeThreadStarter in HOS | ||
| 77 | auto& kernel = system.Kernel(); | 100 | auto& kernel = system.Kernel(); |
| 78 | kernel.CurrentScheduler()->OnThreadStart(); | 101 | auto* thread = kernel.GetCurrentEmuThread(); |
| 79 | auto* thread = kernel.CurrentScheduler()->GetSchedulerCurrentThread(); | 102 | thread->EnableDispatch(); |
| 80 | auto& host_context = thread->GetHostContext(); | 103 | |
| 81 | host_context->SetRewindPoint([this] { GuestRewindFunction(); }); | ||
| 82 | MultiCoreRunGuestLoop(); | 104 | MultiCoreRunGuestLoop(); |
| 83 | } | 105 | } |
| 84 | 106 | ||
| @@ -91,18 +113,8 @@ void CpuManager::MultiCoreRunGuestLoop() { | |||
| 91 | physical_core->Run(); | 113 | physical_core->Run(); |
| 92 | physical_core = &kernel.CurrentPhysicalCore(); | 114 | physical_core = &kernel.CurrentPhysicalCore(); |
| 93 | } | 115 | } |
| 94 | { | ||
| 95 | Kernel::KScopedDisableDispatch dd(kernel); | ||
| 96 | physical_core->ArmInterface().ClearExclusiveState(); | ||
| 97 | } | ||
| 98 | } | ||
| 99 | } | ||
| 100 | 116 | ||
| 101 | void CpuManager::MultiCoreRunIdleThread() { | 117 | HandleInterrupt(); |
| 102 | auto& kernel = system.Kernel(); | ||
| 103 | while (true) { | ||
| 104 | Kernel::KScopedDisableDispatch dd(kernel); | ||
| 105 | kernel.CurrentPhysicalCore().Idle(); | ||
| 106 | } | 118 | } |
| 107 | } | 119 | } |
| 108 | 120 | ||
| @@ -110,83 +122,20 @@ void CpuManager::MultiCoreRunIdleThread() { | |||
| 110 | /// SingleCore /// | 122 | /// SingleCore /// |
| 111 | /////////////////////////////////////////////////////////////////////////////// | 123 | /////////////////////////////////////////////////////////////////////////////// |
| 112 | 124 | ||
| 113 | void CpuManager::SingleCoreRunGuestThread() { | 125 | void CpuManager::SingleCoreGuestActivate() {} |
| 114 | auto& kernel = system.Kernel(); | ||
| 115 | kernel.CurrentScheduler()->OnThreadStart(); | ||
| 116 | auto* thread = kernel.CurrentScheduler()->GetSchedulerCurrentThread(); | ||
| 117 | auto& host_context = thread->GetHostContext(); | ||
| 118 | host_context->SetRewindPoint([this] { GuestRewindFunction(); }); | ||
| 119 | SingleCoreRunGuestLoop(); | ||
| 120 | } | ||
| 121 | |||
| 122 | void CpuManager::SingleCoreRunGuestLoop() { | ||
| 123 | auto& kernel = system.Kernel(); | ||
| 124 | while (true) { | ||
| 125 | auto* physical_core = &kernel.CurrentPhysicalCore(); | ||
| 126 | if (!physical_core->IsInterrupted()) { | ||
| 127 | physical_core->Run(); | ||
| 128 | physical_core = &kernel.CurrentPhysicalCore(); | ||
| 129 | } | ||
| 130 | kernel.SetIsPhantomModeForSingleCore(true); | ||
| 131 | system.CoreTiming().Advance(); | ||
| 132 | kernel.SetIsPhantomModeForSingleCore(false); | ||
| 133 | physical_core->ArmInterface().ClearExclusiveState(); | ||
| 134 | PreemptSingleCore(); | ||
| 135 | auto& scheduler = kernel.Scheduler(current_core); | ||
| 136 | scheduler.RescheduleCurrentCore(); | ||
| 137 | } | ||
| 138 | } | ||
| 139 | |||
| 140 | void CpuManager::SingleCoreRunIdleThread() { | ||
| 141 | auto& kernel = system.Kernel(); | ||
| 142 | while (true) { | ||
| 143 | auto& physical_core = kernel.CurrentPhysicalCore(); | ||
| 144 | PreemptSingleCore(false); | ||
| 145 | system.CoreTiming().AddTicks(1000U); | ||
| 146 | idle_count++; | ||
| 147 | auto& scheduler = physical_core.Scheduler(); | ||
| 148 | scheduler.RescheduleCurrentCore(); | ||
| 149 | } | ||
| 150 | } | ||
| 151 | 126 | ||
| 152 | void CpuManager::PreemptSingleCore(bool from_running_enviroment) { | 127 | void CpuManager::SingleCoreRunGuestThread() {} |
| 153 | { | ||
| 154 | auto& kernel = system.Kernel(); | ||
| 155 | auto& scheduler = kernel.Scheduler(current_core); | ||
| 156 | Kernel::KThread* current_thread = scheduler.GetSchedulerCurrentThread(); | ||
| 157 | if (idle_count >= 4 || from_running_enviroment) { | ||
| 158 | if (!from_running_enviroment) { | ||
| 159 | system.CoreTiming().Idle(); | ||
| 160 | idle_count = 0; | ||
| 161 | } | ||
| 162 | kernel.SetIsPhantomModeForSingleCore(true); | ||
| 163 | system.CoreTiming().Advance(); | ||
| 164 | kernel.SetIsPhantomModeForSingleCore(false); | ||
| 165 | } | ||
| 166 | current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES); | ||
| 167 | system.CoreTiming().ResetTicks(); | ||
| 168 | scheduler.Unload(scheduler.GetSchedulerCurrentThread()); | ||
| 169 | 128 | ||
| 170 | auto& next_scheduler = kernel.Scheduler(current_core); | 129 | void CpuManager::SingleCoreRunGuestLoop() {} |
| 171 | Common::Fiber::YieldTo(current_thread->GetHostContext(), *next_scheduler.ControlContext()); | ||
| 172 | } | ||
| 173 | 130 | ||
| 174 | // May have changed scheduler | 131 | void CpuManager::PreemptSingleCore(bool from_running_enviroment) {} |
| 175 | { | ||
| 176 | auto& scheduler = system.Kernel().Scheduler(current_core); | ||
| 177 | scheduler.Reload(scheduler.GetSchedulerCurrentThread()); | ||
| 178 | if (!scheduler.IsIdle()) { | ||
| 179 | idle_count = 0; | ||
| 180 | } | ||
| 181 | } | ||
| 182 | } | ||
| 183 | 132 | ||
| 184 | void CpuManager::ShutdownThread() { | 133 | void CpuManager::ShutdownThread() { |
| 185 | auto& kernel = system.Kernel(); | 134 | auto& kernel = system.Kernel(); |
| 135 | auto* thread = kernel.GetCurrentEmuThread(); | ||
| 186 | auto core = is_multicore ? kernel.CurrentPhysicalCoreIndex() : 0; | 136 | auto core = is_multicore ? kernel.CurrentPhysicalCoreIndex() : 0; |
| 187 | auto* current_thread = kernel.GetCurrentEmuThread(); | ||
| 188 | 137 | ||
| 189 | Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context); | 138 | Common::Fiber::YieldTo(thread->GetHostContext(), *core_data[core].host_context); |
| 190 | UNREACHABLE(); | 139 | UNREACHABLE(); |
| 191 | } | 140 | } |
| 192 | 141 | ||
| @@ -218,9 +167,21 @@ void CpuManager::RunThread(std::size_t core) { | |||
| 218 | system.GPU().ObtainContext(); | 167 | system.GPU().ObtainContext(); |
| 219 | } | 168 | } |
| 220 | 169 | ||
| 221 | auto* current_thread = system.Kernel().CurrentScheduler()->GetIdleThread(); | 170 | auto& kernel = system.Kernel(); |
| 222 | Kernel::SetCurrentThread(system.Kernel(), current_thread); | 171 | |
| 223 | Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext()); | 172 | auto* main_thread = Kernel::KThread::Create(kernel); |
| 173 | main_thread->SetName(fmt::format("MainThread:{}", core)); | ||
| 174 | ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, static_cast<s32>(core)) | ||
| 175 | .IsSuccess()); | ||
| 176 | |||
| 177 | auto* idle_thread = Kernel::KThread::Create(kernel); | ||
| 178 | ASSERT(Kernel::KThread::InitializeIdleThread(system, idle_thread, static_cast<s32>(core)) | ||
| 179 | .IsSuccess()); | ||
| 180 | |||
| 181 | kernel.SetCurrentEmuThread(main_thread); | ||
| 182 | kernel.CurrentScheduler()->Initialize(idle_thread); | ||
| 183 | |||
| 184 | Common::Fiber::YieldTo(data.host_context, *main_thread->GetHostContext()); | ||
| 224 | } | 185 | } |
| 225 | 186 | ||
| 226 | } // namespace Core | 187 | } // namespace Core |
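With the rewind/idle fiber machinery removed, each host core thread now bootstraps the guest the same way: RunThread creates a main thread and an idle thread for its core, registers them with the kernel and scheduler, and yields into the main thread's host fiber. That fiber's entry point is presumably wired to GuestActivateFunction in the k_thread.cpp changes listed in the diffstat (not shown in this section), which enters the scheduler and never returns. Condensed, with assertions and the GPU/single-core handling omitted:

```cpp
// Condensed per-core startup path after this change (not literal code).
void CpuManager::RunThread(std::size_t core) {
    auto& kernel = system.Kernel();

    // One main thread and one idle thread per physical core.
    auto* main_thread = Kernel::KThread::Create(kernel);
    Kernel::KThread::InitializeMainThread(system, main_thread, static_cast<s32>(core));
    auto* idle_thread = Kernel::KThread::Create(kernel);
    Kernel::KThread::InitializeIdleThread(system, idle_thread, static_cast<s32>(core));

    kernel.SetCurrentEmuThread(main_thread);
    kernel.CurrentScheduler()->Initialize(idle_thread);

    // Enter the guest by switching to the main thread's host fiber.
    Common::Fiber::YieldTo(core_data[core].host_context, *main_thread->GetHostContext());
}

// Multicore entry point reached from that fiber:
void CpuManager::MultiCoreGuestActivate() {
    auto* scheduler = system.Kernel().CurrentScheduler();
    scheduler->Activate();  // enters the scheduling loop; does not return
    UNREACHABLE();
}
```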
diff --git a/src/core/cpu_manager.h b/src/core/cpu_manager.h
index 76dc58ee1..8143424af 100644
--- a/src/core/cpu_manager.h
+++ b/src/core/cpu_manager.h
| @@ -5,6 +5,7 @@ | |||
| 5 | 5 | ||
| 6 | #include <array> | 6 | #include <array> |
| 7 | #include <atomic> | 7 | #include <atomic> |
| 8 | #include <csetjmp> | ||
| 8 | #include <functional> | 9 | #include <functional> |
| 9 | #include <memory> | 10 | #include <memory> |
| 10 | #include <thread> | 11 | #include <thread> |
| @@ -47,10 +48,14 @@ public: | |||
| 47 | gpu_barrier->Sync(); | 48 | gpu_barrier->Sync(); |
| 48 | } | 49 | } |
| 49 | 50 | ||
| 51 | void WaitForAndHandleInterrupt(); | ||
| 50 | void Initialize(); | 52 | void Initialize(); |
| 51 | void Shutdown(); | 53 | void Shutdown(); |
| 52 | 54 | ||
| 53 | std::function<void()> GetGuestThreadStartFunc() { | 55 | std::function<void()> GetGuestActivateFunc() { |
| 56 | return [this] { GuestActivateFunction(); }; | ||
| 57 | } | ||
| 58 | std::function<void()> GetGuestThreadFunc() { | ||
| 54 | return [this] { GuestThreadFunction(); }; | 59 | return [this] { GuestThreadFunction(); }; |
| 55 | } | 60 | } |
| 56 | std::function<void()> GetIdleThreadStartFunc() { | 61 | std::function<void()> GetIdleThreadStartFunc() { |
| @@ -67,21 +72,22 @@ public: | |||
| 67 | } | 72 | } |
| 68 | 73 | ||
| 69 | private: | 74 | private: |
| 75 | void GuestActivateFunction(); | ||
| 70 | void GuestThreadFunction(); | 76 | void GuestThreadFunction(); |
| 71 | void GuestRewindFunction(); | ||
| 72 | void IdleThreadFunction(); | 77 | void IdleThreadFunction(); |
| 73 | void ShutdownThreadFunction(); | 78 | void ShutdownThreadFunction(); |
| 74 | 79 | ||
| 80 | void MultiCoreGuestActivate(); | ||
| 75 | void MultiCoreRunGuestThread(); | 81 | void MultiCoreRunGuestThread(); |
| 76 | void MultiCoreRunGuestLoop(); | 82 | void MultiCoreRunGuestLoop(); |
| 77 | void MultiCoreRunIdleThread(); | ||
| 78 | 83 | ||
| 84 | void SingleCoreGuestActivate(); | ||
| 79 | void SingleCoreRunGuestThread(); | 85 | void SingleCoreRunGuestThread(); |
| 80 | void SingleCoreRunGuestLoop(); | 86 | void SingleCoreRunGuestLoop(); |
| 81 | void SingleCoreRunIdleThread(); | ||
| 82 | 87 | ||
| 83 | static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core); | 88 | static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core); |
| 84 | 89 | ||
| 90 | void HandleInterrupt(); | ||
| 85 | void ShutdownThread(); | 91 | void ShutdownThread(); |
| 86 | void RunThread(std::size_t core); | 92 | void RunThread(std::size_t core); |
| 87 | 93 | ||
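cpu_manager.h now exposes GetGuestActivateFunc alongside GetGuestThreadFunc, so kernel code can pick the right entry point when it builds a thread's host fiber. A hypothetical call site is sketched below; the real one lives in the k_thread.cpp changes listed in the diffstat, not in this section:

```cpp
// Hypothetical wiring; the actual call site is in k_thread.cpp.
auto& cpu_manager = system.GetCpuManager();

// Main threads enter through the activate path, ordinary guest threads
// through the regular guest-thread entry point.
auto main_entry  = cpu_manager.GetGuestActivateFunc();
auto guest_entry = cpu_manager.GetGuestThreadFunc();

auto host_context = std::make_shared<Common::Fiber>(std::move(guest_entry));
```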
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index 164436b26..21fd5cb67 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
| @@ -41,7 +41,7 @@ void GlobalSchedulerContext::PreemptThreads() { | |||
| 41 | ASSERT(IsLocked()); | 41 | ASSERT(IsLocked()); |
| 42 | for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | 42 | for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { |
| 43 | const u32 priority = preemption_priorities[core_id]; | 43 | const u32 priority = preemption_priorities[core_id]; |
| 44 | kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority); | 44 | KScheduler::RotateScheduledQueue(kernel, core_id, priority); |
| 45 | 45 | ||
| 46 | // Signal an interrupt occurred. For core 3, this is a certainty, as preemption will result | 46 | // Signal an interrupt occurred. For core 3, this is a certainty, as preemption will result |
| 47 | // in the rotator thread being scheduled. For cores 0-2, this is to simulate or system | 47 | // in the rotator thread being scheduled. For cores 0-2, this is to simulate or system |
diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp
index d606a7f86..1b577a5b3 100644
--- a/src/core/hle/kernel/k_interrupt_manager.cpp
+++ b/src/core/hle/kernel/k_interrupt_manager.cpp
| @@ -6,6 +6,7 @@ | |||
| 6 | #include "core/hle/kernel/k_scheduler.h" | 6 | #include "core/hle/kernel/k_scheduler.h" |
| 7 | #include "core/hle/kernel/k_thread.h" | 7 | #include "core/hle/kernel/k_thread.h" |
| 8 | #include "core/hle/kernel/kernel.h" | 8 | #include "core/hle/kernel/kernel.h" |
| 9 | #include "core/hle/kernel/physical_core.h" | ||
| 9 | 10 | ||
| 10 | namespace Kernel::KInterruptManager { | 11 | namespace Kernel::KInterruptManager { |
| 11 | 12 | ||
| @@ -15,6 +16,9 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) { | |||
| 15 | return; | 16 | return; |
| 16 | } | 17 | } |
| 17 | 18 | ||
| 19 | // Acknowledge the interrupt. | ||
| 20 | kernel.PhysicalCore(core_id).ClearInterrupt(); | ||
| 21 | |||
| 18 | auto& current_thread = GetCurrentThread(kernel); | 22 | auto& current_thread = GetCurrentThread(kernel); |
| 19 | 23 | ||
| 20 | // If the user disable count is set, we may need to pin the current thread. | 24 | // If the user disable count is set, we may need to pin the current thread. |
| @@ -27,6 +31,9 @@ void HandleInterrupt(KernelCore& kernel, s32 core_id) { | |||
| 27 | // Set the interrupt flag for the thread. | 31 | // Set the interrupt flag for the thread. |
| 28 | GetCurrentThread(kernel).SetInterruptFlag(); | 32 | GetCurrentThread(kernel).SetInterruptFlag(); |
| 29 | } | 33 | } |
| 34 | |||
| 35 | // Request interrupt scheduling. | ||
| 36 | kernel.CurrentScheduler()->RequestScheduleOnInterrupt(); | ||
| 30 | } | 37 | } |
| 31 | 38 | ||
| 32 | } // namespace Kernel::KInterruptManager | 39 | } // namespace Kernel::KInterruptManager |
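Interrupt handling is now funneled through KInterruptManager and the scheduler rather than through the old idle-thread loops. Putting this hunk together with the CpuManager changes above, the path from an idle core to a reschedule looks roughly like this (condensed from the shown hunks, not literal code):

```cpp
// Condensed cross-file view of the new interrupt path.
void CpuManager::WaitForAndHandleInterrupt() {
    auto& physical_core = system.Kernel().CurrentPhysicalCore();
    if (!physical_core.IsInterrupted()) {
        physical_core.Idle();  // block until another core sends an interrupt
    }
    HandleInterrupt();  // forwards to KInterruptManager::HandleInterrupt
}

// Kernel::KInterruptManager
void HandleInterrupt(KernelCore& kernel, s32 core_id) {
    kernel.PhysicalCore(core_id).ClearInterrupt();  // acknowledge
    // ...pin the current thread or set its interrupt flag as needed...
    kernel.CurrentScheduler()->RequestScheduleOnInterrupt();
}

void KScheduler::RequestScheduleOnInterrupt() {
    m_state.needs_scheduling = true;
    if (CanSchedule(kernel)) {
        ScheduleOnInterrupt();  // disable dispatch, Schedule(), re-enable dispatch
    }
}
```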
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index d599d2bcb..13915dbd9 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
| @@ -27,69 +27,162 @@ static void IncrementScheduledCount(Kernel::KThread* thread) { | |||
| 27 | } | 27 | } |
| 28 | } | 28 | } |
| 29 | 29 | ||
| 30 | void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule) { | 30 | KScheduler::KScheduler(KernelCore& kernel_) : kernel{kernel_} { |
| 31 | auto scheduler = kernel.CurrentScheduler(); | 31 | m_idle_stack = std::make_shared<Common::Fiber>([this] { |
| 32 | 32 | while (true) { | |
| 33 | u32 current_core{0xF}; | 33 | ScheduleImplOffStack(); |
| 34 | bool must_context_switch{}; | ||
| 35 | if (scheduler) { | ||
| 36 | current_core = scheduler->core_id; | ||
| 37 | // TODO(bunnei): Should be set to true when we deprecate single core | ||
| 38 | must_context_switch = !kernel.IsPhantomModeForSingleCore(); | ||
| 39 | } | ||
| 40 | |||
| 41 | while (cores_pending_reschedule != 0) { | ||
| 42 | const auto core = static_cast<u32>(std::countr_zero(cores_pending_reschedule)); | ||
| 43 | ASSERT(core < Core::Hardware::NUM_CPU_CORES); | ||
| 44 | if (!must_context_switch || core != current_core) { | ||
| 45 | auto& phys_core = kernel.PhysicalCore(core); | ||
| 46 | phys_core.Interrupt(); | ||
| 47 | } | 34 | } |
| 48 | cores_pending_reschedule &= ~(1ULL << core); | 35 | }); |
| 36 | |||
| 37 | m_state.needs_scheduling = true; | ||
| 38 | } | ||
| 39 | |||
| 40 | KScheduler::~KScheduler() = default; | ||
| 41 | |||
| 42 | void KScheduler::SetInterruptTaskRunnable() { | ||
| 43 | m_state.interrupt_task_runnable = true; | ||
| 44 | m_state.needs_scheduling = true; | ||
| 45 | } | ||
| 46 | |||
| 47 | void KScheduler::RequestScheduleOnInterrupt() { | ||
| 48 | m_state.needs_scheduling = true; | ||
| 49 | |||
| 50 | if (CanSchedule(kernel)) { | ||
| 51 | ScheduleOnInterrupt(); | ||
| 49 | } | 52 | } |
| 53 | } | ||
| 50 | 54 | ||
| 51 | for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) { | 55 | void KScheduler::DisableScheduling(KernelCore& kernel) { |
| 52 | if (kernel.PhysicalCore(core_id).IsInterrupted()) { | 56 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); |
| 53 | KInterruptManager::HandleInterrupt(kernel, static_cast<s32>(core_id)); | 57 | GetCurrentThread(kernel).DisableDispatch(); |
| 58 | } | ||
| 59 | |||
| 60 | void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) { | ||
| 61 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 1); | ||
| 62 | |||
| 63 | auto* scheduler = kernel.CurrentScheduler(); | ||
| 64 | |||
| 65 | if (!scheduler) { | ||
| 66 | // HACK: we cannot schedule from this thread, it is not a core thread | ||
| 67 | RescheduleCores(kernel, cores_needing_scheduling); | ||
| 68 | if (GetCurrentThread(kernel).GetDisableDispatchCount() == 1) { | ||
| 69 | // Special case to ensure dummy threads that are waiting block | ||
| 70 | GetCurrentThread(kernel).IfDummyThreadTryWait(); | ||
| 54 | } | 71 | } |
| 72 | GetCurrentThread(kernel).EnableDispatch(); | ||
| 73 | return; | ||
| 74 | } | ||
| 75 | |||
| 76 | scheduler->RescheduleOtherCores(cores_needing_scheduling); | ||
| 77 | |||
| 78 | if (GetCurrentThread(kernel).GetDisableDispatchCount() > 1) { | ||
| 79 | GetCurrentThread(kernel).EnableDispatch(); | ||
| 80 | } else { | ||
| 81 | scheduler->RescheduleCurrentCore(); | ||
| 82 | } | ||
| 83 | } | ||
| 84 | |||
| 85 | u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { | ||
| 86 | if (IsSchedulerUpdateNeeded(kernel)) { | ||
| 87 | return UpdateHighestPriorityThreadsImpl(kernel); | ||
| 88 | } else { | ||
| 89 | return 0; | ||
| 90 | } | ||
| 91 | } | ||
| 92 | |||
| 93 | void KScheduler::Schedule() { | ||
| 94 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); | ||
| 95 | ASSERT(m_core_id == GetCurrentCoreId(kernel)); | ||
| 96 | |||
| 97 | ScheduleImpl(); | ||
| 98 | } | ||
| 99 | |||
| 100 | void KScheduler::ScheduleOnInterrupt() { | ||
| 101 | GetCurrentThread(kernel).DisableDispatch(); | ||
| 102 | Schedule(); | ||
| 103 | GetCurrentThread(kernel).EnableDispatch(); | ||
| 104 | } | ||
| 105 | |||
| 106 | void KScheduler::RescheduleCurrentCore() { | ||
| 107 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); | ||
| 108 | |||
| 109 | GetCurrentThread(kernel).EnableDispatch(); | ||
| 110 | |||
| 111 | if (m_state.needs_scheduling.load()) { | ||
| 112 | // Disable interrupts, and then check again if rescheduling is needed. | ||
| 113 | // KScopedInterruptDisable intr_disable; | ||
| 114 | |||
| 115 | kernel.CurrentScheduler()->RescheduleCurrentCoreImpl(); | ||
| 55 | } | 116 | } |
| 117 | } | ||
| 56 | 118 | ||
| 57 | if (must_context_switch) { | 119 | void KScheduler::RescheduleCurrentCoreImpl() { |
| 58 | auto core_scheduler = kernel.CurrentScheduler(); | 120 | // Check that scheduling is needed. |
| 59 | kernel.ExitSVCProfile(); | 121 | if (m_state.needs_scheduling.load()) [[likely]] { |
| 60 | core_scheduler->RescheduleCurrentCore(); | 122 | GetCurrentThread(kernel).DisableDispatch(); |
| 61 | kernel.EnterSVCProfile(); | 123 | Schedule(); |
| 124 | GetCurrentThread(kernel).EnableDispatch(); | ||
| 62 | } | 125 | } |
| 63 | } | 126 | } |
| 64 | 127 | ||
| 128 | void KScheduler::Initialize(KThread* idle_thread) { | ||
| 129 | // Set core ID/idle thread/interrupt task manager. | ||
| 130 | m_core_id = GetCurrentCoreId(kernel); | ||
| 131 | m_idle_thread = idle_thread; | ||
| 132 | // m_state.idle_thread_stack = m_idle_thread->GetStackTop(); | ||
| 133 | // m_state.interrupt_task_manager = &kernel.GetInterruptTaskManager(); | ||
| 134 | |||
| 135 | // Insert the main thread into the priority queue. | ||
| 136 | // { | ||
| 137 | // KScopedSchedulerLock lk{kernel}; | ||
| 138 | // GetPriorityQueue(kernel).PushBack(GetCurrentThreadPointer(kernel)); | ||
| 139 | // SetSchedulerUpdateNeeded(kernel); | ||
| 140 | // } | ||
| 141 | |||
| 142 | // Bind interrupt handler. | ||
| 143 | // kernel.GetInterruptManager().BindHandler( | ||
| 144 | // GetSchedulerInterruptHandler(kernel), KInterruptName::Scheduler, m_core_id, | ||
| 145 | // KInterruptController::PriorityLevel_Scheduler, false, false); | ||
| 146 | |||
| 147 | // Set the current thread. | ||
| 148 | m_current_thread = GetCurrentThreadPointer(kernel); | ||
| 149 | } | ||
| 150 | |||
| 151 | void KScheduler::Activate() { | ||
| 152 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1); | ||
| 153 | |||
| 154 | // m_state.should_count_idle = KTargetSystem::IsDebugMode(); | ||
| 155 | m_is_active = true; | ||
| 156 | RescheduleCurrentCore(); | ||
| 157 | } | ||
| 158 | |||
| 65 | u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) { | 159 | u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) { |
| 66 | KScopedSpinLock lk{guard}; | 160 | if (KThread* prev_highest_thread = m_state.highest_priority_thread; |
| 67 | if (KThread* prev_highest_thread = state.highest_priority_thread; | 161 | prev_highest_thread != highest_thread) [[likely]] { |
| 68 | prev_highest_thread != highest_thread) { | 162 | if (prev_highest_thread != nullptr) [[likely]] { |
| 69 | if (prev_highest_thread != nullptr) { | ||
| 70 | IncrementScheduledCount(prev_highest_thread); | 163 | IncrementScheduledCount(prev_highest_thread); |
| 71 | prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks()); | 164 | prev_highest_thread->SetLastScheduledTick(kernel.System().CoreTiming().GetCPUTicks()); |
| 72 | } | 165 | } |
| 73 | if (state.should_count_idle) { | 166 | if (m_state.should_count_idle) { |
| 74 | if (highest_thread != nullptr) { | 167 | if (highest_thread != nullptr) [[likely]] { |
| 75 | if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) { | 168 | if (KProcess* process = highest_thread->GetOwnerProcess(); process != nullptr) { |
| 76 | process->SetRunningThread(core_id, highest_thread, state.idle_count); | 169 | process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count); |
| 77 | } | 170 | } |
| 78 | } else { | 171 | } else { |
| 79 | state.idle_count++; | 172 | m_state.idle_count++; |
| 80 | } | 173 | } |
| 81 | } | 174 | } |
| 82 | 175 | ||
| 83 | state.highest_priority_thread = highest_thread; | 176 | m_state.highest_priority_thread = highest_thread; |
| 84 | state.needs_scheduling.store(true); | 177 | m_state.needs_scheduling = true; |
| 85 | return (1ULL << core_id); | 178 | return (1ULL << m_core_id); |
| 86 | } else { | 179 | } else { |
| 87 | return 0; | 180 | return 0; |
| 88 | } | 181 | } |
| 89 | } | 182 | } |
| 90 | 183 | ||
| 91 | u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | 184 | u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { |
| 92 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | 185 | ASSERT(IsSchedulerLockedByCurrentThread(kernel)); |
| 93 | 186 | ||
| 94 | // Clear that we need to update. | 187 | // Clear that we need to update. |
| 95 | ClearSchedulerUpdateNeeded(kernel); | 188 | ClearSchedulerUpdateNeeded(kernel); |
| @@ -98,18 +191,20 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | |||
| 98 | KThread* top_threads[Core::Hardware::NUM_CPU_CORES]; | 191 | KThread* top_threads[Core::Hardware::NUM_CPU_CORES]; |
| 99 | auto& priority_queue = GetPriorityQueue(kernel); | 192 | auto& priority_queue = GetPriorityQueue(kernel); |
| 100 | 193 | ||
| 101 | /// We want to go over all cores, finding the highest priority thread and determining if | 194 | // We want to go over all cores, finding the highest priority thread and determining if |
| 102 | /// scheduling is needed for that core. | 195 | // scheduling is needed for that core. |
| 103 | for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | 196 | for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { |
| 104 | KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id)); | 197 | KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id)); |
| 105 | if (top_thread != nullptr) { | 198 | if (top_thread != nullptr) { |
| 106 | // If the thread has no waiters, we need to check if the process has a thread pinned. | 199 | // We need to check if the thread's process has a pinned thread. |
| 107 | if (top_thread->GetNumKernelWaiters() == 0) { | 200 | if (KProcess* parent = top_thread->GetOwnerProcess()) { |
| 108 | if (KProcess* parent = top_thread->GetOwnerProcess(); parent != nullptr) { | 201 | // Check that there's a pinned thread other than the current top thread. |
| 109 | if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id)); | 202 | if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id)); |
| 110 | pinned != nullptr && pinned != top_thread) { | 203 | pinned != nullptr && pinned != top_thread) { |
| 111 | // We prefer our parent's pinned thread if possible. However, we also don't | 204 | // We need to prefer threads with kernel waiters to the pinned thread. |
| 112 | // want to schedule un-runnable threads. | 205 | if (top_thread->GetNumKernelWaiters() == |
| 206 | 0 /* && top_thread != parent->GetExceptionThread() */) { | ||
| 207 | // If the pinned thread is runnable, use it. | ||
| 113 | if (pinned->GetRawState() == ThreadState::Runnable) { | 208 | if (pinned->GetRawState() == ThreadState::Runnable) { |
| 114 | top_thread = pinned; | 209 | top_thread = pinned; |
| 115 | } else { | 210 | } else { |
| @@ -129,7 +224,8 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | |||
| 129 | 224 | ||
| 130 | // Idle cores are bad. We're going to try to migrate threads to each idle core in turn. | 225 | // Idle cores are bad. We're going to try to migrate threads to each idle core in turn. |
| 131 | while (idle_cores != 0) { | 226 | while (idle_cores != 0) { |
| 132 | const auto core_id = static_cast<u32>(std::countr_zero(idle_cores)); | 227 | const s32 core_id = static_cast<s32>(std::countr_zero(idle_cores)); |
| 228 | |||
| 133 | if (KThread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) { | 229 | if (KThread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) { |
| 134 | s32 migration_candidates[Core::Hardware::NUM_CPU_CORES]; | 230 | s32 migration_candidates[Core::Hardware::NUM_CPU_CORES]; |
| 135 | size_t num_candidates = 0; | 231 | size_t num_candidates = 0; |
| @@ -150,7 +246,6 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | |||
| 150 | // The suggested thread isn't bound to its core, so we can migrate it! | 246 | // The suggested thread isn't bound to its core, so we can migrate it! |
| 151 | suggested->SetActiveCore(core_id); | 247 | suggested->SetActiveCore(core_id); |
| 152 | priority_queue.ChangeCore(suggested_core, suggested); | 248 | priority_queue.ChangeCore(suggested_core, suggested); |
| 153 | |||
| 154 | top_threads[core_id] = suggested; | 249 | top_threads[core_id] = suggested; |
| 155 | cores_needing_scheduling |= | 250 | cores_needing_scheduling |= |
| 156 | kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]); | 251 | kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]); |
| @@ -183,7 +278,6 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | |||
| 183 | // Perform the migration. | 278 | // Perform the migration. |
| 184 | suggested->SetActiveCore(core_id); | 279 | suggested->SetActiveCore(core_id); |
| 185 | priority_queue.ChangeCore(candidate_core, suggested); | 280 | priority_queue.ChangeCore(candidate_core, suggested); |
| 186 | |||
| 187 | top_threads[core_id] = suggested; | 281 | top_threads[core_id] = suggested; |
| 188 | cores_needing_scheduling |= | 282 | cores_needing_scheduling |= |
| 189 | kernel.Scheduler(core_id).UpdateHighestPriorityThread( | 283 | kernel.Scheduler(core_id).UpdateHighestPriorityThread( |
| @@ -200,24 +294,223 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | |||
| 200 | return cores_needing_scheduling; | 294 | return cores_needing_scheduling; |
| 201 | } | 295 | } |
| 202 | 296 | ||
| 297 | void KScheduler::SwitchThread(KThread* next_thread) { | ||
| 298 | KProcess* const cur_process = kernel.CurrentProcess(); | ||
| 299 | KThread* const cur_thread = GetCurrentThreadPointer(kernel); | ||
| 300 | |||
| 301 | // We never want to schedule a null thread, so use the idle thread if we don't have a next. | ||
| 302 | if (next_thread == nullptr) { | ||
| 303 | next_thread = m_idle_thread; | ||
| 304 | } | ||
| 305 | |||
| 306 | if (next_thread->GetCurrentCore() != m_core_id) { | ||
| 307 | next_thread->SetCurrentCore(m_core_id); | ||
| 308 | } | ||
| 309 | |||
| 310 | // If we're not actually switching thread, there's nothing to do. | ||
| 311 | if (next_thread == cur_thread) { | ||
| 312 | return; | ||
| 313 | } | ||
| 314 | |||
| 315 | // Next thread is now known not to be nullptr, and must not be dispatchable. | ||
| 316 | ASSERT(next_thread->GetDisableDispatchCount() == 1); | ||
| 317 | ASSERT(!next_thread->IsDummyThread()); | ||
| 318 | |||
| 319 | // Update the CPU time tracking variables. | ||
| 320 | const s64 prev_tick = m_last_context_switch_time; | ||
| 321 | const s64 cur_tick = kernel.System().CoreTiming().GetCPUTicks(); | ||
| 322 | const s64 tick_diff = cur_tick - prev_tick; | ||
| 323 | cur_thread->AddCpuTime(m_core_id, tick_diff); | ||
| 324 | if (cur_process != nullptr) { | ||
| 325 | cur_process->UpdateCPUTimeTicks(tick_diff); | ||
| 326 | } | ||
| 327 | m_last_context_switch_time = cur_tick; | ||
| 328 | |||
| 329 | // Update our previous thread. | ||
| 330 | if (cur_process != nullptr) { | ||
| 331 | if (!cur_thread->IsTerminationRequested() && cur_thread->GetActiveCore() == m_core_id) | ||
| 332 | [[likely]] { | ||
| 333 | m_state.prev_thread = cur_thread; | ||
| 334 | } else { | ||
| 335 | m_state.prev_thread = nullptr; | ||
| 336 | } | ||
| 337 | } | ||
| 338 | |||
| 339 | // Switch the current process, if we're switching processes. | ||
| 340 | // if (KProcess *next_process = next_thread->GetOwnerProcess(); next_process != cur_process) { | ||
| 341 | // KProcess::Switch(cur_process, next_process); | ||
| 342 | // } | ||
| 343 | |||
| 344 | // Set the new thread. | ||
| 345 | SetCurrentThread(kernel, next_thread); | ||
| 346 | m_current_thread = next_thread; | ||
| 347 | |||
| 348 | // Set the new Thread Local region. | ||
| 349 | // cpu::SwitchThreadLocalRegion(GetInteger(next_thread->GetThreadLocalRegionAddress())); | ||
| 350 | } | ||
| 351 | |||
| 352 | void KScheduler::ScheduleImpl() { | ||
| 353 | // First, clear the needs scheduling bool. | ||
| 354 | m_state.needs_scheduling.store(false, std::memory_order_seq_cst); | ||
| 355 | |||
| 356 | // Load the appropriate thread pointers for scheduling. | ||
| 357 | KThread* const cur_thread{GetCurrentThreadPointer(kernel)}; | ||
| 358 | KThread* highest_priority_thread{m_state.highest_priority_thread}; | ||
| 359 | |||
| 360 | // Check whether there are runnable interrupt tasks. | ||
| 361 | if (m_state.interrupt_task_runnable) { | ||
| 362 | // The interrupt task is runnable. | ||
| 363 | // We want to switch to the interrupt task/idle thread. | ||
| 364 | highest_priority_thread = nullptr; | ||
| 365 | } | ||
| 366 | |||
| 367 | // If there aren't, we want to check if the highest priority thread is the same as the current | ||
| 368 | // thread. | ||
| 369 | if (highest_priority_thread == cur_thread) { | ||
| 370 | // If they're the same, then we can just return. | ||
| 371 | return; | ||
| 372 | } | ||
| 373 | |||
| 374 | // The highest priority thread is not the same as the current thread. | ||
| 375 | // Switch to the idle thread stack and continue executing from there. | ||
| 376 | m_idle_cur_thread = cur_thread; | ||
| 377 | m_idle_highest_priority_thread = highest_priority_thread; | ||
| 378 | Common::Fiber::YieldTo(cur_thread->host_context, *m_idle_stack); | ||
| 379 | |||
| 380 | // Returning from ScheduleImpl occurs after this thread has been scheduled again. | ||
| 381 | } | ||
| 382 | |||
| 383 | void KScheduler::ScheduleImplOffStack() { | ||
| 384 | KThread* const cur_thread{m_idle_cur_thread}; | ||
| 385 | KThread* highest_priority_thread{m_idle_highest_priority_thread}; | ||
| 386 | |||
| 387 | // Get a reference to the current thread's stack parameters. | ||
| 388 | auto& sp{cur_thread->GetStackParameters()}; | ||
| 389 | |||
| 390 | // Save the original thread context. | ||
| 391 | { | ||
| 392 | auto& physical_core = kernel.System().CurrentPhysicalCore(); | ||
| 393 | auto& cpu_core = physical_core.ArmInterface(); | ||
| 394 | cpu_core.SaveContext(cur_thread->GetContext32()); | ||
| 395 | cpu_core.SaveContext(cur_thread->GetContext64()); | ||
| 396 | // Save the TPIDR_EL0 system register in case it was modified. | ||
| 397 | cur_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); | ||
| 398 | cpu_core.ClearExclusiveState(); | ||
| 399 | } | ||
| 400 | |||
| 401 | // Check if the thread is terminated by checking the DPC flags. | ||
| 402 | if ((sp.dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) { | ||
| 403 | // The thread isn't terminated, so we want to unlock it. | ||
| 404 | sp.m_lock.store(false, std::memory_order_seq_cst); | ||
| 405 | } | ||
| 406 | |||
| 407 | // The current thread's context has been entirely taken care of. | ||
| 408 | // Now we want to loop until we successfully switch the thread context. | ||
| 409 | while (true) { | ||
| 410 | // We're starting to try to do the context switch. | ||
| 411 | // Check if the highest priority thread is null. | ||
| 412 | if (!highest_priority_thread) { | ||
| 413 | // The next thread is nullptr! | ||
| 414 | // Switch to nullptr. This will actually switch to the idle thread. | ||
| 415 | SwitchThread(nullptr); | ||
| 416 | |||
| 417 | // We've switched to the idle thread, so we want to process interrupt tasks until we | ||
| 418 | // schedule a non-idle thread. | ||
| 419 | while (!m_state.interrupt_task_runnable) { | ||
| 420 | // Check if we need scheduling. | ||
| 421 | if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { | ||
| 422 | goto retry; | ||
| 423 | } | ||
| 424 | |||
| 425 | // Clear the previous thread. | ||
| 426 | m_state.prev_thread = nullptr; | ||
| 427 | |||
| 428 | // Wait for an interrupt before checking again. | ||
| 429 | kernel.System().GetCpuManager().WaitForAndHandleInterrupt(); | ||
| 430 | } | ||
| 431 | |||
| 432 | // Execute any pending interrupt tasks. | ||
| 433 | // m_state.interrupt_task_manager->DoTasks(); | ||
| 434 | |||
| 435 | // Clear the interrupt task thread as runnable. | ||
| 436 | m_state.interrupt_task_runnable = false; | ||
| 437 | |||
| 438 | // Retry the scheduling loop. | ||
| 439 | goto retry; | ||
| 440 | } else { | ||
| 441 | // We want to try to lock the highest priority thread's context. | ||
| 442 | // Try to take it. | ||
| 443 | bool expected{false}; | ||
| 444 | while (!highest_priority_thread->stack_parameters.m_lock.compare_exchange_strong( | ||
| 445 | expected, true, std::memory_order_seq_cst)) { | ||
| 446 | // The highest priority thread's context is already locked. | ||
| 447 | // Check if we need scheduling. If we don't, we can retry directly. | ||
| 448 | if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { | ||
| 449 | // If we do, another core is interfering, and we must start again. | ||
| 450 | goto retry; | ||
| 451 | } | ||
| 452 | expected = false; | ||
| 453 | } | ||
| 454 | |||
| 455 | // It's time to switch the thread. | ||
| 456 | // Switch to the highest priority thread. | ||
| 457 | SwitchThread(highest_priority_thread); | ||
| 458 | |||
| 459 | // Check if we need scheduling. If we do, then we can't complete the switch and should | ||
| 460 | // retry. | ||
| 461 | if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { | ||
| 462 | // Our switch failed. | ||
| 463 | // We should unlock the thread context, and then retry. | ||
| 464 | highest_priority_thread->stack_parameters.m_lock.store(false, | ||
| 465 | std::memory_order_seq_cst); | ||
| 466 | goto retry; | ||
| 467 | } else { | ||
| 468 | break; | ||
| 469 | } | ||
| 470 | } | ||
| 471 | |||
| 472 | retry: | ||
| 473 | |||
| 474 | // We failed to successfully do the context switch, and need to retry. | ||
| 475 | // Clear needs_scheduling. | ||
| 476 | m_state.needs_scheduling.store(false, std::memory_order_seq_cst); | ||
| 477 | |||
| 478 | // Refresh the highest priority thread. | ||
| 479 | highest_priority_thread = m_state.highest_priority_thread; | ||
| 480 | } | ||
| 481 | |||
| 482 | // Reload the guest thread context. | ||
| 483 | { | ||
| 484 | auto& cpu_core = kernel.System().CurrentArmInterface(); | ||
| 485 | cpu_core.LoadContext(highest_priority_thread->GetContext32()); | ||
| 486 | cpu_core.LoadContext(highest_priority_thread->GetContext64()); | ||
| 487 | cpu_core.SetTlsAddress(highest_priority_thread->GetTLSAddress()); | ||
| 488 | cpu_core.SetTPIDR_EL0(highest_priority_thread->GetTPIDR_EL0()); | ||
| 489 | cpu_core.LoadWatchpointArray(highest_priority_thread->GetOwnerProcess()->GetWatchpoints()); | ||
| 490 | cpu_core.ClearExclusiveState(); | ||
| 491 | } | ||
| 492 | |||
| 493 | // Reload the host thread. | ||
| 494 | Common::Fiber::YieldTo(m_idle_stack, *highest_priority_thread->host_context); | ||
| 495 | } | ||
| 496 | |||
| 203 | void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) { | 497 | void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) { |
| 204 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | 498 | ASSERT(IsSchedulerLockedByCurrentThread(kernel)); |
| 205 | for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) { | 499 | for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) { |
| 206 | // Get an atomic reference to the core scheduler's previous thread. | 500 | // Get an atomic reference to the core scheduler's previous thread. |
| 207 | std::atomic_ref<KThread*> prev_thread(kernel.Scheduler(static_cast<s32>(i)).prev_thread); | 501 | auto& prev_thread{kernel.Scheduler(i).m_state.prev_thread}; |
| 208 | static_assert(std::atomic_ref<KThread*>::is_always_lock_free); | ||
| 209 | 502 | ||
| 210 | // Atomically clear the previous thread if it's our target. | 503 | // Atomically clear the previous thread if it's our target. |
| 211 | KThread* compare = thread; | 504 | KThread* compare = thread; |
| 212 | prev_thread.compare_exchange_strong(compare, nullptr); | 505 | prev_thread.compare_exchange_strong(compare, nullptr, std::memory_order_seq_cst); |
| 213 | } | 506 | } |
| 214 | } | 507 | } |
| 215 | 508 | ||
| 216 | void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) { | 509 | void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) { |
| 217 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | 510 | ASSERT(IsSchedulerLockedByCurrentThread(kernel)); |
| 218 | 511 | ||
| 219 | // Check if the state has changed, because if it hasn't there's nothing to do. | 512 | // Check if the state has changed, because if it hasn't there's nothing to do. |
| 220 | const auto cur_state = thread->GetRawState(); | 513 | const ThreadState cur_state = thread->GetRawState(); |
| 221 | if (cur_state == old_state) { | 514 | if (cur_state == old_state) { |
| 222 | return; | 515 | return; |
| 223 | } | 516 | } |
| @@ -237,12 +530,12 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, Threa | |||
| 237 | } | 530 | } |
| 238 | 531 | ||
| 239 | void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority) { | 532 | void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority) { |
| 240 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | 533 | ASSERT(IsSchedulerLockedByCurrentThread(kernel)); |
| 241 | 534 | ||
| 242 | // If the thread is runnable, we want to change its priority in the queue. | 535 | // If the thread is runnable, we want to change its priority in the queue. |
| 243 | if (thread->GetRawState() == ThreadState::Runnable) { | 536 | if (thread->GetRawState() == ThreadState::Runnable) { |
| 244 | GetPriorityQueue(kernel).ChangePriority(old_priority, | 537 | GetPriorityQueue(kernel).ChangePriority(old_priority, |
| 245 | thread == kernel.GetCurrentEmuThread(), thread); | 538 | thread == GetCurrentThreadPointer(kernel), thread); |
| 246 | IncrementScheduledCount(thread); | 539 | IncrementScheduledCount(thread); |
| 247 | SetSchedulerUpdateNeeded(kernel); | 540 | SetSchedulerUpdateNeeded(kernel); |
| 248 | } | 541 | } |
| @@ -250,7 +543,7 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s3 | |||
| 250 | 543 | ||
| 251 | void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread, | 544 | void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread, |
| 252 | const KAffinityMask& old_affinity, s32 old_core) { | 545 | const KAffinityMask& old_affinity, s32 old_core) { |
| 253 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | 546 | ASSERT(IsSchedulerLockedByCurrentThread(kernel)); |
| 254 | 547 | ||
| 255 | // If the thread is runnable, we want to change its affinity in the queue. | 548 | // If the thread is runnable, we want to change its affinity in the queue. |
| 256 | if (thread->GetRawState() == ThreadState::Runnable) { | 549 | if (thread->GetRawState() == ThreadState::Runnable) { |
| @@ -260,15 +553,14 @@ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread | |||
| 260 | } | 553 | } |
| 261 | } | 554 | } |
| 262 | 555 | ||
| 263 | void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | 556 | void KScheduler::RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 priority) { |
| 264 | ASSERT(system.GlobalSchedulerContext().IsLocked()); | 557 | ASSERT(IsSchedulerLockedByCurrentThread(kernel)); |
| 265 | 558 | ||
| 266 | // Get a reference to the priority queue. | 559 | // Get a reference to the priority queue. |
| 267 | auto& kernel = system.Kernel(); | ||
| 268 | auto& priority_queue = GetPriorityQueue(kernel); | 560 | auto& priority_queue = GetPriorityQueue(kernel); |
| 269 | 561 | ||
| 270 | // Rotate the front of the queue to the end. | 562 | // Rotate the front of the queue to the end. |
| 271 | KThread* top_thread = priority_queue.GetScheduledFront(cpu_core_id, priority); | 563 | KThread* top_thread = priority_queue.GetScheduledFront(core_id, priority); |
| 272 | KThread* next_thread = nullptr; | 564 | KThread* next_thread = nullptr; |
| 273 | if (top_thread != nullptr) { | 565 | if (top_thread != nullptr) { |
| 274 | next_thread = priority_queue.MoveToScheduledBack(top_thread); | 566 | next_thread = priority_queue.MoveToScheduledBack(top_thread); |
| @@ -280,7 +572,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | |||
| 280 | 572 | ||
| 281 | // While we have a suggested thread, try to migrate it! | 573 | // While we have a suggested thread, try to migrate it! |
| 282 | { | 574 | { |
| 283 | KThread* suggested = priority_queue.GetSuggestedFront(cpu_core_id, priority); | 575 | KThread* suggested = priority_queue.GetSuggestedFront(core_id, priority); |
| 284 | while (suggested != nullptr) { | 576 | while (suggested != nullptr) { |
| 285 | // Check if the suggested thread is the top thread on its core. | 577 | // Check if the suggested thread is the top thread on its core. |
| 286 | const s32 suggested_core = suggested->GetActiveCore(); | 578 | const s32 suggested_core = suggested->GetActiveCore(); |
| @@ -301,7 +593,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | |||
| 301 | // to the front of the queue. | 593 | // to the front of the queue. |
| 302 | if (top_on_suggested_core == nullptr || | 594 | if (top_on_suggested_core == nullptr || |
| 303 | top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { | 595 | top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { |
| 304 | suggested->SetActiveCore(cpu_core_id); | 596 | suggested->SetActiveCore(core_id); |
| 305 | priority_queue.ChangeCore(suggested_core, suggested, true); | 597 | priority_queue.ChangeCore(suggested_core, suggested, true); |
| 306 | IncrementScheduledCount(suggested); | 598 | IncrementScheduledCount(suggested); |
| 307 | break; | 599 | break; |
| @@ -309,22 +601,21 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | |||
| 309 | } | 601 | } |
| 310 | 602 | ||
| 311 | // Get the next suggestion. | 603 | // Get the next suggestion. |
| 312 | suggested = priority_queue.GetSamePriorityNext(cpu_core_id, suggested); | 604 | suggested = priority_queue.GetSamePriorityNext(core_id, suggested); |
| 313 | } | 605 | } |
| 314 | } | 606 | } |
| 315 | 607 | ||
| 316 | // Now that we might have migrated a thread with the same priority, check if we can do better. | 608 | // Now that we might have migrated a thread with the same priority, check if we can do better. |
| 317 | |||
| 318 | { | 609 | { |
| 319 | KThread* best_thread = priority_queue.GetScheduledFront(cpu_core_id); | 610 | KThread* best_thread = priority_queue.GetScheduledFront(core_id); |
| 320 | if (best_thread == GetCurrentThreadPointer(kernel)) { | 611 | if (best_thread == GetCurrentThreadPointer(kernel)) { |
| 321 | best_thread = priority_queue.GetScheduledNext(cpu_core_id, best_thread); | 612 | best_thread = priority_queue.GetScheduledNext(core_id, best_thread); |
| 322 | } | 613 | } |
| 323 | 614 | ||
| 324 | // If the best thread we can choose has a priority the same or worse than ours, try to | 615 | // If the best thread we can choose has a priority the same or worse than ours, try to |
| 325 | // migrate a higher priority thread. | 616 | // migrate a higher priority thread. |
| 326 | if (best_thread != nullptr && best_thread->GetPriority() >= priority) { | 617 | if (best_thread != nullptr && best_thread->GetPriority() >= priority) { |
| 327 | KThread* suggested = priority_queue.GetSuggestedFront(cpu_core_id); | 618 | KThread* suggested = priority_queue.GetSuggestedFront(core_id); |
| 328 | while (suggested != nullptr) { | 619 | while (suggested != nullptr) { |
| 329 | // If the suggestion's priority is the same as ours, don't bother. | 620 | // If the suggestion's priority is the same as ours, don't bother. |
| 330 | if (suggested->GetPriority() >= best_thread->GetPriority()) { | 621 | if (suggested->GetPriority() >= best_thread->GetPriority()) { |
| @@ -343,7 +634,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | |||
| 343 | if (top_on_suggested_core == nullptr || | 634 | if (top_on_suggested_core == nullptr || |
| 344 | top_on_suggested_core->GetPriority() >= | 635 | top_on_suggested_core->GetPriority() >= |
| 345 | HighestCoreMigrationAllowedPriority) { | 636 | HighestCoreMigrationAllowedPriority) { |
| 346 | suggested->SetActiveCore(cpu_core_id); | 637 | suggested->SetActiveCore(core_id); |
| 347 | priority_queue.ChangeCore(suggested_core, suggested, true); | 638 | priority_queue.ChangeCore(suggested_core, suggested, true); |
| 348 | IncrementScheduledCount(suggested); | 639 | IncrementScheduledCount(suggested); |
| 349 | break; | 640 | break; |
| @@ -351,7 +642,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | |||
| 351 | } | 642 | } |
| 352 | 643 | ||
| 353 | // Get the next suggestion. | 644 | // Get the next suggestion. |
| 354 | suggested = priority_queue.GetSuggestedNext(cpu_core_id, suggested); | 645 | suggested = priority_queue.GetSuggestedNext(core_id, suggested); |
| 355 | } | 646 | } |
| 356 | } | 647 | } |
| 357 | } | 648 | } |
| @@ -360,64 +651,6 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { | |||
| 360 | SetSchedulerUpdateNeeded(kernel); | 651 | SetSchedulerUpdateNeeded(kernel); |
| 361 | } | 652 | } |
| 362 | 653 | ||
| 363 | bool KScheduler::CanSchedule(KernelCore& kernel) { | ||
| 364 | return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() <= 1; | ||
| 365 | } | ||
| 366 | |||
| 367 | bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) { | ||
| 368 | return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire); | ||
| 369 | } | ||
| 370 | |||
| 371 | void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) { | ||
| 372 | kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release); | ||
| 373 | } | ||
| 374 | |||
| 375 | void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) { | ||
| 376 | kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release); | ||
| 377 | } | ||
| 378 | |||
| 379 | void KScheduler::DisableScheduling(KernelCore& kernel) { | ||
| 380 | // If we are shutting down the kernel, none of this is relevant anymore. | ||
| 381 | if (kernel.IsShuttingDown()) { | ||
| 382 | return; | ||
| 383 | } | ||
| 384 | |||
| 385 | ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0); | ||
| 386 | GetCurrentThreadPointer(kernel)->DisableDispatch(); | ||
| 387 | } | ||
| 388 | |||
| 389 | void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) { | ||
| 390 | // If we are shutting down the kernel, none of this is relevant anymore. | ||
| 391 | if (kernel.IsShuttingDown()) { | ||
| 392 | return; | ||
| 393 | } | ||
| 394 | |||
| 395 | auto* current_thread = GetCurrentThreadPointer(kernel); | ||
| 396 | |||
| 397 | ASSERT(current_thread->GetDisableDispatchCount() >= 1); | ||
| 398 | |||
| 399 | if (current_thread->GetDisableDispatchCount() > 1) { | ||
| 400 | current_thread->EnableDispatch(); | ||
| 401 | } else { | ||
| 402 | RescheduleCores(kernel, cores_needing_scheduling); | ||
| 403 | } | ||
| 404 | |||
| 405 | // Special case to ensure dummy threads that are waiting block. | ||
| 406 | current_thread->IfDummyThreadTryWait(); | ||
| 407 | } | ||
| 408 | |||
| 409 | u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { | ||
| 410 | if (IsSchedulerUpdateNeeded(kernel)) { | ||
| 411 | return UpdateHighestPriorityThreadsImpl(kernel); | ||
| 412 | } else { | ||
| 413 | return 0; | ||
| 414 | } | ||
| 415 | } | ||
| 416 | |||
| 417 | KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) { | ||
| 418 | return kernel.GlobalSchedulerContext().priority_queue; | ||
| 419 | } | ||
| 420 | |||
| 421 | void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) { | 654 | void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) { |
| 422 | // Validate preconditions. | 655 | // Validate preconditions. |
| 423 | ASSERT(CanSchedule(kernel)); | 656 | ASSERT(CanSchedule(kernel)); |
| @@ -437,7 +670,7 @@ void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) { | |||
| 437 | 670 | ||
| 438 | // Perform the yield. | 671 | // Perform the yield. |
| 439 | { | 672 | { |
| 440 | KScopedSchedulerLock lock(kernel); | 673 | KScopedSchedulerLock sl{kernel}; |
| 441 | 674 | ||
| 442 | const auto cur_state = cur_thread.GetRawState(); | 675 | const auto cur_state = cur_thread.GetRawState(); |
| 443 | if (cur_state == ThreadState::Runnable) { | 676 | if (cur_state == ThreadState::Runnable) { |
| @@ -476,7 +709,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) { | |||
| 476 | 709 | ||
| 477 | // Perform the yield. | 710 | // Perform the yield. |
| 478 | { | 711 | { |
| 479 | KScopedSchedulerLock lock(kernel); | 712 | KScopedSchedulerLock sl{kernel}; |
| 480 | 713 | ||
| 481 | const auto cur_state = cur_thread.GetRawState(); | 714 | const auto cur_state = cur_thread.GetRawState(); |
| 482 | if (cur_state == ThreadState::Runnable) { | 715 | if (cur_state == ThreadState::Runnable) { |
| @@ -496,7 +729,7 @@ void KScheduler::YieldWithCoreMigration(KernelCore& kernel) { | |||
| 496 | 729 | ||
| 497 | if (KThread* running_on_suggested_core = | 730 | if (KThread* running_on_suggested_core = |
| 498 | (suggested_core >= 0) | 731 | (suggested_core >= 0) |
| 499 | ? kernel.Scheduler(suggested_core).state.highest_priority_thread | 732 | ? kernel.Scheduler(suggested_core).m_state.highest_priority_thread |
| 500 | : nullptr; | 733 | : nullptr; |
| 501 | running_on_suggested_core != suggested) { | 734 | running_on_suggested_core != suggested) { |
| 502 | // If the current thread's priority is higher than our suggestion's we prefer | 735 | // If the current thread's priority is higher than our suggestion's we prefer |
| @@ -564,7 +797,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) { | |||
| 564 | 797 | ||
| 565 | // Perform the yield. | 798 | // Perform the yield. |
| 566 | { | 799 | { |
| 567 | KScopedSchedulerLock lock(kernel); | 800 | KScopedSchedulerLock sl{kernel}; |
| 568 | 801 | ||
| 569 | const auto cur_state = cur_thread.GetRawState(); | 802 | const auto cur_state = cur_thread.GetRawState(); |
| 570 | if (cur_state == ThreadState::Runnable) { | 803 | if (cur_state == ThreadState::Runnable) { |
| @@ -621,223 +854,19 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) { | |||
| 621 | } | 854 | } |
| 622 | } | 855 | } |
| 623 | 856 | ||
| 624 | KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, core_id{core_id_} { | 857 | void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) { |
| 625 | switch_fiber = std::make_shared<Common::Fiber>([this] { SwitchToCurrent(); }); | 858 | if (const u64 core_mask = cores_needing_scheduling & ~(1ULL << m_core_id); core_mask != 0) { |
| 626 | state.needs_scheduling.store(true); | 859 | RescheduleCores(kernel, core_mask); |
| 627 | state.interrupt_task_thread_runnable = false; | ||
| 628 | state.should_count_idle = false; | ||
| 629 | state.idle_count = 0; | ||
| 630 | state.idle_thread_stack = nullptr; | ||
| 631 | state.highest_priority_thread = nullptr; | ||
| 632 | } | ||
| 633 | |||
| 634 | void KScheduler::Finalize() { | ||
| 635 | if (idle_thread) { | ||
| 636 | idle_thread->Close(); | ||
| 637 | idle_thread = nullptr; | ||
| 638 | } | ||
| 639 | } | ||
| 640 | |||
| 641 | KScheduler::~KScheduler() { | ||
| 642 | ASSERT(!idle_thread); | ||
| 643 | } | ||
| 644 | |||
| 645 | KThread* KScheduler::GetSchedulerCurrentThread() const { | ||
| 646 | if (auto result = current_thread.load(); result) { | ||
| 647 | return result; | ||
| 648 | } | 860 | } |
| 649 | return idle_thread; | ||
| 650 | } | ||
| 651 | |||
| 652 | u64 KScheduler::GetLastContextSwitchTicks() const { | ||
| 653 | return last_context_switch_time; | ||
| 654 | } | 861 | } |
| 655 | 862 | ||
| 656 | void KScheduler::RescheduleCurrentCore() { | 863 | void KScheduler::RescheduleCores(KernelCore& kernel, u64 core_mask) { |
| 657 | ASSERT(GetCurrentThread(system.Kernel()).GetDisableDispatchCount() == 1); | 864 | // Send IPI |
| 658 | 865 | for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { | |
| 659 | auto& phys_core = system.Kernel().PhysicalCore(core_id); | 866 | if (core_mask & (1ULL << i)) { |
| 660 | if (phys_core.IsInterrupted()) { | 867 | kernel.PhysicalCore(i).Interrupt(); |
| 661 | phys_core.ClearInterrupt(); | ||
| 662 | } | ||
| 663 | |||
| 664 | guard.Lock(); | ||
| 665 | if (state.needs_scheduling.load()) { | ||
| 666 | Schedule(); | ||
| 667 | } else { | ||
| 668 | GetCurrentThread(system.Kernel()).EnableDispatch(); | ||
| 669 | guard.Unlock(); | ||
| 670 | } | ||
| 671 | } | ||
| 672 | |||
| 673 | void KScheduler::OnThreadStart() { | ||
| 674 | SwitchContextStep2(); | ||
| 675 | } | ||
| 676 | |||
| 677 | void KScheduler::Unload(KThread* thread) { | ||
| 678 | ASSERT(thread); | ||
| 679 | |||
| 680 | LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr"); | ||
| 681 | |||
| 682 | if (thread->IsCallingSvc()) { | ||
| 683 | thread->ClearIsCallingSvc(); | ||
| 684 | } | ||
| 685 | |||
| 686 | auto& physical_core = system.Kernel().PhysicalCore(core_id); | ||
| 687 | if (!physical_core.IsInitialized()) { | ||
| 688 | return; | ||
| 689 | } | ||
| 690 | |||
| 691 | Core::ARM_Interface& cpu_core = physical_core.ArmInterface(); | ||
| 692 | cpu_core.SaveContext(thread->GetContext32()); | ||
| 693 | cpu_core.SaveContext(thread->GetContext64()); | ||
| 694 | // Save the TPIDR_EL0 system register in case it was modified. | ||
| 695 | thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); | ||
| 696 | cpu_core.ClearExclusiveState(); | ||
| 697 | |||
| 698 | if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) { | ||
| 699 | prev_thread = thread; | ||
| 700 | } else { | ||
| 701 | prev_thread = nullptr; | ||
| 702 | } | ||
| 703 | |||
| 704 | thread->context_guard.unlock(); | ||
| 705 | } | ||
| 706 | |||
| 707 | void KScheduler::Reload(KThread* thread) { | ||
| 708 | LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread->GetName()); | ||
| 709 | |||
| 710 | Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); | ||
| 711 | cpu_core.LoadContext(thread->GetContext32()); | ||
| 712 | cpu_core.LoadContext(thread->GetContext64()); | ||
| 713 | cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints()); | ||
| 714 | cpu_core.SetTlsAddress(thread->GetTLSAddress()); | ||
| 715 | cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0()); | ||
| 716 | cpu_core.ClearExclusiveState(); | ||
| 717 | } | ||
| 718 | |||
| 719 | void KScheduler::SwitchContextStep2() { | ||
| 720 | // Load context of new thread | ||
| 721 | Reload(GetCurrentThreadPointer(system.Kernel())); | ||
| 722 | |||
| 723 | RescheduleCurrentCore(); | ||
| 724 | } | ||
| 725 | |||
| 726 | void KScheduler::Schedule() { | ||
| 727 | ASSERT(GetCurrentThread(system.Kernel()).GetDisableDispatchCount() == 1); | ||
| 728 | this->ScheduleImpl(); | ||
| 729 | } | ||
| 730 | |||
| 731 | void KScheduler::ScheduleImpl() { | ||
| 732 | KThread* previous_thread = GetCurrentThreadPointer(system.Kernel()); | ||
| 733 | KThread* next_thread = state.highest_priority_thread; | ||
| 734 | |||
| 735 | state.needs_scheduling.store(false); | ||
| 736 | |||
| 737 | // We never want to schedule a null thread, so use the idle thread if we don't have a next. | ||
| 738 | if (next_thread == nullptr) { | ||
| 739 | next_thread = idle_thread; | ||
| 740 | } | ||
| 741 | |||
| 742 | if (next_thread->GetCurrentCore() != core_id) { | ||
| 743 | next_thread->SetCurrentCore(core_id); | ||
| 744 | } | ||
| 745 | |||
| 746 | // We never want to schedule a dummy thread, as these are only used by host threads for locking. | ||
| 747 | if (next_thread->GetThreadType() == ThreadType::Dummy) { | ||
| 748 | ASSERT_MSG(false, "Dummy threads should never be scheduled!"); | ||
| 749 | next_thread = idle_thread; | ||
| 750 | } | ||
| 751 | |||
| 752 | // If we're not actually switching threads, there's nothing to do. | ||
| 753 | if (next_thread == current_thread.load()) { | ||
| 754 | previous_thread->EnableDispatch(); | ||
| 755 | guard.Unlock(); | ||
| 756 | return; | ||
| 757 | } | ||
| 758 | |||
| 759 | // Update the CPU time tracking variables. | ||
| 760 | KProcess* const previous_process = system.Kernel().CurrentProcess(); | ||
| 761 | UpdateLastContextSwitchTime(previous_thread, previous_process); | ||
| 762 | |||
| 763 | // Save context for previous thread | ||
| 764 | Unload(previous_thread); | ||
| 765 | |||
| 766 | std::shared_ptr<Common::Fiber>* old_context; | ||
| 767 | old_context = &previous_thread->GetHostContext(); | ||
| 768 | |||
| 769 | // Set the new thread. | ||
| 770 | SetCurrentThread(system.Kernel(), next_thread); | ||
| 771 | current_thread.store(next_thread); | ||
| 772 | |||
| 773 | guard.Unlock(); | ||
| 774 | |||
| 775 | Common::Fiber::YieldTo(*old_context, *switch_fiber); | ||
| 776 | /// When a thread wakes up, the scheduler may have switched to a different thread on another core. | ||
| 777 | auto& next_scheduler = *system.Kernel().CurrentScheduler(); | ||
| 778 | next_scheduler.SwitchContextStep2(); | ||
| 779 | } | ||
| 780 | |||
| 781 | void KScheduler::SwitchToCurrent() { | ||
| 782 | while (true) { | ||
| 783 | { | ||
| 784 | KScopedSpinLock lk{guard}; | ||
| 785 | current_thread.store(state.highest_priority_thread); | ||
| 786 | state.needs_scheduling.store(false); | ||
| 787 | } | 868 | } |
| 788 | const auto is_switch_pending = [this] { | ||
| 789 | KScopedSpinLock lk{guard}; | ||
| 790 | return state.needs_scheduling.load(); | ||
| 791 | }; | ||
| 792 | do { | ||
| 793 | auto next_thread = current_thread.load(); | ||
| 794 | if (next_thread != nullptr) { | ||
| 795 | const auto locked = next_thread->context_guard.try_lock(); | ||
| 796 | if (state.needs_scheduling.load()) { | ||
| 797 | next_thread->context_guard.unlock(); | ||
| 798 | break; | ||
| 799 | } | ||
| 800 | if (next_thread->GetActiveCore() != core_id) { | ||
| 801 | next_thread->context_guard.unlock(); | ||
| 802 | break; | ||
| 803 | } | ||
| 804 | if (!locked) { | ||
| 805 | continue; | ||
| 806 | } | ||
| 807 | } | ||
| 808 | auto thread = next_thread ? next_thread : idle_thread; | ||
| 809 | SetCurrentThread(system.Kernel(), thread); | ||
| 810 | Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext()); | ||
| 811 | } while (!is_switch_pending()); | ||
| 812 | } | 869 | } |
| 813 | } | 870 | } |
| 814 | 871 | ||
| 815 | void KScheduler::UpdateLastContextSwitchTime(KThread* thread, KProcess* process) { | ||
| 816 | const u64 prev_switch_ticks = last_context_switch_time; | ||
| 817 | const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks(); | ||
| 818 | const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; | ||
| 819 | |||
| 820 | if (thread != nullptr) { | ||
| 821 | thread->AddCpuTime(core_id, update_ticks); | ||
| 822 | } | ||
| 823 | |||
| 824 | if (process != nullptr) { | ||
| 825 | process->UpdateCPUTimeTicks(update_ticks); | ||
| 826 | } | ||
| 827 | |||
| 828 | last_context_switch_time = most_recent_switch_ticks; | ||
| 829 | } | ||
| 830 | |||
| 831 | void KScheduler::Initialize() { | ||
| 832 | idle_thread = KThread::Create(system.Kernel()); | ||
| 833 | ASSERT(KThread::InitializeIdleThread(system, idle_thread, core_id).IsSuccess()); | ||
| 834 | idle_thread->SetName(fmt::format("IdleThread:{}", core_id)); | ||
| 835 | idle_thread->EnableDispatch(); | ||
| 836 | } | ||
| 837 | |||
| 838 | KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel) | ||
| 839 | : KScopedLock(kernel.GlobalSchedulerContext().SchedulerLock()) {} | ||
| 840 | |||
| 841 | KScopedSchedulerLock::~KScopedSchedulerLock() = default; | ||
| 842 | |||
| 843 | } // namespace Kernel | 872 | } // namespace Kernel |
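The rewritten scheduler above reschedules remote cores by building a bit mask of cores whose highest-priority thread changed and interrupting each one, and it defers that work while the current thread still has dispatching disabled. The following standalone sketch illustrates just that mask-plus-counter shape; it is not yuzu code, and names such as `FakeCore`, `FakeThread`, and `Interrupt()` are invented for the example.

```cpp
// Standalone illustration only; not yuzu code.
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t NumCores = 4;

struct FakeCore {
    int interrupt_count{0};
    void Interrupt() { ++interrupt_count; } // stands in for signalling a physical core
};

struct FakeThread {
    int disable_dispatch_count{0};
    void DisableDispatch() { ++disable_dispatch_count; }
    void EnableDispatch() { --disable_dispatch_count; }
};

std::array<FakeCore, NumCores> g_cores{};

// Mirrors the shape of RescheduleCores: every set bit in the mask is a core
// whose highest-priority thread changed and which must be interrupted.
void RescheduleCores(std::uint64_t core_mask) {
    for (std::size_t i = 0; i < NumCores; ++i) {
        if (core_mask & (1ULL << i)) {
            g_cores[i].Interrupt();
        }
    }
}

// Mirrors the shape of EnableScheduling: nested disables only decrement the
// counter; only the outermost enable actually triggers rescheduling.
void EnableScheduling(FakeThread& current, std::uint64_t cores_needing_scheduling) {
    if (current.disable_dispatch_count > 1) {
        current.EnableDispatch();
    } else {
        RescheduleCores(cores_needing_scheduling);
    }
}

int main() {
    FakeThread t{};
    t.DisableDispatch();
    t.DisableDispatch();
    EnableScheduling(t, 0b0101); // nested: no interrupts yet
    EnableScheduling(t, 0b0101); // outermost: cores 0 and 2 get interrupted
    std::printf("core0=%d core2=%d\n", g_cores[0].interrupt_count, g_cores[2].interrupt_count);
}
```

In the real kernel the interrupt is delivered through the interrupt manager and the physical core, but the gating idea is the same: work is batched while dispatch is disabled and flushed once at the outermost enable.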
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index 6a4760eca..8f4eebf6a 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include "core/hle/kernel/k_scheduler_lock.h" | 11 | #include "core/hle/kernel/k_scheduler_lock.h" |
| 12 | #include "core/hle/kernel/k_scoped_lock.h" | 12 | #include "core/hle/kernel/k_scoped_lock.h" |
| 13 | #include "core/hle/kernel/k_spin_lock.h" | 13 | #include "core/hle/kernel/k_spin_lock.h" |
| 14 | #include "core/hle/kernel/k_thread.h" | ||
| 14 | 15 | ||
| 15 | namespace Common { | 16 | namespace Common { |
| 16 | class Fiber; | 17 | class Fiber; |
| @@ -23,184 +24,139 @@ class System; | |||
| 23 | namespace Kernel { | 24 | namespace Kernel { |
| 24 | 25 | ||
| 25 | class KernelCore; | 26 | class KernelCore; |
| 27 | class KInterruptTaskManager; | ||
| 26 | class KProcess; | 28 | class KProcess; |
| 27 | class SchedulerLock; | ||
| 28 | class KThread; | 29 | class KThread; |
| 30 | class KScopedDisableDispatch; | ||
| 31 | class KScopedSchedulerLock; | ||
| 32 | class KScopedSchedulerLockAndSleep; | ||
| 29 | 33 | ||
| 30 | class KScheduler final { | 34 | class KScheduler final { |
| 31 | public: | 35 | public: |
| 32 | explicit KScheduler(Core::System& system_, s32 core_id_); | 36 | YUZU_NON_COPYABLE(KScheduler); |
| 33 | ~KScheduler(); | 37 | YUZU_NON_MOVEABLE(KScheduler); |
| 34 | |||
| 35 | void Finalize(); | ||
| 36 | 38 | ||
| 37 | /// Reschedules to the next available thread (call after current thread is suspended) | 39 | using LockType = KAbstractSchedulerLock<KScheduler>; |
| 38 | void RescheduleCurrentCore(); | ||
| 39 | 40 | ||
| 40 | /// Reschedules cores pending reschedule, to be called on EnableScheduling. | 41 | explicit KScheduler(KernelCore& kernel); |
| 41 | static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule); | 42 | ~KScheduler(); |
| 42 | |||
| 43 | /// The next two are for SingleCore Only. | ||
| 44 | /// Unload current thread before preempting core. | ||
| 45 | void Unload(KThread* thread); | ||
| 46 | 43 | ||
| 47 | /// Reload current thread after core preemption. | 44 | void Initialize(KThread* idle_thread); |
| 48 | void Reload(KThread* thread); | 45 | void Activate(); |
| 49 | 46 | ||
| 50 | /// Gets the current running thread | 47 | void SetInterruptTaskRunnable(); |
| 51 | [[nodiscard]] KThread* GetSchedulerCurrentThread() const; | 48 | void RequestScheduleOnInterrupt(); |
| 52 | 49 | ||
| 53 | /// Gets the idle thread | 50 | u64 GetIdleCount() { |
| 54 | [[nodiscard]] KThread* GetIdleThread() const { | 51 | return m_state.idle_count; |
| 55 | return idle_thread; | ||
| 56 | } | 52 | } |
| 57 | 53 | ||
| 58 | /// Returns true if the scheduler is idle | 54 | KThread* GetIdleThread() const { |
| 59 | [[nodiscard]] bool IsIdle() const { | 55 | return m_idle_thread; |
| 60 | return GetSchedulerCurrentThread() == idle_thread; | ||
| 61 | } | 56 | } |
| 62 | 57 | ||
| 63 | /// Gets the timestamp for the last context switch in ticks. | 58 | KThread* GetPreviousThread() const { |
| 64 | [[nodiscard]] u64 GetLastContextSwitchTicks() const; | 59 | return m_state.prev_thread; |
| 65 | |||
| 66 | [[nodiscard]] bool ContextSwitchPending() const { | ||
| 67 | return state.needs_scheduling.load(std::memory_order_relaxed); | ||
| 68 | } | 60 | } |
| 69 | 61 | ||
| 70 | void Initialize(); | 62 | KThread* GetSchedulerCurrentThread() const { |
| 71 | 63 | return m_current_thread.load(); | |
| 72 | void OnThreadStart(); | ||
| 73 | |||
| 74 | [[nodiscard]] std::shared_ptr<Common::Fiber>& ControlContext() { | ||
| 75 | return switch_fiber; | ||
| 76 | } | 64 | } |
| 77 | 65 | ||
| 78 | [[nodiscard]] const std::shared_ptr<Common::Fiber>& ControlContext() const { | 66 | s64 GetLastContextSwitchTime() const { |
| 79 | return switch_fiber; | 67 | return m_last_context_switch_time; |
| 80 | } | 68 | } |
| 81 | 69 | ||
| 82 | [[nodiscard]] u64 UpdateHighestPriorityThread(KThread* highest_thread); | 70 | // Static public API. |
| 71 | static bool CanSchedule(KernelCore& kernel) { | ||
| 72 | return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() == 0; | ||
| 73 | } | ||
| 74 | static bool IsSchedulerLockedByCurrentThread(KernelCore& kernel) { | ||
| 75 | return kernel.GlobalSchedulerContext().scheduler_lock.IsLockedByCurrentThread(); | ||
| 76 | } | ||
| 83 | 77 | ||
| 84 | /** | 78 | static bool IsSchedulerUpdateNeeded(KernelCore& kernel) { |
| 85 | * Takes a thread and moves it to the back of its priority list. | 79 | return kernel.GlobalSchedulerContext().scheduler_update_needed; |
| 86 | * | 80 | } |
| 87 | * @note This operation can be redundant and no scheduling is changed if marked as so. | 81 | static void SetSchedulerUpdateNeeded(KernelCore& kernel) { |
| 88 | */ | 82 | kernel.GlobalSchedulerContext().scheduler_update_needed = true; |
| 89 | static void YieldWithoutCoreMigration(KernelCore& kernel); | 83 | } |
| 84 | static void ClearSchedulerUpdateNeeded(KernelCore& kernel) { | ||
| 85 | kernel.GlobalSchedulerContext().scheduler_update_needed = false; | ||
| 86 | } | ||
| 90 | 87 | ||
| 91 | /** | 88 | static void DisableScheduling(KernelCore& kernel); |
| 92 | * Takes a thread and moves it to the back of its priority list. | 89 | static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling); |
| 93 | * Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or | ||
| 94 | * a better priority than the next thread in the core. | ||
| 95 | * | ||
| 96 | * @note This operation can be redundant and no scheduling is changed if marked as so. | ||
| 97 | */ | ||
| 98 | static void YieldWithCoreMigration(KernelCore& kernel); | ||
| 99 | 90 | ||
| 100 | /** | 91 | static u64 UpdateHighestPriorityThreads(KernelCore& kernel); |
| 101 | * Takes a thread and moves it out of the scheduling queue | ||
| 102 | * and into the suggested queue. If no thread can be scheduled afterwards in that core, | ||
| 103 | * a suggested thread is obtained instead. | ||
| 104 | * | ||
| 105 | * @note This operation can be redundant and no scheduling is changed if marked as so. | ||
| 106 | */ | ||
| 107 | static void YieldToAnyThread(KernelCore& kernel); | ||
| 108 | 92 | ||
| 109 | static void ClearPreviousThread(KernelCore& kernel, KThread* thread); | 93 | static void ClearPreviousThread(KernelCore& kernel, KThread* thread); |
| 110 | 94 | ||
| 111 | /// Notify the scheduler a thread's status has changed. | ||
| 112 | static void OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state); | 95 | static void OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state); |
| 113 | |||
| 114 | /// Notify the scheduler a thread's priority has changed. | ||
| 115 | static void OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority); | 96 | static void OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority); |
| 116 | |||
| 117 | /// Notify the scheduler a thread's core and/or affinity mask has changed. | ||
| 118 | static void OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread, | 97 | static void OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread, |
| 119 | const KAffinityMask& old_affinity, s32 old_core); | 98 | const KAffinityMask& old_affinity, s32 old_core); |
| 120 | 99 | ||
| 121 | static bool CanSchedule(KernelCore& kernel); | 100 | static void RotateScheduledQueue(KernelCore& kernel, s32 core_id, s32 priority); |
| 122 | static bool IsSchedulerUpdateNeeded(const KernelCore& kernel); | 101 | static void RescheduleCores(KernelCore& kernel, u64 cores_needing_scheduling); |
| 123 | static void SetSchedulerUpdateNeeded(KernelCore& kernel); | ||
| 124 | static void ClearSchedulerUpdateNeeded(KernelCore& kernel); | ||
| 125 | static void DisableScheduling(KernelCore& kernel); | ||
| 126 | static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling); | ||
| 127 | [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel); | ||
| 128 | 102 | ||
| 129 | private: | 103 | static void YieldWithoutCoreMigration(KernelCore& kernel); |
| 130 | friend class GlobalSchedulerContext; | 104 | static void YieldWithCoreMigration(KernelCore& kernel); |
| 131 | 105 | static void YieldToAnyThread(KernelCore& kernel); | |
| 132 | /** | ||
| 133 | * Takes care of selecting the new scheduled threads in three steps: | ||
| 134 | * | ||
| 135 | * 1. First a thread is selected from the top of the priority queue. If no thread | ||
| 136 | * is obtained then we move to step two, else we are done. | ||
| 137 | * | ||
| 138 | * 2. Second we try to get a suggested thread that's not assigned to any core or | ||
| 139 | * that is not the top thread in that core. | ||
| 140 | * | ||
| 141 | * 3. Third, if no suggested thread is found, we do a second pass and pick a running | ||
| 142 | * thread in another core and swap it with its current thread. | ||
| 143 | * | ||
| 144 | * returns the cores needing scheduling. | ||
| 145 | */ | ||
| 146 | [[nodiscard]] static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel); | ||
| 147 | |||
| 148 | [[nodiscard]] static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel); | ||
| 149 | |||
| 150 | void RotateScheduledQueue(s32 cpu_core_id, s32 priority); | ||
| 151 | 106 | ||
| 152 | void Schedule(); | 107 | private: |
| 108 | // Static private API. | ||
| 109 | static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel) { | ||
| 110 | return kernel.GlobalSchedulerContext().priority_queue; | ||
| 111 | } | ||
| 112 | static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel); | ||
| 153 | 113 | ||
| 154 | /// Switches the CPU's active thread context to that of the specified thread | 114 | // Instanced private API. |
| 155 | void ScheduleImpl(); | 115 | void ScheduleImpl(); |
| 116 | void ScheduleImplOffStack(); | ||
| 117 | void SwitchThread(KThread* next_thread); | ||
| 156 | 118 | ||
| 157 | /// When a thread wakes up, it must run this through its new scheduler | 119 | void Schedule(); |
| 158 | void SwitchContextStep2(); | 120 | void ScheduleOnInterrupt(); |
| 159 | |||
| 160 | /** | ||
| 161 | * Called on every context switch to update the internal timestamp | ||
| 162 | * This also updates the running time ticks for the given thread and | ||
| 163 | * process using the following difference: | ||
| 164 | * | ||
| 165 | * ticks += most_recent_ticks - last_context_switch_ticks | ||
| 166 | * | ||
| 167 | * The internal tick timestamp for the scheduler is simply the | ||
| 168 | * most recent tick count retrieved. No special arithmetic is | ||
| 169 | * applied to it. | ||
| 170 | */ | ||
| 171 | void UpdateLastContextSwitchTime(KThread* thread, KProcess* process); | ||
| 172 | |||
| 173 | void SwitchToCurrent(); | ||
| 174 | 121 | ||
| 175 | KThread* prev_thread{}; | 122 | void RescheduleOtherCores(u64 cores_needing_scheduling); |
| 176 | std::atomic<KThread*> current_thread{}; | 123 | void RescheduleCurrentCore(); |
| 124 | void RescheduleCurrentCoreImpl(); | ||
| 177 | 125 | ||
| 178 | KThread* idle_thread{}; | 126 | u64 UpdateHighestPriorityThread(KThread* thread); |
| 179 | 127 | ||
| 180 | std::shared_ptr<Common::Fiber> switch_fiber{}; | 128 | private: |
| 129 | friend class KScopedDisableDispatch; | ||
| 181 | 130 | ||
| 182 | struct SchedulingState { | 131 | struct SchedulingState { |
| 183 | std::atomic<bool> needs_scheduling{}; | 132 | std::atomic<bool> needs_scheduling{false}; |
| 184 | bool interrupt_task_thread_runnable{}; | 133 | bool interrupt_task_runnable{false}; |
| 185 | bool should_count_idle{}; | 134 | bool should_count_idle{false}; |
| 186 | u64 idle_count{}; | 135 | u64 idle_count{0}; |
| 187 | KThread* highest_priority_thread{}; | 136 | KThread* highest_priority_thread{nullptr}; |
| 188 | void* idle_thread_stack{}; | 137 | void* idle_thread_stack{nullptr}; |
| 138 | std::atomic<KThread*> prev_thread{nullptr}; | ||
| 139 | KInterruptTaskManager* interrupt_task_manager{nullptr}; | ||
| 189 | }; | 140 | }; |
| 190 | 141 | ||
| 191 | SchedulingState state; | 142 | KernelCore& kernel; |
| 192 | 143 | SchedulingState m_state; | |
| 193 | Core::System& system; | 144 | bool m_is_active{false}; |
| 194 | u64 last_context_switch_time{}; | 145 | s32 m_core_id{0}; |
| 195 | const s32 core_id; | 146 | s64 m_last_context_switch_time{0}; |
| 196 | 147 | KThread* m_idle_thread{nullptr}; | |
| 197 | KSpinLock guard{}; | 148 | std::atomic<KThread*> m_current_thread{nullptr}; |
| 149 | |||
| 150 | std::shared_ptr<Common::Fiber> m_idle_stack{}; | ||
| 151 | KThread* m_idle_cur_thread{}; | ||
| 152 | KThread* m_idle_highest_priority_thread{}; | ||
| 198 | }; | 153 | }; |
| 199 | 154 | ||
| 200 | class [[nodiscard]] KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> { | 155 | class KScopedSchedulerLock : public KScopedLock<KScheduler::LockType> { |
| 201 | public: | 156 | public: |
| 202 | explicit KScopedSchedulerLock(KernelCore& kernel); | 157 | explicit KScopedSchedulerLock(KernelCore& kernel) |
| 203 | ~KScopedSchedulerLock(); | 158 | : KScopedLock(kernel.GlobalSchedulerContext().scheduler_lock) {} |
| 159 | ~KScopedSchedulerLock() = default; | ||
| 204 | }; | 160 | }; |
| 205 | 161 | ||
| 206 | } // namespace Kernel | 162 | } // namespace Kernel |
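The rewritten header folds the scheduler lock guard down to a thin RAII wrapper: `KScopedSchedulerLock` is now just a `KScopedLock` over the global scheduler lock, acquired on construction and released on destruction. Below is a minimal, self-contained sketch of that pattern, assuming an invented `GlobalLock` type in place of the kernel's `KAbstractSchedulerLock`.

```cpp
// Illustrative sketch only; GlobalLock stands in for the real scheduler lock type.
#include <mutex>

class GlobalLock {
public:
    void Lock() { m_mutex.lock(); }
    void Unlock() { m_mutex.unlock(); }

private:
    std::mutex m_mutex;
};

// Generic scoped lock: acquires in the constructor, releases in the destructor,
// so every early return inside a `{}` block still unlocks correctly.
template <typename T>
class ScopedLock {
public:
    explicit ScopedLock(T& lock) : m_lock{lock} { m_lock.Lock(); }
    ~ScopedLock() { m_lock.Unlock(); }
    ScopedLock(const ScopedLock&) = delete;
    ScopedLock& operator=(const ScopedLock&) = delete;

private:
    T& m_lock;
};

GlobalLock g_scheduler_lock;

void YieldLikeOperation() {
    // Equivalent in spirit to `KScopedSchedulerLock sl{kernel};` in the diff:
    // the scheduler lock is held for exactly the lifetime of this scope.
    ScopedLock sl{g_scheduler_lock};
    // ... requeue the current thread, pick a successor, etc. ...
}

int main() {
    YieldLikeOperation();
}
```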
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h index 4fa256970..73314b45e 100644 --- a/src/core/hle/kernel/k_scheduler_lock.h +++ b/src/core/hle/kernel/k_scheduler_lock.h | |||
| @@ -5,9 +5,11 @@ | |||
| 5 | 5 | ||
| 6 | #include <atomic> | 6 | #include <atomic> |
| 7 | #include "common/assert.h" | 7 | #include "common/assert.h" |
| 8 | #include "core/hle/kernel/k_interrupt_manager.h" | ||
| 8 | #include "core/hle/kernel/k_spin_lock.h" | 9 | #include "core/hle/kernel/k_spin_lock.h" |
| 9 | #include "core/hle/kernel/k_thread.h" | 10 | #include "core/hle/kernel/k_thread.h" |
| 10 | #include "core/hle/kernel/kernel.h" | 11 | #include "core/hle/kernel/kernel.h" |
| 12 | #include "core/hle/kernel/physical_core.h" | ||
| 11 | 13 | ||
| 12 | namespace Kernel { | 14 | namespace Kernel { |
| 13 | 15 | ||
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index 90de86770..9daa589b5 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp | |||
| @@ -261,9 +261,14 @@ Result KThread::InitializeDummyThread(KThread* thread) { | |||
| 261 | return thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy); | 261 | return thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy); |
| 262 | } | 262 | } |
| 263 | 263 | ||
| 264 | Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) { | ||
| 265 | return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, | ||
| 266 | system.GetCpuManager().GetGuestActivateFunc()); | ||
| 267 | } | ||
| 268 | |||
| 264 | Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { | 269 | Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { |
| 265 | return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, | 270 | return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, |
| 266 | system.GetCpuManager().GetIdleThreadStartFunc()); | 271 | abort); |
| 267 | } | 272 | } |
| 268 | 273 | ||
| 269 | Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread, | 274 | Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread, |
| @@ -277,7 +282,7 @@ Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThr | |||
| 277 | KProcess* owner) { | 282 | KProcess* owner) { |
| 278 | system.Kernel().GlobalSchedulerContext().AddThread(thread); | 283 | system.Kernel().GlobalSchedulerContext().AddThread(thread); |
| 279 | return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, | 284 | return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, |
| 280 | ThreadType::User, system.GetCpuManager().GetGuestThreadStartFunc()); | 285 | ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()); |
| 281 | } | 286 | } |
| 282 | 287 | ||
| 283 | void KThread::PostDestroy(uintptr_t arg) { | 288 | void KThread::PostDestroy(uintptr_t arg) { |
| @@ -1058,6 +1063,8 @@ void KThread::Exit() { | |||
| 1058 | // Register the thread as a work task. | 1063 | // Register the thread as a work task. |
| 1059 | KWorkerTaskManager::AddTask(kernel, KWorkerTaskManager::WorkerType::Exit, this); | 1064 | KWorkerTaskManager::AddTask(kernel, KWorkerTaskManager::WorkerType::Exit, this); |
| 1060 | } | 1065 | } |
| 1066 | |||
| 1067 | UNREACHABLE_MSG("KThread::Exit() would return"); | ||
| 1061 | } | 1068 | } |
| 1062 | 1069 | ||
| 1063 | Result KThread::Sleep(s64 timeout) { | 1070 | Result KThread::Sleep(s64 timeout) { |
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index 28cd7ecb0..416a861a9 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h | |||
| @@ -110,6 +110,7 @@ void SetCurrentThread(KernelCore& kernel, KThread* thread); | |||
| 110 | [[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel); | 110 | [[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel); |
| 111 | [[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel); | 111 | [[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel); |
| 112 | [[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel); | 112 | [[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel); |
| 113 | size_t CaptureBacktrace(void** buffer, size_t max); | ||
| 113 | 114 | ||
| 114 | class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KWorkerTask>, | 115 | class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KWorkerTask>, |
| 115 | public boost::intrusive::list_base_hook<> { | 116 | public boost::intrusive::list_base_hook<> { |
| @@ -413,6 +414,9 @@ public: | |||
| 413 | 414 | ||
| 414 | [[nodiscard]] static Result InitializeDummyThread(KThread* thread); | 415 | [[nodiscard]] static Result InitializeDummyThread(KThread* thread); |
| 415 | 416 | ||
| 417 | [[nodiscard]] static Result InitializeMainThread(Core::System& system, KThread* thread, | ||
| 418 | s32 virt_core); | ||
| 419 | |||
| 416 | [[nodiscard]] static Result InitializeIdleThread(Core::System& system, KThread* thread, | 420 | [[nodiscard]] static Result InitializeIdleThread(Core::System& system, KThread* thread, |
| 417 | s32 virt_core); | 421 | s32 virt_core); |
| 418 | 422 | ||
| @@ -435,6 +439,7 @@ public: | |||
| 435 | bool is_pinned; | 439 | bool is_pinned; |
| 436 | s32 disable_count; | 440 | s32 disable_count; |
| 437 | KThread* cur_thread; | 441 | KThread* cur_thread; |
| 442 | std::atomic<bool> m_lock; | ||
| 438 | }; | 443 | }; |
| 439 | 444 | ||
| 440 | [[nodiscard]] StackParameters& GetStackParameters() { | 445 | [[nodiscard]] StackParameters& GetStackParameters() { |
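The k_thread.h hunk above adds an `std::atomic<bool> m_lock` field to `StackParameters`. The diff does not show how it is consumed, so the snippet below is only a generic sketch of the test-and-set guard pattern that such a flag typically backs; it is an assumption for illustration, not the confirmed yuzu implementation.

```cpp
// Generic test-and-set guard sketch; not yuzu code.
#include <atomic>
#include <thread>

class TinyGuard {
public:
    void lock() {
        // Spin until we observe false and atomically flip it to true.
        bool expected = false;
        while (!m_locked.compare_exchange_weak(expected, true, std::memory_order_acquire)) {
            expected = false;
            std::this_thread::yield();
        }
    }

    bool try_lock() {
        bool expected = false;
        return m_locked.compare_exchange_strong(expected, true, std::memory_order_acquire);
    }

    void unlock() {
        m_locked.store(false, std::memory_order_release);
    }

private:
    std::atomic<bool> m_locked{false};
};

int main() {
    TinyGuard g;
    g.lock();
    const bool second = g.try_lock(); // false: already held
    g.unlock();
    return second ? 1 : 0;
}
```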
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 7307cf262..10e1f47f6 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -75,7 +75,6 @@ struct KernelCore::Impl { | |||
| 75 | InitializeSystemResourceLimit(kernel, system.CoreTiming()); | 75 | InitializeSystemResourceLimit(kernel, system.CoreTiming()); |
| 76 | InitializeMemoryLayout(); | 76 | InitializeMemoryLayout(); |
| 77 | Init::InitializeKPageBufferSlabHeap(system); | 77 | Init::InitializeKPageBufferSlabHeap(system); |
| 78 | InitializeSchedulers(); | ||
| 79 | InitializeShutdownThreads(); | 78 | InitializeShutdownThreads(); |
| 80 | InitializePreemption(kernel); | 79 | InitializePreemption(kernel); |
| 81 | 80 | ||
| @@ -148,7 +147,6 @@ struct KernelCore::Impl { | |||
| 148 | shutdown_threads[core_id] = nullptr; | 147 | shutdown_threads[core_id] = nullptr; |
| 149 | } | 148 | } |
| 150 | 149 | ||
| 151 | schedulers[core_id]->Finalize(); | ||
| 152 | schedulers[core_id].reset(); | 150 | schedulers[core_id].reset(); |
| 153 | } | 151 | } |
| 154 | 152 | ||
| @@ -195,17 +193,11 @@ struct KernelCore::Impl { | |||
| 195 | exclusive_monitor = | 193 | exclusive_monitor = |
| 196 | Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES); | 194 | Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES); |
| 197 | for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { | 195 | for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { |
| 198 | schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i); | 196 | schedulers[i] = std::make_unique<Kernel::KScheduler>(system.Kernel()); |
| 199 | cores.emplace_back(i, system, *schedulers[i], interrupts); | 197 | cores.emplace_back(i, system, *schedulers[i], interrupts); |
| 200 | } | 198 | } |
| 201 | } | 199 | } |
| 202 | 200 | ||
| 203 | void InitializeSchedulers() { | ||
| 204 | for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { | ||
| 205 | cores[i].Scheduler().Initialize(); | ||
| 206 | } | ||
| 207 | } | ||
| 208 | |||
| 209 | // Creates the default system resource limit | 201 | // Creates the default system resource limit |
| 210 | void InitializeSystemResourceLimit(KernelCore& kernel, | 202 | void InitializeSystemResourceLimit(KernelCore& kernel, |
| 211 | const Core::Timing::CoreTiming& core_timing) { | 203 | const Core::Timing::CoreTiming& core_timing) { |
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 8655506b0..27e5a805d 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp | |||
| @@ -887,7 +887,7 @@ static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle han | |||
| 887 | const auto* const current_thread = GetCurrentThreadPointer(system.Kernel()); | 887 | const auto* const current_thread = GetCurrentThreadPointer(system.Kernel()); |
| 888 | const bool same_thread = current_thread == thread.GetPointerUnsafe(); | 888 | const bool same_thread = current_thread == thread.GetPointerUnsafe(); |
| 889 | 889 | ||
| 890 | const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks(); | 890 | const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTime(); |
| 891 | u64 out_ticks = 0; | 891 | u64 out_ticks = 0; |
| 892 | if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) { | 892 | if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) { |
| 893 | const u64 thread_ticks = current_thread->GetCpuTime(); | 893 | const u64 thread_ticks = current_thread->GetCpuTime(); |
| @@ -3026,11 +3026,6 @@ void Call(Core::System& system, u32 immediate) { | |||
| 3026 | } | 3026 | } |
| 3027 | 3027 | ||
| 3028 | kernel.ExitSVCProfile(); | 3028 | kernel.ExitSVCProfile(); |
| 3029 | |||
| 3030 | if (!thread->IsCallingSvc()) { | ||
| 3031 | auto* host_context = thread->GetHostContext().get(); | ||
| 3032 | host_context->Rewind(); | ||
| 3033 | } | ||
| 3034 | } | 3029 | } |
| 3035 | 3030 | ||
| 3036 | } // namespace Kernel::Svc | 3031 | } // namespace Kernel::Svc |
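The GetInfo change above reads the scheduler's last-context-switch timestamp so that a query against the currently running thread can account for the ticks it has accrued since the last switch, on top of the time already charged at context switches. The sketch below shows that accounting in isolation, under invented names (`TickClock`, `ThreadTimes`, `CoreAccounting`); it is illustrative, not yuzu's implementation.

```cpp
// Standalone tick-accounting sketch; the types below are invented for the example.
#include <cstdint>
#include <cstdio>

struct TickClock {
    std::uint64_t now{0}; // pretend CPU tick counter
};

struct ThreadTimes {
    std::uint64_t accumulated_ticks{0}; // charged at every context switch
};

struct CoreAccounting {
    std::uint64_t last_context_switch_time{0};

    // On a context switch, the outgoing thread is charged for the ticks that
    // elapsed since the previous switch: ticks += now - last_switch.
    void OnContextSwitch(const TickClock& clock, ThreadTimes& outgoing) {
        const std::uint64_t delta = clock.now - last_context_switch_time;
        outgoing.accumulated_ticks += delta;
        last_context_switch_time = clock.now;
    }

    // A GetInfo-style query for the *running* thread also adds the ticks
    // accrued since the last switch, because those are not yet charged.
    std::uint64_t QueryRunningThread(const TickClock& clock, const ThreadTimes& t) const {
        return t.accumulated_ticks + (clock.now - last_context_switch_time);
    }
};

int main() {
    TickClock clock{};
    ThreadTimes thread{};
    CoreAccounting core{};

    clock.now = 100;
    core.OnContextSwitch(clock, thread); // thread charged 100 ticks

    clock.now = 160;
    std::printf("in-flight query: %llu\n",
                static_cast<unsigned long long>(core.QueryRunningThread(clock, thread)));
    // prints 160: 100 already charged + 60 still accruing
}
```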