| author | 2022-07-05 23:27:25 -0400 |
|---|---|
| committer | 2022-07-14 22:47:18 -0400 |
| commit | 21945ae127480c8332c1110ceada2df4a42a5848 (patch) |
| tree | a385c64a14b0d8e8dd71410eaa47575462f8f368 |
| parent | kernel: use KScheduler from mesosphere (diff) |
| download | yuzu-21945ae127480c8332c1110ceada2df4a42a5848.tar.gz yuzu-21945ae127480c8332c1110ceada2df4a42a5848.tar.xz yuzu-21945ae127480c8332c1110ceada2df4a42a5848.zip |
kernel: fix issues with single core mode
Diffstat
| -rw-r--r-- | src/core/cpu_manager.cpp | 152 |
| -rw-r--r-- | src/core/cpu_manager.h | 11 |
| -rw-r--r-- | src/core/hle/kernel/global_scheduler_context.cpp | 5 |
| -rw-r--r-- | src/core/hle/kernel/k_scheduler.cpp | 173 |
| -rw-r--r-- | src/core/hle/kernel/k_scheduler.h | 24 |
| -rw-r--r-- | src/core/hle/kernel/k_thread.cpp | 5 |
| -rw-r--r-- | src/core/hle/kernel/k_thread.h | 24 |
| -rw-r--r-- | src/core/hle/kernel/kernel.cpp | 19 |
| -rw-r--r-- | src/core/hle/kernel/physical_core.cpp | 1 |
9 files changed, 225 insertions, 189 deletions
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 428194129..838d6be21 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -42,19 +42,19 @@ void CpuManager::Shutdown() {
| 42 | } | 42 | } |
| 43 | } | 43 | } |
| 44 | 44 | ||
| 45 | void CpuManager::GuestActivateFunction() { | 45 | void CpuManager::GuestThreadFunction() { |
| 46 | if (is_multicore) { | 46 | if (is_multicore) { |
| 47 | MultiCoreGuestActivate(); | 47 | MultiCoreRunGuestThread(); |
| 48 | } else { | 48 | } else { |
| 49 | SingleCoreGuestActivate(); | 49 | SingleCoreRunGuestThread(); |
| 50 | } | 50 | } |
| 51 | } | 51 | } |
| 52 | 52 | ||
| 53 | void CpuManager::GuestThreadFunction() { | 53 | void CpuManager::IdleThreadFunction() { |
| 54 | if (is_multicore) { | 54 | if (is_multicore) { |
| 55 | MultiCoreRunGuestThread(); | 55 | MultiCoreRunIdleThread(); |
| 56 | } else { | 56 | } else { |
| 57 | SingleCoreRunGuestThread(); | 57 | SingleCoreRunIdleThread(); |
| 58 | } | 58 | } |
| 59 | } | 59 | } |
| 60 | 60 | ||
@@ -62,19 +62,6 @@ void CpuManager::ShutdownThreadFunction() {
| 62 | ShutdownThread(); | 62 | ShutdownThread(); |
| 63 | } | 63 | } |
| 64 | 64 | ||
| 65 | void CpuManager::WaitForAndHandleInterrupt() { | ||
| 66 | auto& kernel = system.Kernel(); | ||
| 67 | auto& physical_core = kernel.CurrentPhysicalCore(); | ||
| 68 | |||
| 69 | ASSERT(Kernel::GetCurrentThread(kernel).GetDisableDispatchCount() == 1); | ||
| 70 | |||
| 71 | if (!physical_core.IsInterrupted()) { | ||
| 72 | physical_core.Idle(); | ||
| 73 | } | ||
| 74 | |||
| 75 | HandleInterrupt(); | ||
| 76 | } | ||
| 77 | |||
| 78 | void CpuManager::HandleInterrupt() { | 65 | void CpuManager::HandleInterrupt() { |
| 79 | auto& kernel = system.Kernel(); | 66 | auto& kernel = system.Kernel(); |
| 80 | auto core_index = kernel.CurrentPhysicalCoreIndex(); | 67 | auto core_index = kernel.CurrentPhysicalCoreIndex(); |
@@ -86,49 +73,121 @@ void CpuManager::HandleInterrupt() {
| 86 | /// MultiCore /// | 73 | /// MultiCore /// |
| 87 | /////////////////////////////////////////////////////////////////////////////// | 74 | /////////////////////////////////////////////////////////////////////////////// |
| 88 | 75 | ||
| 89 | void CpuManager::MultiCoreGuestActivate() { | 76 | void CpuManager::MultiCoreRunGuestThread() { |
| 90 | // Similar to the HorizonKernelMain callback in HOS | 77 | // Similar to UserModeThreadStarter in HOS |
| 91 | auto& kernel = system.Kernel(); | 78 | auto& kernel = system.Kernel(); |
| 92 | auto* scheduler = kernel.CurrentScheduler(); | 79 | kernel.CurrentScheduler()->OnThreadStart(); |
| 93 | 80 | ||
| 94 | scheduler->Activate(); | 81 | while (true) { |
| 95 | UNREACHABLE(); | 82 | auto* physical_core = &kernel.CurrentPhysicalCore(); |
| 83 | while (!physical_core->IsInterrupted()) { | ||
| 84 | physical_core->Run(); | ||
| 85 | physical_core = &kernel.CurrentPhysicalCore(); | ||
| 86 | } | ||
| 87 | |||
| 88 | HandleInterrupt(); | ||
| 89 | } | ||
| 96 | } | 90 | } |
| 97 | 91 | ||
| 98 | void CpuManager::MultiCoreRunGuestThread() { | 92 | void CpuManager::MultiCoreRunIdleThread() { |
| 99 | // Similar to UserModeThreadStarter in HOS | 93 | // Not accurate to HOS. Remove this entire method when singlecore is removed. |
| 94 | // See notes in KScheduler::ScheduleImpl for more information about why this | ||
| 95 | // is inaccurate. | ||
| 96 | |||
| 100 | auto& kernel = system.Kernel(); | 97 | auto& kernel = system.Kernel(); |
| 101 | auto* thread = kernel.GetCurrentEmuThread(); | 98 | kernel.CurrentScheduler()->OnThreadStart(); |
| 102 | thread->EnableDispatch(); | 99 | |
| 100 | while (true) { | ||
| 101 | auto& physical_core = kernel.CurrentPhysicalCore(); | ||
| 102 | if (!physical_core.IsInterrupted()) { | ||
| 103 | physical_core.Idle(); | ||
| 104 | } | ||
| 103 | 105 | ||
| 104 | MultiCoreRunGuestLoop(); | 106 | HandleInterrupt(); |
| 107 | } | ||
| 105 | } | 108 | } |
| 106 | 109 | ||
| 107 | void CpuManager::MultiCoreRunGuestLoop() { | 110 | /////////////////////////////////////////////////////////////////////////////// |
| 111 | /// SingleCore /// | ||
| 112 | /////////////////////////////////////////////////////////////////////////////// | ||
| 113 | |||
| 114 | void CpuManager::SingleCoreRunGuestThread() { | ||
| 108 | auto& kernel = system.Kernel(); | 115 | auto& kernel = system.Kernel(); |
| 116 | kernel.CurrentScheduler()->OnThreadStart(); | ||
| 109 | 117 | ||
| 110 | while (true) { | 118 | while (true) { |
| 111 | auto* physical_core = &kernel.CurrentPhysicalCore(); | 119 | auto* physical_core = &kernel.CurrentPhysicalCore(); |
| 112 | while (!physical_core->IsInterrupted()) { | 120 | if (!physical_core->IsInterrupted()) { |
| 113 | physical_core->Run(); | 121 | physical_core->Run(); |
| 114 | physical_core = &kernel.CurrentPhysicalCore(); | 122 | physical_core = &kernel.CurrentPhysicalCore(); |
| 115 | } | 123 | } |
| 116 | 124 | ||
| 125 | kernel.SetIsPhantomModeForSingleCore(true); | ||
| 126 | system.CoreTiming().Advance(); | ||
| 127 | kernel.SetIsPhantomModeForSingleCore(false); | ||
| 128 | |||
| 129 | PreemptSingleCore(); | ||
| 117 | HandleInterrupt(); | 130 | HandleInterrupt(); |
| 118 | } | 131 | } |
| 119 | } | 132 | } |
| 120 | 133 | ||
| 121 | /////////////////////////////////////////////////////////////////////////////// | 134 | void CpuManager::SingleCoreRunIdleThread() { |
| 122 | /// SingleCore /// | 135 | auto& kernel = system.Kernel(); |
| 123 | /////////////////////////////////////////////////////////////////////////////// | 136 | kernel.CurrentScheduler()->OnThreadStart(); |
| 137 | |||
| 138 | while (true) { | ||
| 139 | PreemptSingleCore(false); | ||
| 140 | system.CoreTiming().AddTicks(1000U); | ||
| 141 | idle_count++; | ||
| 142 | HandleInterrupt(); | ||
| 143 | } | ||
| 144 | } | ||
| 124 | 145 | ||
| 125 | void CpuManager::SingleCoreGuestActivate() {} | 146 | void CpuManager::PreemptSingleCore(bool from_running_environment) { |
| 147 | { | ||
| 148 | auto& kernel = system.Kernel(); | ||
| 149 | auto& scheduler = kernel.Scheduler(current_core); | ||
| 150 | |||
| 151 | Kernel::KThread* current_thread = scheduler.GetSchedulerCurrentThread(); | ||
| 152 | if (idle_count >= 4 || from_running_environment) { | ||
| 153 | if (!from_running_environment) { | ||
| 154 | system.CoreTiming().Idle(); | ||
| 155 | idle_count = 0; | ||
| 156 | } | ||
| 157 | kernel.SetIsPhantomModeForSingleCore(true); | ||
| 158 | system.CoreTiming().Advance(); | ||
| 159 | kernel.SetIsPhantomModeForSingleCore(false); | ||
| 160 | } | ||
| 161 | current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES); | ||
| 162 | system.CoreTiming().ResetTicks(); | ||
| 163 | scheduler.Unload(scheduler.GetSchedulerCurrentThread()); | ||
| 126 | 164 | ||
| 127 | void CpuManager::SingleCoreRunGuestThread() {} | 165 | auto& next_scheduler = kernel.Scheduler(current_core); |
| 128 | 166 | ||
| 129 | void CpuManager::SingleCoreRunGuestLoop() {} | 167 | // Disable dispatch. We're about to preempt this thread. |
| 168 | Kernel::KScopedDisableDispatch dd{kernel}; | ||
| 169 | Common::Fiber::YieldTo(current_thread->GetHostContext(), *next_scheduler.GetSwitchFiber()); | ||
| 170 | } | ||
| 130 | 171 | ||
| 131 | void CpuManager::PreemptSingleCore(bool from_running_enviroment) {} | 172 | // We've now been scheduled again, and we may have exchanged schedulers. |
| 173 | // Reload the scheduler in case it's different. | ||
| 174 | { | ||
| 175 | auto& scheduler = system.Kernel().Scheduler(current_core); | ||
| 176 | scheduler.Reload(scheduler.GetSchedulerCurrentThread()); | ||
| 177 | if (!scheduler.IsIdle()) { | ||
| 178 | idle_count = 0; | ||
| 179 | } | ||
| 180 | } | ||
| 181 | } | ||
| 182 | |||
| 183 | void CpuManager::GuestActivate() { | ||
| 184 | // Similar to the HorizonKernelMain callback in HOS | ||
| 185 | auto& kernel = system.Kernel(); | ||
| 186 | auto* scheduler = kernel.CurrentScheduler(); | ||
| 187 | |||
| 188 | scheduler->Activate(); | ||
| 189 | UNREACHABLE(); | ||
| 190 | } | ||
| 132 | 191 | ||
| 133 | void CpuManager::ShutdownThread() { | 192 | void CpuManager::ShutdownThread() { |
| 134 | auto& kernel = system.Kernel(); | 193 | auto& kernel = system.Kernel(); |
@@ -168,20 +227,11 @@ void CpuManager::RunThread(std::size_t core) {
| 168 | } | 227 | } |
| 169 | 228 | ||
| 170 | auto& kernel = system.Kernel(); | 229 | auto& kernel = system.Kernel(); |
| 230 | auto& scheduler = *kernel.CurrentScheduler(); | ||
| 231 | auto* thread = scheduler.GetSchedulerCurrentThread(); | ||
| 232 | Kernel::SetCurrentThread(kernel, thread); | ||
| 171 | 233 | ||
| 172 | auto* main_thread = Kernel::KThread::Create(kernel); | 234 | Common::Fiber::YieldTo(data.host_context, *thread->GetHostContext()); |
| 173 | main_thread->SetName(fmt::format("MainThread:{}", core)); | ||
| 174 | ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, static_cast<s32>(core)) | ||
| 175 | .IsSuccess()); | ||
| 176 | |||
| 177 | auto* idle_thread = Kernel::KThread::Create(kernel); | ||
| 178 | ASSERT(Kernel::KThread::InitializeIdleThread(system, idle_thread, static_cast<s32>(core)) | ||
| 179 | .IsSuccess()); | ||
| 180 | |||
| 181 | kernel.SetCurrentEmuThread(main_thread); | ||
| 182 | kernel.CurrentScheduler()->Initialize(idle_thread); | ||
| 183 | |||
| 184 | Common::Fiber::YieldTo(data.host_context, *main_thread->GetHostContext()); | ||
| 185 | } | 235 | } |
| 186 | 236 | ||
| 187 | } // namespace Core | 237 | } // namespace Core |
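
The single-core changes above multiplex all four guest cores onto one host thread: `PreemptSingleCore` rotates to the next virtual core and only advances host timing once the guest has idled long enough (or when preempting a running guest). The sketch below models just that rotation policy; `SingleCoreRotation` and its `Preempt` helper are hypothetical stand-ins, not yuzu's API, and the real code additionally unloads/reloads the scheduler's current thread and yields between fibers.

```cpp
#include <cstddef>
#include <cstdio>

constexpr std::size_t NUM_CPU_CORES = 4;

// Toy model of the policy in CpuManager::PreemptSingleCore.
struct SingleCoreRotation {
    std::size_t current_core{0};
    int idle_count{0};

    std::size_t Preempt(bool from_running_environment) {
        if (idle_count >= 4 || from_running_environment) {
            if (!from_running_environment) {
                // Every core has been idling; let host CoreTiming catch up.
                idle_count = 0;
            }
            // (the real code advances CoreTiming in "phantom mode" here)
        }
        // Round-robin to the next virtual core.
        current_core = (current_core + 1) % NUM_CPU_CORES;
        return current_core;
    }
};

int main() {
    SingleCoreRotation rot;
    for (int i = 0; i < 8; ++i) {
        rot.idle_count++; // pretend this slice idled
        std::printf("preempt -> core %zu\n", rot.Preempt(false));
    }
}
```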
diff --git a/src/core/cpu_manager.h b/src/core/cpu_manager.h
index 8143424af..835505b92 100644
--- a/src/core/cpu_manager.h
+++ b/src/core/cpu_manager.h
@@ -48,12 +48,11 @@ public:
| 48 | gpu_barrier->Sync(); | 48 | gpu_barrier->Sync(); |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | void WaitForAndHandleInterrupt(); | ||
| 52 | void Initialize(); | 51 | void Initialize(); |
| 53 | void Shutdown(); | 52 | void Shutdown(); |
| 54 | 53 | ||
| 55 | std::function<void()> GetGuestActivateFunc() { | 54 | std::function<void()> GetGuestActivateFunc() { |
| 56 | return [this] { GuestActivateFunction(); }; | 55 | return [this] { GuestActivate(); }; |
| 57 | } | 56 | } |
| 58 | std::function<void()> GetGuestThreadFunc() { | 57 | std::function<void()> GetGuestThreadFunc() { |
| 59 | return [this] { GuestThreadFunction(); }; | 58 | return [this] { GuestThreadFunction(); }; |
@@ -72,21 +71,19 @@ public:
| 72 | } | 71 | } |
| 73 | 72 | ||
| 74 | private: | 73 | private: |
| 75 | void GuestActivateFunction(); | ||
| 76 | void GuestThreadFunction(); | 74 | void GuestThreadFunction(); |
| 77 | void IdleThreadFunction(); | 75 | void IdleThreadFunction(); |
| 78 | void ShutdownThreadFunction(); | 76 | void ShutdownThreadFunction(); |
| 79 | 77 | ||
| 80 | void MultiCoreGuestActivate(); | ||
| 81 | void MultiCoreRunGuestThread(); | 78 | void MultiCoreRunGuestThread(); |
| 82 | void MultiCoreRunGuestLoop(); | 79 | void MultiCoreRunIdleThread(); |
| 83 | 80 | ||
| 84 | void SingleCoreGuestActivate(); | ||
| 85 | void SingleCoreRunGuestThread(); | 81 | void SingleCoreRunGuestThread(); |
| 86 | void SingleCoreRunGuestLoop(); | 82 | void SingleCoreRunIdleThread(); |
| 87 | 83 | ||
| 88 | static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core); | 84 | static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core); |
| 89 | 85 | ||
| 86 | void GuestActivate(); | ||
| 90 | void HandleInterrupt(); | 87 | void HandleInterrupt(); |
| 91 | void ShutdownThread(); | 88 | void ShutdownThread(); |
| 92 | void RunThread(std::size_t core); | 89 | void RunThread(std::size_t core); |
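
The header still hands out its entry points as type-erased `std::function` getters, so kernel code (e.g. `KThread::InitializeIdleThread` below, which now uses `GetIdleThreadStartFunc()`) can capture a start routine without depending on `CpuManager` internals. A minimal sketch of the pattern, with hypothetical names:

```cpp
#include <functional>
#include <thread>

// Hypothetical reduction of the Get*Func accessors.
class Manager {
public:
    std::function<void()> GetThreadFunc() {
        return [this] { ThreadFunction(); }; // self-capturing callback
    }

private:
    void ThreadFunction() { /* choose the multicore or singlecore loop */ }
};

int main() {
    Manager mgr;
    std::thread host{mgr.GetThreadFunc()}; // consumer never sees internals
    host.join();
}
```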
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index 21fd5cb67..65576b8c4 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -42,11 +42,6 @@ void GlobalSchedulerContext::PreemptThreads() {
| 42 | for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | 42 | for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { |
| 43 | const u32 priority = preemption_priorities[core_id]; | 43 | const u32 priority = preemption_priorities[core_id]; |
| 44 | KScheduler::RotateScheduledQueue(kernel, core_id, priority); | 44 | KScheduler::RotateScheduledQueue(kernel, core_id, priority); |
| 45 | |||
| 46 | // Signal an interrupt occurred. For core 3, this is a certainty, as preemption will result | ||
| 47 | // in the rotator thread being scheduled. For cores 0-2, this is to simulate or system | ||
| 48 | // interrupts that may have occurred. | ||
| 49 | kernel.PhysicalCore(core_id).Interrupt(); | ||
| 50 | } | 45 | } |
| 51 | } | 46 | } |
| 52 | 47 | ||
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 13915dbd9..cac96a780 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -28,9 +28,9 @@ static void IncrementScheduledCount(Kernel::KThread* thread) {
| 28 | } | 28 | } |
| 29 | 29 | ||
| 30 | KScheduler::KScheduler(KernelCore& kernel_) : kernel{kernel_} { | 30 | KScheduler::KScheduler(KernelCore& kernel_) : kernel{kernel_} { |
| 31 | m_idle_stack = std::make_shared<Common::Fiber>([this] { | 31 | m_switch_fiber = std::make_shared<Common::Fiber>([this] { |
| 32 | while (true) { | 32 | while (true) { |
| 33 | ScheduleImplOffStack(); | 33 | ScheduleImplFiber(); |
| 34 | } | 34 | } |
| 35 | }); | 35 | }); |
| 36 | 36 | ||
@@ -60,9 +60,9 @@ void KScheduler::DisableScheduling(KernelCore& kernel) {
| 60 | void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) { | 60 | void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) { |
| 61 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 1); | 61 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 1); |
| 62 | 62 | ||
| 63 | auto* scheduler = kernel.CurrentScheduler(); | 63 | auto* scheduler{kernel.CurrentScheduler()}; |
| 64 | 64 | ||
| 65 | if (!scheduler) { | 65 | if (!scheduler || kernel.IsPhantomModeForSingleCore()) { |
| 66 | // HACK: we cannot schedule from this thread, it is not a core thread | 66 | // HACK: we cannot schedule from this thread, it is not a core thread |
| 67 | RescheduleCores(kernel, cores_needing_scheduling); | 67 | RescheduleCores(kernel, cores_needing_scheduling); |
| 68 | if (GetCurrentThread(kernel).GetDisableDispatchCount() == 1) { | 68 | if (GetCurrentThread(kernel).GetDisableDispatchCount() == 1) { |
@@ -125,9 +125,9 @@ void KScheduler::RescheduleCurrentCoreImpl() {
| 125 | } | 125 | } |
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | void KScheduler::Initialize(KThread* idle_thread) { | 128 | void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id) { |
| 129 | // Set core ID/idle thread/interrupt task manager. | 129 | // Set core ID/idle thread/interrupt task manager. |
| 130 | m_core_id = GetCurrentCoreId(kernel); | 130 | m_core_id = core_id; |
| 131 | m_idle_thread = idle_thread; | 131 | m_idle_thread = idle_thread; |
| 132 | // m_state.idle_thread_stack = m_idle_thread->GetStackTop(); | 132 | // m_state.idle_thread_stack = m_idle_thread->GetStackTop(); |
| 133 | // m_state.interrupt_task_manager = &kernel.GetInterruptTaskManager(); | 133 | // m_state.interrupt_task_manager = &kernel.GetInterruptTaskManager(); |
@@ -142,10 +142,10 @@ void KScheduler::Initialize(KThread* idle_thread) {
| 142 | // Bind interrupt handler. | 142 | // Bind interrupt handler. |
| 143 | // kernel.GetInterruptManager().BindHandler( | 143 | // kernel.GetInterruptManager().BindHandler( |
| 144 | // GetSchedulerInterruptHandler(kernel), KInterruptName::Scheduler, m_core_id, | 144 | // GetSchedulerInterruptHandler(kernel), KInterruptName::Scheduler, m_core_id, |
| 145 | // KInterruptController::PriorityLevel_Scheduler, false, false); | 145 | // KInterruptController::PriorityLevel::Scheduler, false, false); |
| 146 | 146 | ||
| 147 | // Set the current thread. | 147 | // Set the current thread. |
| 148 | m_current_thread = GetCurrentThreadPointer(kernel); | 148 | m_current_thread = main_thread; |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | void KScheduler::Activate() { | 151 | void KScheduler::Activate() { |
@@ -156,6 +156,10 @@ void KScheduler::Activate() {
| 156 | RescheduleCurrentCore(); | 156 | RescheduleCurrentCore(); |
| 157 | } | 157 | } |
| 158 | 158 | ||
| 159 | void KScheduler::OnThreadStart() { | ||
| 160 | GetCurrentThread(kernel).EnableDispatch(); | ||
| 161 | } | ||
| 162 | |||
| 159 | u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) { | 163 | u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) { |
| 160 | if (KThread* prev_highest_thread = m_state.highest_priority_thread; | 164 | if (KThread* prev_highest_thread = m_state.highest_priority_thread; |
| 161 | prev_highest_thread != highest_thread) [[likely]] { | 165 | prev_highest_thread != highest_thread) [[likely]] { |
@@ -372,37 +376,30 @@ void KScheduler::ScheduleImpl() {
| 372 | } | 376 | } |
| 373 | 377 | ||
| 374 | // The highest priority thread is not the same as the current thread. | 378 | // The highest priority thread is not the same as the current thread. |
| 375 | // Switch to the idle thread stack and continue executing from there. | 379 | // Jump to the switcher and continue executing from there. |
| 376 | m_idle_cur_thread = cur_thread; | 380 | m_switch_cur_thread = cur_thread; |
| 377 | m_idle_highest_priority_thread = highest_priority_thread; | 381 | m_switch_highest_priority_thread = highest_priority_thread; |
| 378 | Common::Fiber::YieldTo(cur_thread->host_context, *m_idle_stack); | 382 | m_switch_from_schedule = true; |
| 383 | Common::Fiber::YieldTo(cur_thread->host_context, *m_switch_fiber); | ||
| 379 | 384 | ||
| 380 | // Returning from ScheduleImpl occurs after this thread has been scheduled again. | 385 | // Returning from ScheduleImpl occurs after this thread has been scheduled again. |
| 381 | } | 386 | } |
| 382 | 387 | ||
| 383 | void KScheduler::ScheduleImplOffStack() { | 388 | void KScheduler::ScheduleImplFiber() { |
| 384 | KThread* const cur_thread{m_idle_cur_thread}; | 389 | KThread* const cur_thread{m_switch_cur_thread}; |
| 385 | KThread* highest_priority_thread{m_idle_highest_priority_thread}; | 390 | KThread* highest_priority_thread{m_switch_highest_priority_thread}; |
| 386 | 391 | ||
| 387 | // Get a reference to the current thread's stack parameters. | 392 | // If we're not coming from scheduling (i.e., we came from SC preemption), |
| 388 | auto& sp{cur_thread->GetStackParameters()}; | 393 | // we should restart the scheduling loop directly. Not accurate to HOS. |
| 389 | 394 | if (!m_switch_from_schedule) { | |
| 390 | // Save the original thread context. | 395 | goto retry; |
| 391 | { | ||
| 392 | auto& physical_core = kernel.System().CurrentPhysicalCore(); | ||
| 393 | auto& cpu_core = physical_core.ArmInterface(); | ||
| 394 | cpu_core.SaveContext(cur_thread->GetContext32()); | ||
| 395 | cpu_core.SaveContext(cur_thread->GetContext64()); | ||
| 396 | // Save the TPIDR_EL0 system register in case it was modified. | ||
| 397 | cur_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); | ||
| 398 | cpu_core.ClearExclusiveState(); | ||
| 399 | } | 396 | } |
| 400 | 397 | ||
| 401 | // Check if the thread is terminated by checking the DPC flags. | 398 | // Mark that we are not coming from scheduling anymore. |
| 402 | if ((sp.dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) { | 399 | m_switch_from_schedule = false; |
| 403 | // The thread isn't terminated, so we want to unlock it. | 400 | |
| 404 | sp.m_lock.store(false, std::memory_order_seq_cst); | 401 | // Save the original thread context. |
| 405 | } | 402 | Unload(cur_thread); |
| 406 | 403 | ||
| 407 | // The current thread's context has been entirely taken care of. | 404 | // The current thread's context has been entirely taken care of. |
| 408 | // Now we want to loop until we successfully switch the thread context. | 405 | // Now we want to loop until we successfully switch the thread context. |
@@ -411,62 +408,39 @@ void KScheduler::ScheduleImplOffStack() {
| 411 | // Check if the highest priority thread is null. | 408 | // Check if the highest priority thread is null. |
| 412 | if (!highest_priority_thread) { | 409 | if (!highest_priority_thread) { |
| 413 | // The next thread is nullptr! | 410 | // The next thread is nullptr! |
| 414 | // Switch to nullptr. This will actually switch to the idle thread. | ||
| 415 | SwitchThread(nullptr); | ||
| 416 | |||
| 417 | // We've switched to the idle thread, so we want to process interrupt tasks until we | ||
| 418 | // schedule a non-idle thread. | ||
| 419 | while (!m_state.interrupt_task_runnable) { | ||
| 420 | // Check if we need scheduling. | ||
| 421 | if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { | ||
| 422 | goto retry; | ||
| 423 | } | ||
| 424 | 411 | ||
| 425 | // Clear the previous thread. | 412 | // Switch to the idle thread. Note: HOS treats idling as a special case for |
| 426 | m_state.prev_thread = nullptr; | 413 | // performance. This is not *required* for yuzu's purposes, and for singlecore |
| 414 | // compatibility, we can just move the logic that would go here into the execution | ||
| 415 | // of the idle thread. If we ever remove singlecore, we should implement this | ||
| 416 | // accurately to HOS. | ||
| 417 | highest_priority_thread = m_idle_thread; | ||
| 418 | } | ||
| 427 | 419 | ||
| 428 | // Wait for an interrupt before checking again. | 420 | // We want to try to lock the highest priority thread's context. |
| 429 | kernel.System().GetCpuManager().WaitForAndHandleInterrupt(); | 421 | // Try to take it. |
| 422 | while (!highest_priority_thread->context_guard.try_lock()) { | ||
| 423 | // The highest priority thread's context is already locked. | ||
| 424 | // Check if we need scheduling. If we don't, we can retry directly. | ||
| 425 | if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { | ||
| 426 | // If we do, another core is interfering, and we must start again. | ||
| 427 | goto retry; | ||
| 430 | } | 428 | } |
| 429 | } | ||
| 431 | 430 | ||
| 432 | // Execute any pending interrupt tasks. | 431 | // It's time to switch the thread. |
| 433 | // m_state.interrupt_task_manager->DoTasks(); | 432 | // Switch to the highest priority thread. |
| 434 | 433 | SwitchThread(highest_priority_thread); | |
| 435 | // Clear the interrupt task thread as runnable. | ||
| 436 | m_state.interrupt_task_runnable = false; | ||
| 437 | 434 | ||
| 438 | // Retry the scheduling loop. | 435 | // Check if we need scheduling. If we do, then we can't complete the switch and should |
| 436 | // retry. | ||
| 437 | if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { | ||
| 438 | // Our switch failed. | ||
| 439 | // We should unlock the thread context, and then retry. | ||
| 440 | highest_priority_thread->context_guard.unlock(); | ||
| 439 | goto retry; | 441 | goto retry; |
| 440 | } else { | 442 | } else { |
| 441 | // We want to try to lock the highest priority thread's context. | 443 | break; |
| 442 | // Try to take it. | ||
| 443 | bool expected{false}; | ||
| 444 | while (!highest_priority_thread->stack_parameters.m_lock.compare_exchange_strong( | ||
| 445 | expected, true, std::memory_order_seq_cst)) { | ||
| 446 | // The highest priority thread's context is already locked. | ||
| 447 | // Check if we need scheduling. If we don't, we can retry directly. | ||
| 448 | if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { | ||
| 449 | // If we do, another core is interfering, and we must start again. | ||
| 450 | goto retry; | ||
| 451 | } | ||
| 452 | expected = false; | ||
| 453 | } | ||
| 454 | |||
| 455 | // It's time to switch the thread. | ||
| 456 | // Switch to the highest priority thread. | ||
| 457 | SwitchThread(highest_priority_thread); | ||
| 458 | |||
| 459 | // Check if we need scheduling. If we do, then we can't complete the switch and should | ||
| 460 | // retry. | ||
| 461 | if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { | ||
| 462 | // Our switch failed. | ||
| 463 | // We should unlock the thread context, and then retry. | ||
| 464 | highest_priority_thread->stack_parameters.m_lock.store(false, | ||
| 465 | std::memory_order_seq_cst); | ||
| 466 | goto retry; | ||
| 467 | } else { | ||
| 468 | break; | ||
| 469 | } | ||
| 470 | } | 444 | } |
| 471 | 445 | ||
| 472 | retry: | 446 | retry: |
@@ -480,18 +454,35 @@ void KScheduler::ScheduleImplOffStack() {
| 480 | } | 454 | } |
| 481 | 455 | ||
| 482 | // Reload the guest thread context. | 456 | // Reload the guest thread context. |
| 483 | { | 457 | Reload(highest_priority_thread); |
| 484 | auto& cpu_core = kernel.System().CurrentArmInterface(); | ||
| 485 | cpu_core.LoadContext(highest_priority_thread->GetContext32()); | ||
| 486 | cpu_core.LoadContext(highest_priority_thread->GetContext64()); | ||
| 487 | cpu_core.SetTlsAddress(highest_priority_thread->GetTLSAddress()); | ||
| 488 | cpu_core.SetTPIDR_EL0(highest_priority_thread->GetTPIDR_EL0()); | ||
| 489 | cpu_core.LoadWatchpointArray(highest_priority_thread->GetOwnerProcess()->GetWatchpoints()); | ||
| 490 | cpu_core.ClearExclusiveState(); | ||
| 491 | } | ||
| 492 | 458 | ||
| 493 | // Reload the host thread. | 459 | // Reload the host thread. |
| 494 | Common::Fiber::YieldTo(m_idle_stack, *highest_priority_thread->host_context); | 460 | Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->host_context); |
| 461 | } | ||
| 462 | |||
| 463 | void KScheduler::Unload(KThread* thread) { | ||
| 464 | auto& cpu_core = kernel.System().ArmInterface(m_core_id); | ||
| 465 | cpu_core.SaveContext(thread->GetContext32()); | ||
| 466 | cpu_core.SaveContext(thread->GetContext64()); | ||
| 467 | // Save the TPIDR_EL0 system register in case it was modified. | ||
| 468 | thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); | ||
| 469 | cpu_core.ClearExclusiveState(); | ||
| 470 | |||
| 471 | // Check if the thread is terminated by checking the DPC flags. | ||
| 472 | if ((thread->GetStackParameters().dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) { | ||
| 473 | // The thread isn't terminated, so we want to unlock it. | ||
| 474 | thread->context_guard.unlock(); | ||
| 475 | } | ||
| 476 | } | ||
| 477 | |||
| 478 | void KScheduler::Reload(KThread* thread) { | ||
| 479 | auto& cpu_core = kernel.System().ArmInterface(m_core_id); | ||
| 480 | cpu_core.LoadContext(thread->GetContext32()); | ||
| 481 | cpu_core.LoadContext(thread->GetContext64()); | ||
| 482 | cpu_core.SetTlsAddress(thread->GetTLSAddress()); | ||
| 483 | cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0()); | ||
| 484 | cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints()); | ||
| 485 | cpu_core.ClearExclusiveState(); | ||
| 495 | } | 486 | } |
| 496 | 487 | ||
| 497 | void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) { | 488 | void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) { |
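
`ScheduleImplFiber` replaces the hand-rolled `compare_exchange_strong` loop on `StackParameters::m_lock` with `try_lock` on the thread's `context_guard`, keeping the same bail-out rule: abandon the acquisition whenever another core flags `needs_scheduling` and rescan for a new highest-priority thread. A reduced sketch of that acquisition step (simplified types, not the full scheduler):

```cpp
#include <atomic>
#include <mutex>

struct ThreadContext {
    std::mutex context_guard; // stand-in for KThread::context_guard
};

// Returns true once the next thread's context is locked; returns false if
// scheduling was requested mid-acquisition, in which case the caller rescans
// (the "goto retry" in the diff above).
bool TryAcquireForSwitch(ThreadContext& next, const std::atomic<bool>& needs_scheduling) {
    while (!next.context_guard.try_lock()) {
        if (needs_scheduling.load(std::memory_order_seq_cst)) {
            return false; // another core interfered; start over
        }
    }
    // Locked. The real code re-checks needs_scheduling once more and, on
    // failure, unlocks and retries before committing the switch.
    return true;
}

int main() {
    ThreadContext next;
    std::atomic<bool> needs_scheduling{false};
    if (TryAcquireForSwitch(next, needs_scheduling)) {
        next.context_guard.unlock();
    }
}
```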
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 8f4eebf6a..91e870933 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -41,8 +41,11 @@ public:
| 41 | explicit KScheduler(KernelCore& kernel); | 41 | explicit KScheduler(KernelCore& kernel); |
| 42 | ~KScheduler(); | 42 | ~KScheduler(); |
| 43 | 43 | ||
| 44 | void Initialize(KThread* idle_thread); | 44 | void Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id); |
| 45 | void Activate(); | 45 | void Activate(); |
| 46 | void OnThreadStart(); | ||
| 47 | void Unload(KThread* thread); | ||
| 48 | void Reload(KThread* thread); | ||
| 46 | 49 | ||
| 47 | void SetInterruptTaskRunnable(); | 50 | void SetInterruptTaskRunnable(); |
| 48 | void RequestScheduleOnInterrupt(); | 51 | void RequestScheduleOnInterrupt(); |
@@ -55,6 +58,14 @@ public:
| 55 | return m_idle_thread; | 58 | return m_idle_thread; |
| 56 | } | 59 | } |
| 57 | 60 | ||
| 61 | bool IsIdle() const { | ||
| 62 | return m_current_thread.load() == m_idle_thread; | ||
| 63 | } | ||
| 64 | |||
| 65 | std::shared_ptr<Common::Fiber> GetSwitchFiber() { | ||
| 66 | return m_switch_fiber; | ||
| 67 | } | ||
| 68 | |||
| 58 | KThread* GetPreviousThread() const { | 69 | KThread* GetPreviousThread() const { |
| 59 | return m_state.prev_thread; | 70 | return m_state.prev_thread; |
| 60 | } | 71 | } |
@@ -69,7 +80,7 @@ public:
| 69 | 80 | ||
| 70 | // Static public API. | 81 | // Static public API. |
| 71 | static bool CanSchedule(KernelCore& kernel) { | 82 | static bool CanSchedule(KernelCore& kernel) { |
| 72 | return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() == 0; | 83 | return GetCurrentThread(kernel).GetDisableDispatchCount() == 0; |
| 73 | } | 84 | } |
| 74 | static bool IsSchedulerLockedByCurrentThread(KernelCore& kernel) { | 85 | static bool IsSchedulerLockedByCurrentThread(KernelCore& kernel) { |
| 75 | return kernel.GlobalSchedulerContext().scheduler_lock.IsLockedByCurrentThread(); | 86 | return kernel.GlobalSchedulerContext().scheduler_lock.IsLockedByCurrentThread(); |
@@ -113,7 +124,7 @@ private:
| 113 | 124 | ||
| 114 | // Instanced private API. | 125 | // Instanced private API. |
| 115 | void ScheduleImpl(); | 126 | void ScheduleImpl(); |
| 116 | void ScheduleImplOffStack(); | 127 | void ScheduleImplFiber(); |
| 117 | void SwitchThread(KThread* next_thread); | 128 | void SwitchThread(KThread* next_thread); |
| 118 | 129 | ||
| 119 | void Schedule(); | 130 | void Schedule(); |
@@ -147,9 +158,10 @@ private:
| 147 | KThread* m_idle_thread{nullptr}; | 158 | KThread* m_idle_thread{nullptr}; |
| 148 | std::atomic<KThread*> m_current_thread{nullptr}; | 159 | std::atomic<KThread*> m_current_thread{nullptr}; |
| 149 | 160 | ||
| 150 | std::shared_ptr<Common::Fiber> m_idle_stack{}; | 161 | std::shared_ptr<Common::Fiber> m_switch_fiber{}; |
| 151 | KThread* m_idle_cur_thread{}; | 162 | KThread* m_switch_cur_thread{}; |
| 152 | KThread* m_idle_highest_priority_thread{}; | 163 | KThread* m_switch_highest_priority_thread{}; |
| 164 | bool m_switch_from_schedule{}; | ||
| 153 | }; | 165 | }; |
| 154 | 166 | ||
| 155 | class KScopedSchedulerLock : public KScopedLock<KScheduler::LockType> { | 167 | class KScopedSchedulerLock : public KScopedLock<KScheduler::LockType> { |
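
Control reaches `ScheduleImplFiber` through `Common::Fiber::YieldTo` rather than a function call, so its "arguments" travel through the new `m_switch_*` members; `m_switch_from_schedule` is what lets `PreemptSingleCore` jump into the fiber directly without passing through `ScheduleImpl`. A simplified model of that handoff (stand-in types, not the real class):

```cpp
#include <cassert>

struct Thread {};

struct SwitchHandoff {
    Thread* cur_thread{};
    Thread* highest_priority_thread{};
    bool from_schedule{};

    // Scheduling side: publish, then YieldTo(switch fiber) in the real code.
    void Publish(Thread* cur, Thread* next) {
        cur_thread = cur;
        highest_priority_thread = next;
        from_schedule = true;
    }

    // Top of the switch fiber's loop: false means we arrived via single-core
    // preemption and should restart thread selection directly.
    bool ConsumeFromSchedule() {
        const bool came_from_schedule = from_schedule;
        from_schedule = false;
        return came_from_schedule;
    }
};

int main() {
    SwitchHandoff h;
    Thread a, b;
    h.Publish(&a, &b);
    assert(h.ConsumeFromSchedule());  // normal schedule
    assert(!h.ConsumeFromSchedule()); // e.g. direct preemption entry
}
```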
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 9daa589b5..d5d390f04 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -268,7 +268,7 @@ Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32
| 268 | 268 | ||
| 269 | Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { | 269 | Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { |
| 270 | return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, | 270 | return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, |
| 271 | abort); | 271 | system.GetCpuManager().GetIdleThreadStartFunc()); |
| 272 | } | 272 | } |
| 273 | 273 | ||
| 274 | Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread, | 274 | Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread, |
@@ -1204,8 +1204,9 @@ KScopedDisableDispatch::~KScopedDisableDispatch() {
| 1204 | return; | 1204 | return; |
| 1205 | } | 1205 | } |
| 1206 | 1206 | ||
| 1207 | // Skip the reschedule if single-core, as dispatch tracking is disabled here. | 1207 | // Skip the reschedule if single-core. |
| 1208 | if (!Settings::values.use_multi_core.GetValue()) { | 1208 | if (!Settings::values.use_multi_core.GetValue()) { |
| 1209 | GetCurrentThread(kernel).EnableDispatch(); | ||
| 1209 | return; | 1210 | return; |
| 1210 | } | 1211 | } |
| 1211 | 1212 | ||
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index 416a861a9..1fc8f5f3e 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -439,7 +439,6 @@ public:
| 439 | bool is_pinned; | 439 | bool is_pinned; |
| 440 | s32 disable_count; | 440 | s32 disable_count; |
| 441 | KThread* cur_thread; | 441 | KThread* cur_thread; |
| 442 | std::atomic<bool> m_lock; | ||
| 443 | }; | 442 | }; |
| 444 | 443 | ||
| 445 | [[nodiscard]] StackParameters& GetStackParameters() { | 444 | [[nodiscard]] StackParameters& GetStackParameters() { |
@@ -485,39 +484,16 @@ public:
| 485 | return per_core_priority_queue_entry[core]; | 484 | return per_core_priority_queue_entry[core]; |
| 486 | } | 485 | } |
| 487 | 486 | ||
| 488 | [[nodiscard]] bool IsKernelThread() const { | ||
| 489 | return GetActiveCore() == 3; | ||
| 490 | } | ||
| 491 | |||
| 492 | [[nodiscard]] bool IsDispatchTrackingDisabled() const { | ||
| 493 | return is_single_core || IsKernelThread(); | ||
| 494 | } | ||
| 495 | |||
| 496 | [[nodiscard]] s32 GetDisableDispatchCount() const { | 487 | [[nodiscard]] s32 GetDisableDispatchCount() const { |
| 497 | if (IsDispatchTrackingDisabled()) { | ||
| 498 | // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch. | ||
| 499 | return 1; | ||
| 500 | } | ||
| 501 | |||
| 502 | return this->GetStackParameters().disable_count; | 488 | return this->GetStackParameters().disable_count; |
| 503 | } | 489 | } |
| 504 | 490 | ||
| 505 | void DisableDispatch() { | 491 | void DisableDispatch() { |
| 506 | if (IsDispatchTrackingDisabled()) { | ||
| 507 | // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch. | ||
| 508 | return; | ||
| 509 | } | ||
| 510 | |||
| 511 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); | 492 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); |
| 512 | this->GetStackParameters().disable_count++; | 493 | this->GetStackParameters().disable_count++; |
| 513 | } | 494 | } |
| 514 | 495 | ||
| 515 | void EnableDispatch() { | 496 | void EnableDispatch() { |
| 516 | if (IsDispatchTrackingDisabled()) { | ||
| 517 | // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch. | ||
| 518 | return; | ||
| 519 | } | ||
| 520 | |||
| 521 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0); | 497 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0); |
| 522 | this->GetStackParameters().disable_count--; | 498 | this->GetStackParameters().disable_count--; |
| 523 | } | 499 | } |
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 10e1f47f6..926c6dc84 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -64,8 +64,6 @@ struct KernelCore::Impl {
| 64 | 64 | ||
| 65 | is_phantom_mode_for_singlecore = false; | 65 | is_phantom_mode_for_singlecore = false; |
| 66 | 66 | ||
| 67 | InitializePhysicalCores(); | ||
| 68 | |||
| 69 | // Derive the initial memory layout from the emulated board | 67 | // Derive the initial memory layout from the emulated board |
| 70 | Init::InitializeSlabResourceCounts(kernel); | 68 | Init::InitializeSlabResourceCounts(kernel); |
| 71 | DeriveInitialMemoryLayout(); | 69 | DeriveInitialMemoryLayout(); |
@@ -77,6 +75,7 @@ struct KernelCore::Impl {
| 77 | Init::InitializeKPageBufferSlabHeap(system); | 75 | Init::InitializeKPageBufferSlabHeap(system); |
| 78 | InitializeShutdownThreads(); | 76 | InitializeShutdownThreads(); |
| 79 | InitializePreemption(kernel); | 77 | InitializePreemption(kernel); |
| 78 | InitializePhysicalCores(); | ||
| 80 | 79 | ||
| 81 | RegisterHostThread(); | 80 | RegisterHostThread(); |
| 82 | } | 81 | } |
@@ -193,8 +192,21 @@ struct KernelCore::Impl {
| 193 | exclusive_monitor = | 192 | exclusive_monitor = |
| 194 | Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES); | 193 | Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES); |
| 195 | for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { | 194 | for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { |
| 195 | const s32 core{static_cast<s32>(i)}; | ||
| 196 | |||
| 196 | schedulers[i] = std::make_unique<Kernel::KScheduler>(system.Kernel()); | 197 | schedulers[i] = std::make_unique<Kernel::KScheduler>(system.Kernel()); |
| 197 | cores.emplace_back(i, system, *schedulers[i], interrupts); | 198 | cores.emplace_back(i, system, *schedulers[i], interrupts); |
| 199 | |||
| 200 | auto* main_thread{Kernel::KThread::Create(system.Kernel())}; | ||
| 201 | main_thread->SetName(fmt::format("MainThread:{}", core)); | ||
| 202 | main_thread->SetCurrentCore(core); | ||
| 203 | ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess()); | ||
| 204 | |||
| 205 | auto* idle_thread{Kernel::KThread::Create(system.Kernel())}; | ||
| 206 | idle_thread->SetCurrentCore(core); | ||
| 207 | ASSERT(Kernel::KThread::InitializeIdleThread(system, idle_thread, core).IsSuccess()); | ||
| 208 | |||
| 209 | schedulers[i]->Initialize(main_thread, idle_thread, core); | ||
| 198 | } | 210 | } |
| 199 | } | 211 | } |
| 200 | 212 | ||
@@ -1093,10 +1105,11 @@ void KernelCore::Suspend(bool suspended) {
| 1093 | } | 1105 | } |
| 1094 | 1106 | ||
| 1095 | void KernelCore::ShutdownCores() { | 1107 | void KernelCore::ShutdownCores() { |
| 1108 | KScopedSchedulerLock lk{*this}; | ||
| 1109 | |||
| 1096 | for (auto* thread : impl->shutdown_threads) { | 1110 | for (auto* thread : impl->shutdown_threads) { |
| 1097 | void(thread->Run()); | 1111 | void(thread->Run()); |
| 1098 | } | 1112 | } |
| 1099 | InterruptAllPhysicalCores(); | ||
| 1100 | } | 1113 | } |
| 1101 | 1114 | ||
| 1102 | bool KernelCore::IsMulticore() const { | 1115 | bool KernelCore::IsMulticore() const { |
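
Main and idle threads are now created eagerly in `InitializePhysicalCores`, so every `KScheduler` is fully wired (core id, main thread, idle thread) before any host core thread starts, and `RunThread` only has to look up the scheduler's current thread. A toy model of the new ordering (simplified types, not yuzu's API):

```cpp
#include <array>
#include <cassert>
#include <memory>

struct Thread { int core; bool is_idle; };

struct Scheduler {
    Thread* main{};
    Thread* idle{};
    int core_id{-1};
    void Initialize(Thread* m, Thread* i, int id) { main = m; idle = i; core_id = id; }
};

int main() {
    constexpr int kCores = 4;
    std::array<Scheduler, kCores> schedulers{};
    std::array<std::unique_ptr<Thread>, kCores * 2> pool{};

    for (int core = 0; core < kCores; ++core) {
        pool[core * 2] = std::make_unique<Thread>(Thread{core, false});    // main
        pool[core * 2 + 1] = std::make_unique<Thread>(Thread{core, true}); // idle
        schedulers[core].Initialize(pool[core * 2].get(), pool[core * 2 + 1].get(), core);
    }

    // RunThread-equivalent precondition: both threads already exist.
    for (const auto& s : schedulers) {
        assert(s.main && s.idle && s.core_id >= 0);
    }
}
```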
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index a5b16ae2e..6e7dacf97 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -43,6 +43,7 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
| 43 | 43 | ||
| 44 | void PhysicalCore::Run() { | 44 | void PhysicalCore::Run() { |
| 45 | arm_interface->Run(); | 45 | arm_interface->Run(); |
| 46 | arm_interface->ClearExclusiveState(); | ||
| 46 | } | 47 | } |
| 47 | 48 | ||
| 48 | void PhysicalCore::Idle() { | 49 | void PhysicalCore::Idle() { |
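
The added `ClearExclusiveState()` drops any open load-exclusive reservation when a run slice ends, so a different thread scheduled onto this core cannot complete a store-exclusive against a reservation it never made. A toy monitor showing the hazard (conceptual only; dynarmic's real exclusive monitor is more involved):

```cpp
#include <cassert>
#include <optional>

// Minimal exclusive-monitor model: LDXR opens a reservation, STXR succeeds
// only against a matching open reservation.
struct ExclusiveMonitor {
    std::optional<unsigned> reservation;
    void LoadExclusive(unsigned addr) { reservation = addr; }
    bool StoreExclusive(unsigned addr) {
        const bool ok = (reservation == addr);
        reservation.reset();
        return ok;
    }
    void Clear() { reservation.reset(); }
};

int main() {
    ExclusiveMonitor mon;
    mon.LoadExclusive(0x1000);           // thread A's slice ends mid LDXR/STXR
    mon.Clear();                         // PhysicalCore::Run now clears here
    assert(!mon.StoreExclusive(0x1000)); // thread B cannot spuriously succeed
}
```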