| author | 2021-01-21 11:26:00 -0800 |
|---|---|
| committer | 2021-01-28 21:42:26 -0800 |
| commit | 37f74d87417c8cb491fee1681cc05fb7baa5e516 (patch) |
| tree | 3a3f7bd10787666f6e4c81f3a8f6926ac5bf0da9 /src |
| parent | hle: kernel: k_scheduler: Fix for single core mode. (diff) |
| download | yuzu-37f74d87417c8cb491fee1681cc05fb7baa5e516.tar.gz yuzu-37f74d87417c8cb491fee1681cc05fb7baa5e516.tar.xz yuzu-37f74d87417c8cb491fee1681cc05fb7baa5e516.zip |
hle: kernel: k_scheduler: Use atomics for current_thread, etc.
Diffstat (limited to 'src')
| -rw-r--r-- | src/core/hle/kernel/k_scheduler.cpp | 47 |
| -rw-r--r-- | src/core/hle/kernel/k_scheduler.h | 7 |
2 files changed, 28 insertions, 26 deletions
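The heart of the change is visible in the header diff at the bottom: `current_thread` becomes a `std::atomic<KThread*>`, so the core that owns the scheduler can publish a new pointer while other host threads read it through `GetCurrentThread()` without a data race. Below is a minimal sketch of that publish/read pattern; the `Thread` type, the global, and the polling reader are simplified stand-ins for illustration, not yuzu's actual API.

```cpp
#include <atomic>
#include <cstdio>
#include <thread>

struct Thread {
    const char* name;
};

// Shared between the scheduler (writer) and other host threads (readers).
// With a plain `Thread*` this concurrent access would be a data race; the
// atomic makes every load and store well defined (seq_cst by default).
std::atomic<Thread*> current_thread{nullptr};

int main() {
    Thread main_thread{"main"};

    // Reader: polls until the scheduler publishes a thread.
    std::thread reader([] {
        Thread* seen = nullptr;
        while ((seen = current_thread.load()) == nullptr) {
            std::this_thread::yield();
        }
        std::printf("reader saw: %s\n", seen->name);
    });

    // Writer: plays the role of the core that owns the scheduler.
    current_thread.store(&main_thread);

    reader.join();
    return 0;
}
```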
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index e8e3b3dc5..fbdc061df 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
| @@ -80,7 +80,7 @@ u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) { |
| 80 | } | 80 | } |
| 81 | 81 | ||
| 82 | state.highest_priority_thread = highest_thread; | 82 | state.highest_priority_thread = highest_thread; |
| 83 | state.needs_scheduling = true; | 83 | state.needs_scheduling.store(true); |
| 84 | return (1ULL << core_id); | 84 | return (1ULL << core_id); |
| 85 | } else { | 85 | } else { |
| 86 | return 0; | 86 | return 0; |
| @@ -609,7 +609,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) { |
| 609 | 609 | ||
| 610 | KScheduler::KScheduler(Core::System& system, s32 core_id) : system(system), core_id(core_id) { | 610 | KScheduler::KScheduler(Core::System& system, s32 core_id) : system(system), core_id(core_id) { |
| 611 | switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this); | 611 | switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this); |
| 612 | state.needs_scheduling = true; | 612 | state.needs_scheduling.store(true); |
| 613 | state.interrupt_task_thread_runnable = false; | 613 | state.interrupt_task_thread_runnable = false; |
| 614 | state.should_count_idle = false; | 614 | state.should_count_idle = false; |
| 615 | state.idle_count = 0; | 615 | state.idle_count = 0; |
| @@ -620,10 +620,10 @@ KScheduler::KScheduler(Core::System& system, s32 core_id) : system(system), core |
| 620 | KScheduler::~KScheduler() = default; | 620 | KScheduler::~KScheduler() = default; |
| 621 | 621 | ||
| 622 | KThread* KScheduler::GetCurrentThread() const { | 622 | KThread* KScheduler::GetCurrentThread() const { |
| 623 | if (current_thread) { | 623 | if (auto result = current_thread.load(); result) { |
| 624 | return current_thread; | 624 | return result; |
| 625 | } | 625 | } |
| 626 | return idle_thread; | 626 | return idle_thread.get(); |
| 627 | } | 627 | } |
| 628 | 628 | ||
| 629 | u64 KScheduler::GetLastContextSwitchTicks() const { | 629 | u64 KScheduler::GetLastContextSwitchTicks() const { |
| @@ -638,7 +638,7 @@ void KScheduler::RescheduleCurrentCore() { |
| 638 | phys_core.ClearInterrupt(); | 638 | phys_core.ClearInterrupt(); |
| 639 | } | 639 | } |
| 640 | guard.lock(); | 640 | guard.lock(); |
| 641 | if (state.needs_scheduling) { | 641 | if (state.needs_scheduling.load()) { |
| 642 | Schedule(); | 642 | Schedule(); |
| 643 | } else { | 643 | } else { |
| 644 | guard.unlock(); | 644 | guard.unlock(); |
| @@ -695,29 +695,29 @@ void KScheduler::Reload(KThread* thread) { |
| 695 | 695 | ||
| 696 | void KScheduler::SwitchContextStep2() { | 696 | void KScheduler::SwitchContextStep2() { |
| 697 | // Load context of new thread | 697 | // Load context of new thread |
| 698 | Reload(current_thread); | 698 | Reload(current_thread.load()); |
| 699 | 699 | ||
| 700 | RescheduleCurrentCore(); | 700 | RescheduleCurrentCore(); |
| 701 | } | 701 | } |
| 702 | 702 | ||
| 703 | void KScheduler::ScheduleImpl() { | 703 | void KScheduler::ScheduleImpl() { |
| 704 | KThread* previous_thread = current_thread; | 704 | KThread* previous_thread = current_thread.load(); |
| 705 | KThread* next_thread = state.highest_priority_thread; | 705 | KThread* next_thread = state.highest_priority_thread; |
| 706 | 706 | ||
| 707 | state.needs_scheduling = false; | 707 | state.needs_scheduling = false; |
| 708 | 708 | ||
| 709 | // We never want to schedule a null thread, so use the idle thread if we don't have a next. | 709 | // We never want to schedule a null thread, so use the idle thread if we don't have a next. |
| 710 | if (next_thread == nullptr) { | 710 | if (next_thread == nullptr) { |
| 711 | next_thread = idle_thread; | 711 | next_thread = idle_thread.get(); |
| 712 | } | 712 | } |
| 713 | 713 | ||
| 714 | // If we're not actually switching thread, there's nothing to do. | 714 | // If we're not actually switching thread, there's nothing to do. |
| 715 | if (next_thread == current_thread) { | 715 | if (next_thread == current_thread.load()) { |
| 716 | guard.unlock(); | 716 | guard.unlock(); |
| 717 | return; | 717 | return; |
| 718 | } | 718 | } |
| 719 | 719 | ||
| 720 | current_thread = next_thread; | 720 | current_thread.store(next_thread); |
| 721 | 721 | ||
| 722 | Process* const previous_process = system.Kernel().CurrentProcess(); | 722 | Process* const previous_process = system.Kernel().CurrentProcess(); |
| 723 | 723 | ||
| @@ -749,28 +749,29 @@ void KScheduler::SwitchToCurrent() { |
| 749 | while (true) { | 749 | while (true) { |
| 750 | { | 750 | { |
| 751 | std::scoped_lock lock{guard}; | 751 | std::scoped_lock lock{guard}; |
| 752 | current_thread = state.highest_priority_thread; | 752 | current_thread.store(state.highest_priority_thread); |
| 753 | state.needs_scheduling = false; | 753 | state.needs_scheduling.store(false); |
| 754 | } | 754 | } |
| 755 | const auto is_switch_pending = [this] { | 755 | const auto is_switch_pending = [this] { |
| 756 | std::scoped_lock lock{guard}; | 756 | std::scoped_lock lock{guard}; |
| 757 | return state.needs_scheduling.load(std::memory_order_relaxed); | 757 | return state.needs_scheduling.load(); |
| 758 | }; | 758 | }; |
| 759 | do { | 759 | do { |
| 760 | if (current_thread != nullptr) { | 760 | auto next_thread = current_thread.load(); |
| 761 | current_thread->context_guard.lock(); | 761 | if (next_thread != nullptr) { |
| 762 | if (current_thread->GetRawState() != ThreadState::Runnable) { | 762 | next_thread->context_guard.lock(); |
| 763 | current_thread->context_guard.unlock(); | 763 | if (next_thread->GetRawState() != ThreadState::Runnable) { |
| | | 764 | next_thread->context_guard.unlock(); |
| 764 | break; | 765 | break; |
| 765 | } | 766 | } |
| 766 | if (current_thread->GetActiveCore() != core_id) { | 767 | if (next_thread->GetActiveCore() != core_id) { |
| 767 | current_thread->context_guard.unlock(); | 768 | next_thread->context_guard.unlock(); |
| 768 | break; | 769 | break; |
| 769 | } | 770 | } |
| 770 | } | 771 | } |
| 771 | std::shared_ptr<Common::Fiber>* next_context; | 772 | std::shared_ptr<Common::Fiber>* next_context; |
| 772 | if (current_thread != nullptr) { | 773 | if (next_thread != nullptr) { |
| 773 | next_context = ¤t_thread->GetHostContext(); | 774 | next_context = &next_thread->GetHostContext(); |
| 774 | } else { | 775 | } else { |
| 775 | next_context = &idle_thread->GetHostContext(); | 776 | next_context = &idle_thread->GetHostContext(); |
| 776 | } | 777 | } |
| @@ -802,7 +803,7 @@ void KScheduler::Initialize() { |
| 802 | auto thread_res = KThread::Create(system, ThreadType::Main, name, 0, | 803 | auto thread_res = KThread::Create(system, ThreadType::Main, name, 0, |
| 803 | KThread::IdleThreadPriority, 0, static_cast<u32>(core_id), 0, | 804 | KThread::IdleThreadPriority, 0, static_cast<u32>(core_id), 0, |
| 804 | nullptr, std::move(init_func), init_func_parameter); | 805 | nullptr, std::move(init_func), init_func_parameter); |
| 805 | idle_thread = thread_res.Unwrap().get(); | 806 | idle_thread = thread_res.Unwrap(); |
| 806 | } | 807 | } |
| 807 | 808 | ||
| 808 | KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel) | 809 | KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel) |
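A detail worth calling out in the `SwitchToCurrent` hunk above: instead of re-reading `current_thread` at every step, the new code takes a single `current_thread.load()` into a local `next_thread` and performs the null check, the lock, and the state checks on that snapshot, so a store from another core cannot swap the pointer out from under the sequence. (The `is_switch_pending` lambda also drops its explicit `std::memory_order_relaxed`; a bare `load()` defaults to `std::memory_order_seq_cst`.) The sketch below contrasts the two shapes with simplified stand-in types, not the actual yuzu classes.

```cpp
#include <atomic>

struct Thread {
    bool runnable = true;
    void Lock() {}
    void Unlock() {}
};

// Pointer to the thread selected for this core; another core may store to
// it at any time.
std::atomic<Thread*> current_thread{nullptr};

// Fragile: every load may observe a different pointer, so the null check,
// the lock, and the unlock might each act on a different thread.
void SwitchFragile() {
    if (current_thread.load() != nullptr) {
        current_thread.load()->Lock();
        if (!current_thread.load()->runnable) {
            current_thread.load()->Unlock();
        }
    }
}

// Snapshot: load once and act only on the local copy, the shape the commit
// gives KScheduler::SwitchToCurrent via its local `next_thread`.
void SwitchWithSnapshot() {
    Thread* const next_thread = current_thread.load();
    if (next_thread != nullptr) {
        next_thread->Lock();
        if (!next_thread->runnable) {
            next_thread->Unlock();
        }
    }
}

int main() {
    Thread t{};
    current_thread.store(&t);
    SwitchFragile();
    SwitchWithSnapshot();
    return 0;
}
```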
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 2308a55be..e0d052593 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
| @@ -54,7 +54,7 @@ public: |
| 54 | 54 | ||
| 55 | /// Returns true if the scheduler is idle | 55 | /// Returns true if the scheduler is idle |
| 56 | [[nodiscard]] bool IsIdle() const { | 56 | [[nodiscard]] bool IsIdle() const { |
| 57 | return GetCurrentThread() == idle_thread; | 57 | return GetCurrentThread() == idle_thread.get(); |
| 58 | } | 58 | } |
| 59 | 59 | ||
| 60 | /// Gets the timestamp for the last context switch in ticks. | 60 | /// Gets the timestamp for the last context switch in ticks. |
| @@ -174,8 +174,9 @@ private: |
| 174 | void SwitchToCurrent(); | 174 | void SwitchToCurrent(); |
| 175 | 175 | ||
| 176 | KThread* prev_thread{}; | 176 | KThread* prev_thread{}; |
| 177 | KThread* current_thread{}; | 177 | std::atomic<KThread*> current_thread{}; |
| 178 | KThread* idle_thread{}; | 178 | |
| | | 179 | std::shared_ptr<KThread> idle_thread; |
| 179 | 180 | ||
| 180 | std::shared_ptr<Common::Fiber> switch_fiber{}; | 181 | std::shared_ptr<Common::Fiber> switch_fiber{}; |
| 181 | 182 | ||
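The other half of the header change is ownership: `idle_thread` goes from a non-owning `KThread*` to a `std::shared_ptr<KThread>` held by the scheduler itself, which is why `Initialize()` now stores `thread_res.Unwrap()` directly and why `GetCurrentThread()` and `IsIdle()` compare against `idle_thread.get()`. A rough sketch of that ownership pattern, using placeholder types rather than the real `KThread` API:

```cpp
#include <atomic>
#include <cassert>
#include <memory>

struct Thread {
    int id;
};

class Scheduler {
public:
    void Initialize() {
        // The scheduler owns its idle thread outright; nothing else has to
        // keep it alive for the raw pointers handed out below to stay valid.
        idle_thread = std::make_shared<Thread>(Thread{-1});
    }

    Thread* GetCurrentThread() const {
        if (Thread* t = current_thread.load(); t != nullptr) {
            return t;
        }
        return idle_thread.get();  // fall back to the owned idle thread
    }

    bool IsIdle() const {
        // Raw-pointer comparison against the owned object, as in KScheduler::IsIdle.
        return GetCurrentThread() == idle_thread.get();
    }

    void SetCurrentThread(Thread* thread) {
        current_thread.store(thread);
    }

private:
    std::atomic<Thread*> current_thread{nullptr};
    std::shared_ptr<Thread> idle_thread;
};

int main() {
    Scheduler sched;
    sched.Initialize();
    assert(sched.IsIdle());

    Thread worker{7};
    sched.SetCurrentThread(&worker);
    assert(!sched.IsIdle());
    return 0;
}
```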