diff options
| author | 2021-08-06 23:16:12 -0700 | |
|---|---|---|
| committer | 2021-12-06 16:39:16 -0800 | |
| commit | 13c82d042f10dbaec7fb66764bf4b636e7a2949b (patch) | |
| tree | 6e2e12e6f52e852c049b29c5e525a2f85dce3a36 /src/core/hle/kernel | |
| parent | core: hle: kernel: k_scheduler: Improve Unload. (diff) | |
| download | yuzu-13c82d042f10dbaec7fb66764bf4b636e7a2949b.tar.gz yuzu-13c82d042f10dbaec7fb66764bf4b636e7a2949b.tar.xz yuzu-13c82d042f10dbaec7fb66764bf4b636e7a2949b.zip | |
core: hle: kernel: k_scheduler: Improve ScheduleImpl.
Diffstat (limited to 'src/core/hle/kernel')
| -rw-r--r-- | src/core/hle/kernel/k_scheduler.cpp | 13 |
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index 5ee4b8adc..e523c4923 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp | |||
| @@ -721,7 +721,7 @@ void KScheduler::SwitchContextStep2() { | |||
| 721 | } | 721 | } |
| 722 | 722 | ||
| 723 | void KScheduler::ScheduleImpl() { | 723 | void KScheduler::ScheduleImpl() { |
| 724 | KThread* previous_thread = current_thread.load(); | 724 | KThread* previous_thread = GetCurrentThread(); |
| 725 | KThread* next_thread = state.highest_priority_thread; | 725 | KThread* next_thread = state.highest_priority_thread; |
| 726 | 726 | ||
| 727 | state.needs_scheduling = false; | 727 | state.needs_scheduling = false; |
| @@ -733,10 +733,15 @@ void KScheduler::ScheduleImpl() { | |||
| 733 | 733 | ||
| 734 | // If we're not actually switching thread, there's nothing to do. | 734 | // If we're not actually switching thread, there's nothing to do. |
| 735 | if (next_thread == current_thread.load()) { | 735 | if (next_thread == current_thread.load()) { |
| 736 | previous_thread->EnableDispatch(); | ||
| 736 | guard.Unlock(); | 737 | guard.Unlock(); |
| 737 | return; | 738 | return; |
| 738 | } | 739 | } |
| 739 | 740 | ||
| 741 | if (next_thread->GetCurrentCore() != core_id) { | ||
| 742 | next_thread->SetCurrentCore(core_id); | ||
| 743 | } | ||
| 744 | |||
| 740 | current_thread.store(next_thread); | 745 | current_thread.store(next_thread); |
| 741 | 746 | ||
| 742 | KProcess* const previous_process = system.Kernel().CurrentProcess(); | 747 | KProcess* const previous_process = system.Kernel().CurrentProcess(); |
| @@ -747,11 +752,7 @@ void KScheduler::ScheduleImpl() { | |||
| 747 | Unload(previous_thread); | 752 | Unload(previous_thread); |
| 748 | 753 | ||
| 749 | std::shared_ptr<Common::Fiber>* old_context; | 754 | std::shared_ptr<Common::Fiber>* old_context; |
| 750 | if (previous_thread != nullptr) { | 755 | old_context = &previous_thread->GetHostContext(); |
| 751 | old_context = &previous_thread->GetHostContext(); | ||
| 752 | } else { | ||
| 753 | old_context = &idle_thread->GetHostContext(); | ||
| 754 | } | ||
| 755 | guard.Unlock(); | 756 | guard.Unlock(); |
| 756 | 757 | ||
| 757 | Common::Fiber::YieldTo(*old_context, *switch_fiber); | 758 | Common::Fiber::YieldTo(*old_context, *switch_fiber); |