diff options
| author | 2020-03-06 09:52:24 -0400 | |
|---|---|---|
| committer | 2020-06-27 11:35:28 -0400 | |
| commit | 1c672128c421ea3141a74f9c6695ecc83231ca30 (patch) | |
| tree | 67af0f05adcdfce6aa1bd75d609e1c09190d54a7 /src/core/hle/kernel/scheduler.cpp | |
| parent | NVDRV: Remove frame limiting as Host Timing already takes care. (diff) | |
| download | yuzu-1c672128c421ea3141a74f9c6695ecc83231ca30.tar.gz yuzu-1c672128c421ea3141a74f9c6695ecc83231ca30.tar.xz yuzu-1c672128c421ea3141a74f9c6695ecc83231ca30.zip | |
Scheduler: Release old thread fiber before trying to switch to the next thread fiber.
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
| -rw-r--r-- | src/core/hle/kernel/scheduler.cpp | 37 |
1 file changed, 26 insertions(+), 11 deletions(-)
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index 9329202c6..aa1f1a305 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp | |||
| @@ -53,7 +53,8 @@ u32 GlobalScheduler::SelectThreads() { | |||
| 53 | } | 53 | } |
| 54 | sched.selected_thread_set = SharedFrom(thread); | 54 | sched.selected_thread_set = SharedFrom(thread); |
| 55 | } | 55 | } |
| 56 | const bool reschedule_pending = sched.selected_thread_set != sched.current_thread; | 56 | const bool reschedule_pending = |
| 57 | sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread); | ||
| 57 | sched.is_context_switch_pending = reschedule_pending; | 58 | sched.is_context_switch_pending = reschedule_pending; |
| 58 | std::atomic_thread_fence(std::memory_order_seq_cst); | 59 | std::atomic_thread_fence(std::memory_order_seq_cst); |
| 59 | sched.guard.unlock(); | 60 | sched.guard.unlock(); |
| @@ -552,7 +553,9 @@ void GlobalScheduler::Unlock() { | |||
| 552 | } | 553 | } |
| 553 | 554 | ||
| 554 | Scheduler::Scheduler(Core::System& system, std::size_t core_id) | 555 | Scheduler::Scheduler(Core::System& system, std::size_t core_id) |
| 555 | : system{system}, core_id{core_id} {} | 556 | : system(system), core_id(core_id) { |
| 557 | switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this); | ||
| 558 | } | ||
| 556 | 559 | ||
| 557 | Scheduler::~Scheduler() = default; | 560 | Scheduler::~Scheduler() = default; |
| 558 | 561 | ||
| @@ -636,8 +639,9 @@ void Scheduler::SwitchContext() { | |||
| 636 | current_thread = selected_thread; | 639 | current_thread = selected_thread; |
| 637 | 640 | ||
| 638 | is_context_switch_pending = false; | 641 | is_context_switch_pending = false; |
| 639 | guard.unlock(); | 642 | |
| 640 | if (new_thread == previous_thread) { | 643 | if (new_thread == previous_thread) { |
| 644 | guard.unlock(); | ||
| 641 | return; | 645 | return; |
| 642 | } | 646 | } |
| 643 | 647 | ||
| @@ -669,20 +673,31 @@ void Scheduler::SwitchContext() { | |||
| 669 | } else { | 673 | } else { |
| 670 | old_context = idle_thread->GetHostContext(); | 674 | old_context = idle_thread->GetHostContext(); |
| 671 | } | 675 | } |
| 676 | guard.unlock(); | ||
| 672 | 677 | ||
| 673 | std::shared_ptr<Common::Fiber> next_context; | 678 | Common::Fiber::YieldTo(old_context, switch_fiber); |
| 674 | if (new_thread != nullptr) { | ||
| 675 | next_context = new_thread->GetHostContext(); | ||
| 676 | } else { | ||
| 677 | next_context = idle_thread->GetHostContext(); | ||
| 678 | } | ||
| 679 | |||
| 680 | Common::Fiber::YieldTo(old_context, next_context); | ||
| 681 | /// When a thread wakes up, the scheduler may have changed to another one on a different core. | 679 |
| 682 | auto& next_scheduler = system.Kernel().CurrentScheduler(); | 680 | auto& next_scheduler = system.Kernel().CurrentScheduler(); |
| 683 | next_scheduler.SwitchContextStep2(); | 681 | next_scheduler.SwitchContextStep2(); |
| 684 | } | 682 | } |
| 685 | 683 | ||
| 684 | void Scheduler::OnSwitch(void* this_scheduler) { | ||
| 685 | Scheduler* sched = static_cast<Scheduler*>(this_scheduler); | ||
| 686 | sched->SwitchToCurrent(); | ||
| 687 | } | ||
| 688 | |||
| 689 | void Scheduler::SwitchToCurrent() { | ||
| 690 | while (true) { | ||
| 691 | std::shared_ptr<Common::Fiber> next_context; | ||
| 692 | if (current_thread != nullptr) { | ||
| 693 | next_context = current_thread->GetHostContext(); | ||
| 694 | } else { | ||
| 695 | next_context = idle_thread->GetHostContext(); | ||
| 696 | } | ||
| 697 | Common::Fiber::YieldTo(switch_fiber, next_context); | ||
| 698 | } | ||
| 699 | } | ||
| 700 | |||
| 686 | void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { | 701 | void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { |
| 687 | const u64 prev_switch_ticks = last_context_switch_time; | 702 | const u64 prev_switch_ticks = last_context_switch_time; |
| 688 | const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks(); | 703 | const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks(); |