summary refs log tree commit diff
diff options
context:
space:
mode:
authorGravatar bunnei2022-01-27 12:17:14 -0800
committerGravatar bunnei2022-01-27 12:17:14 -0800
commit3a1a3dd0db4256eef6382706c84c7b908b48bccd (patch)
treec042cfa45d8c043371e7e41d16ec619be5ed5803
parentMerge pull request #7783 from lioncash/abi-cexpr (diff)
downloadyuzu-3a1a3dd0db4256eef6382706c84c7b908b48bccd.tar.gz
yuzu-3a1a3dd0db4256eef6382706c84c7b908b48bccd.tar.xz
yuzu-3a1a3dd0db4256eef6382706c84c7b908b48bccd.zip
hle: kernel: KScheduler: Fix deadlock with core waiting for a thread lock that has migrated.
- Previously, it was possible for a thread migration to occur from core A to core B.
- Next, core B waits on a guest lock that must be released by a thread queued for core A.
- Meanwhile, core A is still waiting on core B's current thread lock - resulting in a deadlock.
- Fix this by try-locking the thread lock.
- Fixes softlocks in FF8 and Pokemon Legends Arceus.
Diffstat (limited to '')
-rw-r--r--src/core/hle/kernel/k_priority_queue.h2
-rw-r--r--src/core/hle/kernel/k_scheduler.cpp45
2 files changed, 24 insertions, 23 deletions
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
index 0b894c8cf..bd779739d 100644
--- a/src/core/hle/kernel/k_priority_queue.h
+++ b/src/core/hle/kernel/k_priority_queue.h
@@ -258,7 +258,7 @@ private:
258 258
259private: 259private:
260 constexpr void ClearAffinityBit(u64& affinity, s32 core) { 260 constexpr void ClearAffinityBit(u64& affinity, s32 core) {
261 affinity &= ~(u64(1) << core); 261 affinity &= ~(UINT64_C(1) << core);
262 } 262 }
263 263
264 constexpr s32 GetNextCore(u64& affinity) { 264 constexpr s32 GetNextCore(u64& affinity) {
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index b32d4f285..c96520828 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -710,23 +710,19 @@ void KScheduler::Unload(KThread* thread) {
710} 710}
711 711
712void KScheduler::Reload(KThread* thread) { 712void KScheduler::Reload(KThread* thread) {
713 LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread ? thread->GetName() : "nullptr"); 713 LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread->GetName());
714 714
715 if (thread) { 715 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
716 ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable."); 716 cpu_core.LoadContext(thread->GetContext32());
717 717 cpu_core.LoadContext(thread->GetContext64());
718 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); 718 cpu_core.SetTlsAddress(thread->GetTLSAddress());
719 cpu_core.LoadContext(thread->GetContext32()); 719 cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
720 cpu_core.LoadContext(thread->GetContext64()); 720 cpu_core.ClearExclusiveState();
721 cpu_core.SetTlsAddress(thread->GetTLSAddress());
722 cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
723 cpu_core.ClearExclusiveState();
724 }
725} 721}
726 722
727void KScheduler::SwitchContextStep2() { 723void KScheduler::SwitchContextStep2() {
728 // Load context of new thread 724 // Load context of new thread
729 Reload(current_thread.load()); 725 Reload(GetCurrentThread());
730 726
731 RescheduleCurrentCore(); 727 RescheduleCurrentCore();
732} 728}
@@ -735,13 +731,17 @@ void KScheduler::ScheduleImpl() {
735 KThread* previous_thread = GetCurrentThread(); 731 KThread* previous_thread = GetCurrentThread();
736 KThread* next_thread = state.highest_priority_thread; 732 KThread* next_thread = state.highest_priority_thread;
737 733
738 state.needs_scheduling = false; 734 state.needs_scheduling.store(false);
739 735
740 // We never want to schedule a null thread, so use the idle thread if we don't have a next. 736 // We never want to schedule a null thread, so use the idle thread if we don't have a next.
741 if (next_thread == nullptr) { 737 if (next_thread == nullptr) {
742 next_thread = idle_thread; 738 next_thread = idle_thread;
743 } 739 }
744 740
741 if (next_thread->GetCurrentCore() != core_id) {
742 next_thread->SetCurrentCore(core_id);
743 }
744
745 // We never want to schedule a dummy thread, as these are only used by host threads for locking. 745 // We never want to schedule a dummy thread, as these are only used by host threads for locking.
746 if (next_thread->GetThreadType() == ThreadType::Dummy) { 746 if (next_thread->GetThreadType() == ThreadType::Dummy) {
747 ASSERT_MSG(false, "Dummy threads should never be scheduled!"); 747 ASSERT_MSG(false, "Dummy threads should never be scheduled!");
@@ -755,14 +755,8 @@ void KScheduler::ScheduleImpl() {
755 return; 755 return;
756 } 756 }
757 757
758 if (next_thread->GetCurrentCore() != core_id) { 758 // Update the CPU time tracking variables.
759 next_thread->SetCurrentCore(core_id);
760 }
761
762 current_thread.store(next_thread);
763
764 KProcess* const previous_process = system.Kernel().CurrentProcess(); 759 KProcess* const previous_process = system.Kernel().CurrentProcess();
765
766 UpdateLastContextSwitchTime(previous_thread, previous_process); 760 UpdateLastContextSwitchTime(previous_thread, previous_process);
767 761
768 // Save context for previous thread 762 // Save context for previous thread
@@ -770,6 +764,10 @@ void KScheduler::ScheduleImpl() {
770 764
771 std::shared_ptr<Common::Fiber>* old_context; 765 std::shared_ptr<Common::Fiber>* old_context;
772 old_context = &previous_thread->GetHostContext(); 766 old_context = &previous_thread->GetHostContext();
767
768 // Set the new thread.
769 current_thread.store(next_thread);
770
773 guard.Unlock(); 771 guard.Unlock();
774 772
775 Common::Fiber::YieldTo(*old_context, *switch_fiber); 773 Common::Fiber::YieldTo(*old_context, *switch_fiber);
@@ -797,8 +795,8 @@ void KScheduler::SwitchToCurrent() {
797 do { 795 do {
798 auto next_thread = current_thread.load(); 796 auto next_thread = current_thread.load();
799 if (next_thread != nullptr) { 797 if (next_thread != nullptr) {
800 next_thread->context_guard.Lock(); 798 const auto locked = next_thread->context_guard.TryLock();
801 if (next_thread->GetRawState() != ThreadState::Runnable) { 799 if (state.needs_scheduling.load()) {
802 next_thread->context_guard.Unlock(); 800 next_thread->context_guard.Unlock();
803 break; 801 break;
804 } 802 }
@@ -806,6 +804,9 @@ void KScheduler::SwitchToCurrent() {
806 next_thread->context_guard.Unlock(); 804 next_thread->context_guard.Unlock();
807 break; 805 break;
808 } 806 }
807 if (!locked) {
808 continue;
809 }
809 } 810 }
810 auto thread = next_thread ? next_thread : idle_thread; 811 auto thread = next_thread ? next_thread : idle_thread;
811 Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext()); 812 Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());