| commit | aa40084c241129ef08081bae72bd5de1b4c86348 |
|---|---|
| author | 2021-08-18 15:42:46 -0700 |
| committer | 2021-08-18 15:42:46 -0700 |
| tree | b4f406cbf0f230cf9064040992ce3ef8bf54e5a7 /src/core/hle/kernel |
| parent | Merge pull request #6863 from spholz/fix-lan-play |
| parent | core: hle: kernel: Disable dispatch count tracking on single core. |
Merge pull request #6832 from bunnei/scheduler-improvements
kernel: Various improvements to scheduler
Diffstat (limited to 'src/core/hle/kernel')
| File | Lines changed |
|---|---|
| src/core/hle/kernel/k_address_arbiter.cpp | 4 |
| src/core/hle/kernel/k_auto_object.h | 4 |
| src/core/hle/kernel/k_condition_variable.cpp | 2 |
| src/core/hle/kernel/k_handle_table.cpp | 6 |
| src/core/hle/kernel/k_handle_table.h | 2 |
| src/core/hle/kernel/k_process.cpp | 1 |
| src/core/hle/kernel/k_scheduler.cpp | 85 |
| src/core/hle/kernel/k_scheduler.h | 2 |
| src/core/hle/kernel/k_thread.cpp | 21 |
| src/core/hle/kernel/k_thread.h | 36 |
| src/core/hle/kernel/kernel.cpp | 57 |
| src/core/hle/kernel/kernel.h | 3 |
| src/core/hle/kernel/svc.cpp | 2 |

13 files changed, 151 insertions, 74 deletions
```diff
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 1b429bc1e..6771ef621 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -28,7 +28,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
 
 bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
@@ -58,7 +58,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
 
 bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
```
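These helpers implement atomic read-modify-write on guest memory through the exclusive monitor, keyed by core index, and they can now be reached from host threads that are not pinned to an emulated core; the new `CurrentPhysicalCoreIndex()` accessor (see the kernel.cpp hunks below) clamps out-of-range host thread IDs instead of asserting. As a portable illustration of the retry logic `DecrementIfLessThan` implements, here is a sketch using `std::atomic` in place of yuzu's `ExclusiveRead32`/`ExclusiveWrite32` monitor API:

```cpp
#include <atomic>
#include <cstdint>

// Illustrative sketch only: yuzu implements this on the exclusive monitor,
// keyed by the physical core index; std::atomic stands in for it here.
// Atomically decrement `word` only while it is < value; report what we saw.
bool DecrementIfLessThanSketch(std::atomic<int32_t>& word, int32_t value, int32_t* out) {
    int32_t observed = word.load(std::memory_order_relaxed);
    while (observed < value) {
        // A failed exclusive store (another core wrote first) means retry;
        // compare_exchange_weak reloads `observed` for us on failure.
        if (word.compare_exchange_weak(observed, observed - 1, std::memory_order_seq_cst)) {
            break;
        }
    }
    *out = observed; // the value we acted on (or merely observed)
    return true;
}
```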
```diff
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index e4fcdbc67..165b76747 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -170,6 +170,10 @@ public:
         }
     }
 
+    const std::string& GetName() const {
+        return name;
+    }
+
 private:
     void RegisterWithKernel();
     void UnregisterWithKernel();
```
```diff
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index ef14ad1d2..4174f35fd 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -35,7 +35,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
 bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
                       u32 new_orr_mask) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // Load the value from the address.
     const auto expected = monitor.ExclusiveRead32(current_core, address);
```
```diff
diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp
index 6a420d5b0..d720c2dda 100644
--- a/src/core/hle/kernel/k_handle_table.cpp
+++ b/src/core/hle/kernel/k_handle_table.cpp
@@ -13,6 +13,7 @@ ResultCode KHandleTable::Finalize() {
     // Get the table and clear our record of it.
     u16 saved_table_size = 0;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         std::swap(m_table_size, saved_table_size);
@@ -43,6 +44,7 @@ bool KHandleTable::Remove(Handle handle) {
     // Find the object and free the entry.
     KAutoObject* obj = nullptr;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if (this->IsValidHandle(handle)) {
@@ -61,6 +63,7 @@ bool KHandleTable::Remove(Handle handle) {
 }
 
 ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.
@@ -83,6 +86,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
 }
 
 ResultCode KHandleTable::Reserve(Handle* out_handle) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.
@@ -93,6 +97,7 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) {
 }
 
 void KHandleTable::Unreserve(Handle handle) {
+    KScopedDisableDispatch dd(kernel);
    KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.
@@ -111,6 +116,7 @@ void KHandleTable::Unreserve(Handle handle) {
 }
 
 void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.
```
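Every handle-table operation now enters a `KScopedDisableDispatch` region before taking the spinlock, so a thread cannot be dispatched away while it holds `m_lock`. The declaration order is the point: destructors run in reverse, so the lock is released before dispatch is re-enabled, and any deferred reschedule happens lock-free. A self-contained sketch of that ordering guarantee, with stand-in types for the two guards:

```cpp
#include <iostream>

// Stand-ins for KScopedDisableDispatch / KScopedSpinLock, showing only the
// ordering the handle table relies on: the spinlock unlocks before dispatch
// is re-enabled (the step that may trigger a reschedule).
struct ScopedDisableDispatch {
    ScopedDisableDispatch() { std::cout << "dispatch off\n"; }
    ~ScopedDisableDispatch() { std::cout << "dispatch on (may reschedule)\n"; }
};
struct ScopedSpinLock {
    ScopedSpinLock() { std::cout << "lock\n"; }
    ~ScopedSpinLock() { std::cout << "unlock\n"; }
};

int main() {
    {
        ScopedDisableDispatch dd; // 1. pin the thread first
        ScopedSpinLock lk;        // 2. then take the lock
        // ... table operation ...
    } // destructors run in reverse: unlock, then dispatch on
    return 0;
}
```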
```diff
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index 2ff6aa160..75dcec7df 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
@@ -69,6 +69,7 @@ public:
     template <typename T = KAutoObject>
     KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
         // Lock and look up in table.
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if constexpr (std::is_same_v<T, KAutoObject>) {
@@ -123,6 +124,7 @@ public:
         size_t num_opened;
         {
             // Lock the table.
+            KScopedDisableDispatch dd(kernel);
             KScopedSpinLock lk(m_lock);
             for (num_opened = 0; num_opened < num_handles; num_opened++) {
                 // Get the current handle.
```
```diff
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 8ead1a769..3d7e6707e 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -59,6 +59,7 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
     thread->GetContext64().cpu_registers[0] = 0;
     thread->GetContext32().cpu_registers[1] = thread_handle;
     thread->GetContext64().cpu_registers[1] = thread_handle;
+    thread->DisableDispatch();
 
     auto& kernel = system.Kernel();
     // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
```
```diff
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 6a7d80d03..6ddbae52c 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -376,20 +376,18 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
 }
 
 void KScheduler::DisableScheduling(KernelCore& kernel) {
-    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
-        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
-        scheduler->GetCurrentThread()->DisableDispatch();
-    }
+    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
+    GetCurrentThreadPointer(kernel)->DisableDispatch();
 }
 
 void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
-    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
-        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
-        if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
-            scheduler->GetCurrentThread()->EnableDispatch();
-        }
+    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 1);
+
+    if (GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() > 1) {
+        GetCurrentThreadPointer(kernel)->EnableDispatch();
+    } else {
+        RescheduleCores(kernel, cores_needing_scheduling);
     }
-    RescheduleCores(kernel, cores_needing_scheduling);
 }
 
 u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -617,13 +615,17 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
     state.highest_priority_thread = nullptr;
 }
 
-KScheduler::~KScheduler() {
+void KScheduler::Finalize() {
     if (idle_thread) {
         idle_thread->Close();
         idle_thread = nullptr;
     }
 }
 
+KScheduler::~KScheduler() {
+    ASSERT(!idle_thread);
+}
+
 KThread* KScheduler::GetCurrentThread() const {
     if (auto result = current_thread.load(); result) {
         return result;
@@ -642,10 +644,12 @@ void KScheduler::RescheduleCurrentCore() {
     if (phys_core.IsInterrupted()) {
         phys_core.ClearInterrupt();
     }
+
     guard.Lock();
     if (state.needs_scheduling.load()) {
         Schedule();
     } else {
+        GetCurrentThread()->EnableDispatch();
         guard.Unlock();
     }
 }
@@ -655,26 +659,33 @@ void KScheduler::OnThreadStart() {
 }
 
 void KScheduler::Unload(KThread* thread) {
+    ASSERT(thread);
+
     LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
 
-    if (thread) {
-        if (thread->IsCallingSvc()) {
-            thread->ClearIsCallingSvc();
-        }
-        if (!thread->IsTerminationRequested()) {
-            prev_thread = thread;
-
-            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
-            cpu_core.SaveContext(thread->GetContext32());
-            cpu_core.SaveContext(thread->GetContext64());
-            // Save the TPIDR_EL0 system register in case it was modified.
-            thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
-            cpu_core.ClearExclusiveState();
-        } else {
-            prev_thread = nullptr;
-        }
-        thread->context_guard.Unlock();
+    if (thread->IsCallingSvc()) {
+        thread->ClearIsCallingSvc();
     }
+
+    auto& physical_core = system.Kernel().PhysicalCore(core_id);
+    if (!physical_core.IsInitialized()) {
+        return;
+    }
+
+    Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
+    cpu_core.SaveContext(thread->GetContext32());
+    cpu_core.SaveContext(thread->GetContext64());
+    // Save the TPIDR_EL0 system register in case it was modified.
+    thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+    cpu_core.ClearExclusiveState();
+
+    if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
+        prev_thread = thread;
+    } else {
+        prev_thread = nullptr;
+    }
+
+    thread->context_guard.Unlock();
 }
 
 void KScheduler::Reload(KThread* thread) {
@@ -683,11 +694,6 @@ void KScheduler::Reload(KThread* thread) {
     if (thread) {
         ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
 
-        auto* const thread_owner_process = thread->GetOwnerProcess();
-        if (thread_owner_process != nullptr) {
-            system.Kernel().MakeCurrentProcess(thread_owner_process);
-        }
-
         Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
         cpu_core.LoadContext(thread->GetContext32());
         cpu_core.LoadContext(thread->GetContext64());
@@ -705,7 +711,7 @@ void KScheduler::SwitchContextStep2() {
 }
 
 void KScheduler::ScheduleImpl() {
-    KThread* previous_thread = current_thread.load();
+    KThread* previous_thread = GetCurrentThread();
     KThread* next_thread = state.highest_priority_thread;
 
     state.needs_scheduling = false;
@@ -717,10 +723,15 @@
 
     // If we're not actually switching thread, there's nothing to do.
     if (next_thread == current_thread.load()) {
+        previous_thread->EnableDispatch();
         guard.Unlock();
         return;
     }
 
+    if (next_thread->GetCurrentCore() != core_id) {
+        next_thread->SetCurrentCore(core_id);
+    }
+
     current_thread.store(next_thread);
 
     KProcess* const previous_process = system.Kernel().CurrentProcess();
@@ -731,11 +742,7 @@
     Unload(previous_thread);
 
     std::shared_ptr<Common::Fiber>* old_context;
-    if (previous_thread != nullptr) {
-        old_context = &previous_thread->GetHostContext();
-    } else {
-        old_context = &idle_thread->GetHostContext();
-    }
+    old_context = &previous_thread->GetHostContext();
     guard.Unlock();
 
     Common::Fiber::YieldTo(*old_context, *switch_fiber);
```
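The scheduler now assumes dispatch-disable counts are strictly balanced: `DisableScheduling` increments the current thread's count, and `EnableScheduling` only decrements directly when the count is nested (greater than 1). When the count would reach zero it calls `RescheduleCores` instead, and the final `EnableDispatch` happens inside the scheduler itself, either in `RescheduleCurrentCore` when no switch is needed or in `ScheduleImpl` when the chosen thread is already current. A standalone model of that contract (names hypothetical, logic paraphrased from the hunks above):

```cpp
#include <cassert>

// Standalone model of the new EnableScheduling contract: the outermost
// decrement is owed by the scheduler, not by EnableScheduling itself.
struct ThreadModel {
    int disable_count = 0;
    bool reschedule_requested = false;
};

void DisableScheduling(ThreadModel& t) {
    assert(t.disable_count >= 0);
    ++t.disable_count;
}

void EnableScheduling(ThreadModel& t) {
    assert(t.disable_count >= 1);
    if (t.disable_count > 1) {
        --t.disable_count;             // nested: plain decrement
    } else {
        t.reschedule_requested = true; // outermost: hand off to RescheduleCores;
                                       // the scheduler calls EnableDispatch()
                                       // itself after the switch decision
    }
}

int main() {
    ThreadModel t;
    DisableScheduling(t);
    DisableScheduling(t);
    EnableScheduling(t); // nested level: count 2 -> 1
    EnableScheduling(t); // outermost: defers the final decrement
    assert(t.disable_count == 1 && t.reschedule_requested);
    return 0;
}
```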
```diff
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 12cfae919..516e0cdba 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -33,6 +33,8 @@ public:
     explicit KScheduler(Core::System& system_, s32 core_id_);
     ~KScheduler();
 
+    void Finalize();
+
     /// Reschedules to the next available thread (call after current thread is suspended)
     void RescheduleCurrentCore();
 
```
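Splitting `Finalize` out of the destructor lets `KernelCore` control teardown order: each scheduler's `idle_thread` is closed explicitly during shutdown while the kernel is still alive (see `schedulers[core_id]->Finalize()` in the kernel.cpp hunks below), and the destructor only asserts the cleanup already happened. A minimal sketch of the two-phase pattern, with hypothetical names:

```cpp
#include <cassert>

// Two-phase teardown sketch: do ordered cleanup at an explicit point while
// dependencies are still alive, and let the destructor merely verify it.
class SchedulerSketch {
public:
    void Finalize() {
        delete idle_thread_;
        idle_thread_ = nullptr;
    }
    ~SchedulerSketch() {
        assert(idle_thread_ == nullptr); // mirrors ASSERT(!idle_thread)
    }

private:
    struct Thread {};
    Thread* idle_thread_ = new Thread{};
};

int main() {
    SchedulerSketch s;
    s.Finalize(); // KernelCore::Impl now does this for every core on shutdown
    return 0;
}
```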
```diff
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 9f1d3156b..0f6808ade 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -14,6 +14,7 @@
 #include "common/fiber.h"
 #include "common/logging/log.h"
 #include "common/scope_exit.h"
+#include "common/settings.h"
 #include "common/thread_queue_list.h"
 #include "core/core.h"
 #include "core/cpu_manager.h"
@@ -188,7 +189,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
     // Setup the stack parameters.
     StackParameters& sp = GetStackParameters();
     sp.cur_thread = this;
-    sp.disable_count = 1;
+    sp.disable_count = 0;
     SetInExceptionHandler();
 
     // Set thread ID.
@@ -215,9 +216,10 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
     // Initialize the thread.
     R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
 
-    // Initialize host context.
+    // Initialize emulation parameters.
     thread->host_context =
         std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
+    thread->is_single_core = !Settings::values.use_multi_core.GetValue();
 
     return ResultSuccess;
 }
@@ -970,6 +972,9 @@ ResultCode KThread::Run() {
 
         // Set our state and finish.
         SetState(ThreadState::Runnable);
+
+        DisableDispatch();
+
         return ResultSuccess;
     }
 }
@@ -1054,4 +1059,16 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
     return GetCurrentThread(kernel).GetCurrentCore();
 }
 
+KScopedDisableDispatch::~KScopedDisableDispatch() {
+    if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
+        auto scheduler = kernel.CurrentScheduler();
+
+        if (scheduler) {
+            scheduler->RescheduleCurrentCore();
+        }
+    } else {
+        GetCurrentThread(kernel).EnableDispatch();
+    }
+}
+
 } // namespace Kernel
```
```diff
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index c77f44ad4..e4c4c877d 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -450,16 +450,39 @@ public:
         sleeping_queue = q;
     }
 
+    [[nodiscard]] bool IsKernelThread() const {
+        return GetActiveCore() == 3;
+    }
+
+    [[nodiscard]] bool IsDispatchTrackingDisabled() const {
+        return is_single_core || IsKernelThread();
+    }
+
     [[nodiscard]] s32 GetDisableDispatchCount() const {
+        if (IsDispatchTrackingDisabled()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return 1;
+        }
+
         return this->GetStackParameters().disable_count;
     }
 
     void DisableDispatch() {
+        if (IsDispatchTrackingDisabled()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return;
+        }
+
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
         this->GetStackParameters().disable_count++;
     }
 
     void EnableDispatch() {
+        if (IsDispatchTrackingDisabled()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return;
+        }
+
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
         this->GetStackParameters().disable_count--;
     }
@@ -708,6 +731,7 @@ private:
 
     // For emulation
     std::shared_ptr<Common::Fiber> host_context{};
+    bool is_single_core{};
 
     // For debugging
     std::vector<KSynchronizationObject*> wait_objects_for_debugging;
@@ -752,4 +776,16 @@ public:
     }
 };
 
+class KScopedDisableDispatch {
+public:
+    [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
+        GetCurrentThread(kernel).DisableDispatch();
+    }
+
+    ~KScopedDisableDispatch();
+
+private:
+    KernelCore& kernel;
+};
+
 } // namespace Kernel
```
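This header introduces the RAII form used by the handle-table changes above: the constructor increments the current thread's disable count, and the destructor (defined in k_thread.cpp) either decrements it when nested or, at the outermost level, forwards to `RescheduleCurrentCore` so a deferred reschedule is not lost. Typical use, as a sketch with a hypothetical caller:

```cpp
// Hypothetical kernel-side critical section; any caller follows this shape.
void ManipulateSharedKernelState(Kernel::KernelCore& kernel) {
    Kernel::KScopedDisableDispatch dd(kernel); // disable_count++

    // ... work that must not be interrupted by a dispatch ...

} // ~KScopedDisableDispatch: nested -> EnableDispatch();
  // outermost -> RescheduleCurrentCore()
```

Note that on single-core configurations, and for threads pinned to core 3 (which `IsKernelThread()` treats as the kernel core until kernel threads are emulated), the guard is a no-op because dispatch tracking is disabled entirely.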
```diff
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 92fbc5532..8673384ee 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -85,8 +85,9 @@ struct KernelCore::Impl {
     }
 
     void InitializeCores() {
-        for (auto& core : cores) {
-            core.Initialize(current_process->Is64BitProcess());
+        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            cores[core_id].Initialize(current_process->Is64BitProcess());
+            system.Memory().SetCurrentPageTable(*current_process, core_id);
         }
     }
 
@@ -131,15 +132,6 @@
         next_user_process_id = KProcess::ProcessIDMin;
         next_thread_id = 1;
 
-        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            if (suspend_threads[core_id]) {
-                suspend_threads[core_id]->Close();
-                suspend_threads[core_id] = nullptr;
-            }
-
-            schedulers[core_id].reset();
-        }
-
         cores.clear();
 
         global_handle_table->Finalize();
@@ -167,6 +159,16 @@
         CleanupObject(time_shared_mem);
         CleanupObject(system_resource_limit);
 
+        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            if (suspend_threads[core_id]) {
+                suspend_threads[core_id]->Close();
+                suspend_threads[core_id] = nullptr;
+            }
+
+            schedulers[core_id]->Finalize();
+            schedulers[core_id].reset();
+        }
+
         // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others
         next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
 
@@ -257,14 +259,6 @@
 
     void MakeCurrentProcess(KProcess* process) {
         current_process = process;
-        if (process == nullptr) {
-            return;
-        }
-
-        const u32 core_id = GetCurrentHostThreadID();
-        if (core_id < Core::Hardware::NUM_CPU_CORES) {
-            system.Memory().SetCurrentPageTable(*process, core_id);
-        }
     }
 
     /// Creates a new host thread ID, should only be called by GetHostThreadId
@@ -824,16 +818,20 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
     return impl->cores[id];
 }
 
+size_t KernelCore::CurrentPhysicalCoreIndex() const {
+    const u32 core_id = impl->GetCurrentHostThreadID();
+    if (core_id >= Core::Hardware::NUM_CPU_CORES) {
+        return Core::Hardware::NUM_CPU_CORES - 1;
+    }
+    return core_id;
+}
+
 Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
-    u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return impl->cores[core_id];
+    return impl->cores[CurrentPhysicalCoreIndex()];
 }
 
 const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
-    u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return impl->cores[core_id];
+    return impl->cores[CurrentPhysicalCoreIndex()];
 }
 
 Kernel::KScheduler* KernelCore::CurrentScheduler() {
@@ -1026,6 +1024,9 @@ void KernelCore::Suspend(bool in_suspention) {
             impl->suspend_threads[core_id]->SetState(state);
             impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
                 ThreadWaitReasonForDebugging::Suspended);
+            if (!should_suspend) {
+                impl->suspend_threads[core_id]->DisableDispatch();
+            }
         }
     }
 }
@@ -1040,13 +1041,11 @@
 }
 
 void KernelCore::EnterSVCProfile() {
-    std::size_t core = impl->GetCurrentHostThreadID();
-    impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
+    impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
 }
 
 void KernelCore::ExitSVCProfile() {
-    std::size_t core = impl->GetCurrentHostThreadID();
-    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
+    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
 }
 
 std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
```
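`CurrentPhysicalCoreIndex` replaces the old assert-on-out-of-range pattern with clamping, so host threads that are not one of the emulated cores (service threads, for example) resolve to the last core instead of hitting an assert. Behaviorally, assuming `Core::Hardware::NUM_CPU_CORES == 4` (the Switch's core count):

```cpp
#include <cstddef>
#include <cstdint>

// Standalone restatement of the new accessor's mapping.
constexpr std::size_t NUM_CPU_CORES = 4; // Core::Hardware::NUM_CPU_CORES on Switch

constexpr std::size_t CurrentPhysicalCoreIndexSketch(std::uint32_t host_thread_id) {
    return host_thread_id < NUM_CPU_CORES ? host_thread_id : NUM_CPU_CORES - 1;
}

static_assert(CurrentPhysicalCoreIndexSketch(2) == 2); // core threads map 1:1
static_assert(CurrentPhysicalCoreIndexSketch(7) == 3); // others clamp to core 3
```

Clamping to core 3 lines up with the new `KThread::IsKernelThread()` check in k_thread.h, which treats core 3 as the kernel-thread core for dispatch-tracking purposes.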
```diff
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 3a6db0b1c..57535433b 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -146,6 +146,9 @@ public:
     /// Gets the an instance of the respective physical CPU core.
     const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
 
+    /// Gets the current physical core index for the running host thread.
+    std::size_t CurrentPhysicalCoreIndex() const;
+
     /// Gets the sole instance of the Scheduler at the current running core.
     Kernel::KScheduler* CurrentScheduler();
 
```
```diff
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 2eb532472..a90b291da 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -877,7 +877,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
             const u64 thread_ticks = current_thread->GetCpuTime();
 
             out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
-        } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
+        } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
             out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
         }
 
```