Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.cpp    |  4
-rw-r--r--  src/core/hle/kernel/k_auto_object.h           |  4
-rw-r--r--  src/core/hle/kernel/k_condition_variable.cpp  |  2
-rw-r--r--  src/core/hle/kernel/k_handle_table.cpp        |  6
-rw-r--r--  src/core/hle/kernel/k_handle_table.h          |  2
-rw-r--r--  src/core/hle/kernel/k_process.cpp             |  1
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp           | 85
-rw-r--r--  src/core/hle/kernel/k_scheduler.h             |  2
-rw-r--r--  src/core/hle/kernel/k_thread.cpp              | 21
-rw-r--r--  src/core/hle/kernel/k_thread.h                | 36
-rw-r--r--  src/core/hle/kernel/kernel.cpp                | 57
-rw-r--r--  src/core/hle/kernel/kernel.h                  |  3
-rw-r--r--  src/core/hle/kernel/svc.cpp                   |  2

13 files changed, 74 insertions(+), 151 deletions(-)
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 6771ef621..1b429bc1e 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -28,7 +28,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
 
 bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
+    const auto current_core = system.CurrentCoreIndex();
 
     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
@@ -58,7 +58,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
 
 bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
+    const auto current_core = system.CurrentCoreIndex();
 
     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index 165b76747..e4fcdbc67 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -170,10 +170,6 @@ public:
         }
     }
 
-    const std::string& GetName() const {
-        return name;
-    }
-
 private:
     void RegisterWithKernel();
     void UnregisterWithKernel();
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 4174f35fd..ef14ad1d2 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -35,7 +35,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
 bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
                       u32 new_orr_mask) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
+    const auto current_core = system.CurrentCoreIndex();
 
     // Load the value from the address.
     const auto expected = monitor.ExclusiveRead32(current_core, address);
diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp
index d720c2dda..6a420d5b0 100644
--- a/src/core/hle/kernel/k_handle_table.cpp
+++ b/src/core/hle/kernel/k_handle_table.cpp
@@ -13,7 +13,6 @@ ResultCode KHandleTable::Finalize() {
     // Get the table and clear our record of it.
     u16 saved_table_size = 0;
     {
-        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         std::swap(m_table_size, saved_table_size);
@@ -44,7 +43,6 @@ bool KHandleTable::Remove(Handle handle) {
     // Find the object and free the entry.
     KAutoObject* obj = nullptr;
     {
-        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if (this->IsValidHandle(handle)) {
@@ -63,7 +61,6 @@
 }
 
 ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
-    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.
@@ -86,7 +83,6 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
 }
 
 ResultCode KHandleTable::Reserve(Handle* out_handle) {
-    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.
@@ -97,7 +93,6 @@
 }
 
 void KHandleTable::Unreserve(Handle handle) {
-    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.
@@ -116,7 +111,6 @@ void KHandleTable::Unreserve(Handle handle) {
 }
 
 void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
-    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index 75dcec7df..2ff6aa160 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
@@ -69,7 +69,6 @@ public:
     template <typename T = KAutoObject>
     KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
         // Lock and look up in table.
-        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if constexpr (std::is_same_v<T, KAutoObject>) {
@@ -124,7 +123,6 @@ public:
         size_t num_opened;
         {
             // Lock the table.
-            KScopedDisableDispatch dd(kernel);
             KScopedSpinLock lk(m_lock);
             for (num_opened = 0; num_opened < num_handles; num_opened++) {
                 // Get the current handle.
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 3d7e6707e..8ead1a769 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -59,7 +59,6 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
     thread->GetContext64().cpu_registers[0] = 0;
     thread->GetContext32().cpu_registers[1] = thread_handle;
     thread->GetContext64().cpu_registers[1] = thread_handle;
-    thread->DisableDispatch();
 
     auto& kernel = system.Kernel();
     // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 6ddbae52c..6a7d80d03 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -376,18 +376,20 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
 }
 
 void KScheduler::DisableScheduling(KernelCore& kernel) {
-    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
-    GetCurrentThreadPointer(kernel)->DisableDispatch();
+    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
+        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
+        scheduler->GetCurrentThread()->DisableDispatch();
+    }
 }
 
 void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
-    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 1);
-
-    if (GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() > 1) {
-        GetCurrentThreadPointer(kernel)->EnableDispatch();
-    } else {
-        RescheduleCores(kernel, cores_needing_scheduling);
+    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
+        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
+        if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
+            scheduler->GetCurrentThread()->EnableDispatch();
+        }
     }
+    RescheduleCores(kernel, cores_needing_scheduling);
 }
 
 u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -615,17 +617,13 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
     state.highest_priority_thread = nullptr;
 }
 
-void KScheduler::Finalize() {
+KScheduler::~KScheduler() {
     if (idle_thread) {
         idle_thread->Close();
         idle_thread = nullptr;
     }
 }
 
-KScheduler::~KScheduler() {
-    ASSERT(!idle_thread);
-}
-
 KThread* KScheduler::GetCurrentThread() const {
     if (auto result = current_thread.load(); result) {
         return result;
@@ -644,12 +642,10 @@ void KScheduler::RescheduleCurrentCore() {
     if (phys_core.IsInterrupted()) {
         phys_core.ClearInterrupt();
     }
-
     guard.Lock();
     if (state.needs_scheduling.load()) {
         Schedule();
     } else {
-        GetCurrentThread()->EnableDispatch();
         guard.Unlock();
     }
 }
@@ -659,33 +655,26 @@ void KScheduler::OnThreadStart() {
 }
 
 void KScheduler::Unload(KThread* thread) {
-    ASSERT(thread);
-
     LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
 
-    if (thread->IsCallingSvc()) {
-        thread->ClearIsCallingSvc();
-    }
-
-    auto& physical_core = system.Kernel().PhysicalCore(core_id);
-    if (!physical_core.IsInitialized()) {
-        return;
-    }
-
-    Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
-    cpu_core.SaveContext(thread->GetContext32());
-    cpu_core.SaveContext(thread->GetContext64());
-    // Save the TPIDR_EL0 system register in case it was modified.
-    thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
-    cpu_core.ClearExclusiveState();
-
-    if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
-        prev_thread = thread;
-    } else {
-        prev_thread = nullptr;
+    if (thread) {
+        if (thread->IsCallingSvc()) {
+            thread->ClearIsCallingSvc();
+        }
+        if (!thread->IsTerminationRequested()) {
+            prev_thread = thread;
+
+            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+            cpu_core.SaveContext(thread->GetContext32());
+            cpu_core.SaveContext(thread->GetContext64());
+            // Save the TPIDR_EL0 system register in case it was modified.
+            thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+            cpu_core.ClearExclusiveState();
+        } else {
+            prev_thread = nullptr;
+        }
+        thread->context_guard.Unlock();
     }
-
-    thread->context_guard.Unlock();
 }
 
 void KScheduler::Reload(KThread* thread) {
@@ -694,6 +683,11 @@ void KScheduler::Reload(KThread* thread) {
     if (thread) {
         ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
 
+        auto* const thread_owner_process = thread->GetOwnerProcess();
+        if (thread_owner_process != nullptr) {
+            system.Kernel().MakeCurrentProcess(thread_owner_process);
+        }
+
         Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
         cpu_core.LoadContext(thread->GetContext32());
         cpu_core.LoadContext(thread->GetContext64());
@@ -711,7 +705,7 @@ void KScheduler::SwitchContextStep2() {
 }
 
 void KScheduler::ScheduleImpl() {
-    KThread* previous_thread = GetCurrentThread();
+    KThread* previous_thread = current_thread.load();
     KThread* next_thread = state.highest_priority_thread;
 
     state.needs_scheduling = false;
@@ -723,15 +717,10 @@
 
     // If we're not actually switching thread, there's nothing to do.
    if (next_thread == current_thread.load()) {
-        previous_thread->EnableDispatch();
         guard.Unlock();
         return;
     }
 
-    if (next_thread->GetCurrentCore() != core_id) {
-        next_thread->SetCurrentCore(core_id);
-    }
-
     current_thread.store(next_thread);
 
     KProcess* const previous_process = system.Kernel().CurrentProcess();
@@ -742,7 +731,11 @@
     Unload(previous_thread);
 
     std::shared_ptr<Common::Fiber>* old_context;
-    old_context = &previous_thread->GetHostContext();
+    if (previous_thread != nullptr) {
+        old_context = &previous_thread->GetHostContext();
+    } else {
+        old_context = &idle_thread->GetHostContext();
+    }
     guard.Unlock();
 
     Common::Fiber::YieldTo(*old_context, *switch_fiber);
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 516e0cdba..12cfae919 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -33,8 +33,6 @@ public:
     explicit KScheduler(Core::System& system_, s32 core_id_);
     ~KScheduler();
 
-    void Finalize();
-
     /// Reschedules to the next available thread (call after current thread is suspended)
     void RescheduleCurrentCore();
 
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 0f6808ade..9f1d3156b 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -14,7 +14,6 @@
 #include "common/fiber.h"
 #include "common/logging/log.h"
 #include "common/scope_exit.h"
-#include "common/settings.h"
 #include "common/thread_queue_list.h"
 #include "core/core.h"
 #include "core/cpu_manager.h"
@@ -189,7 +188,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
     // Setup the stack parameters.
     StackParameters& sp = GetStackParameters();
     sp.cur_thread = this;
-    sp.disable_count = 0;
+    sp.disable_count = 1;
     SetInExceptionHandler();
 
     // Set thread ID.
@@ -216,10 +215,9 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
     // Initialize the thread.
     R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
 
-    // Initialize emulation parameters.
+    // Initialize host context.
     thread->host_context =
         std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
-    thread->is_single_core = !Settings::values.use_multi_core.GetValue();
 
     return ResultSuccess;
 }
@@ -972,9 +970,6 @@ ResultCode KThread::Run() {
 
         // Set our state and finish.
         SetState(ThreadState::Runnable);
-
-        DisableDispatch();
-
         return ResultSuccess;
     }
 }
@@ -1059,16 +1054,4 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
     return GetCurrentThread(kernel).GetCurrentCore();
 }
 
-KScopedDisableDispatch::~KScopedDisableDispatch() {
-    if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
-        auto scheduler = kernel.CurrentScheduler();
-
-        if (scheduler) {
-            scheduler->RescheduleCurrentCore();
-        }
-    } else {
-        GetCurrentThread(kernel).EnableDispatch();
-    }
-}
-
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index e4c4c877d..c77f44ad4 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -450,39 +450,16 @@ public:
         sleeping_queue = q;
     }
 
-    [[nodiscard]] bool IsKernelThread() const {
-        return GetActiveCore() == 3;
-    }
-
-    [[nodiscard]] bool IsDispatchTrackingDisabled() const {
-        return is_single_core || IsKernelThread();
-    }
-
     [[nodiscard]] s32 GetDisableDispatchCount() const {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return 1;
-        }
-
         return this->GetStackParameters().disable_count;
     }
 
     void DisableDispatch() {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return;
-        }
-
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
         this->GetStackParameters().disable_count++;
     }
 
     void EnableDispatch() {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return;
-        }
-
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
         this->GetStackParameters().disable_count--;
     }
@@ -731,7 +708,6 @@
 
     // For emulation
     std::shared_ptr<Common::Fiber> host_context{};
-    bool is_single_core{};
 
     // For debugging
     std::vector<KSynchronizationObject*> wait_objects_for_debugging;
@@ -776,16 +752,4 @@ public:
     }
 };
 
-class KScopedDisableDispatch {
-public:
-    [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
-        GetCurrentThread(kernel).DisableDispatch();
-    }
-
-    ~KScopedDisableDispatch();
-
-private:
-    KernelCore& kernel;
-};
-
 } // namespace Kernel
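Note (not part of the patch): KScopedDisableDispatch, deleted from k_thread.h and k_thread.cpp above, was a small RAII guard that disabled dispatch in its constructor and either rescheduled the core or re-enabled dispatch in its destructor. The sketch below is reconstructed purely from the removed lines and is for illustration only, together with a typical call site from the handle-table hunks above:

    // Sketch reconstructed from the removed code; illustration only.
    class KScopedDisableDispatch {
    public:
        explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
            GetCurrentThread(kernel).DisableDispatch();
        }
        ~KScopedDisableDispatch() {
            // Dropping the last disable reference gives the scheduler a chance to run;
            // otherwise dispatch is simply re-enabled (the count is decremented).
            if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
                if (auto* scheduler = kernel.CurrentScheduler()) {
                    scheduler->RescheduleCurrentCore();
                }
            } else {
                GetCurrentThread(kernel).EnableDispatch();
            }
        }
    private:
        KernelCore& kernel;
    };

    // Typical call site removed by this change (e.g. KHandleTable::Add):
    KScopedDisableDispatch dd(kernel);
    KScopedSpinLock lk(m_lock);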
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 8fdab44e4..bea945301 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -85,9 +85,8 @@ struct KernelCore::Impl {
     }
 
     void InitializeCores() {
-        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            cores[core_id].Initialize(current_process->Is64BitProcess());
-            system.Memory().SetCurrentPageTable(*current_process, core_id);
+        for (auto& core : cores) {
+            core.Initialize(current_process->Is64BitProcess());
         }
     }
 
@@ -132,6 +131,15 @@
         next_user_process_id = KProcess::ProcessIDMin;
         next_thread_id = 1;
 
+        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            if (suspend_threads[core_id]) {
+                suspend_threads[core_id]->Close();
+                suspend_threads[core_id] = nullptr;
+            }
+
+            schedulers[core_id].reset();
+        }
+
         cores.clear();
 
         global_handle_table->Finalize();
@@ -159,16 +167,6 @@
         CleanupObject(time_shared_mem);
         CleanupObject(system_resource_limit);
 
-        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            if (suspend_threads[core_id]) {
-                suspend_threads[core_id]->Close();
-                suspend_threads[core_id] = nullptr;
-            }
-
-            schedulers[core_id]->Finalize();
-            schedulers[core_id].reset();
-        }
-
         // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others
         next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
 
@@ -259,6 +257,14 @@
 
     void MakeCurrentProcess(KProcess* process) {
         current_process = process;
+        if (process == nullptr) {
+            return;
+        }
+
+        const u32 core_id = GetCurrentHostThreadID();
+        if (core_id < Core::Hardware::NUM_CPU_CORES) {
+            system.Memory().SetCurrentPageTable(*process, core_id);
+        }
     }
 
     static inline thread_local u32 host_thread_id = UINT32_MAX;
@@ -821,20 +827,16 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
     return impl->cores[id];
 }
 
-size_t KernelCore::CurrentPhysicalCoreIndex() const {
-    const u32 core_id = impl->GetCurrentHostThreadID();
-    if (core_id >= Core::Hardware::NUM_CPU_CORES) {
-        return Core::Hardware::NUM_CPU_CORES - 1;
-    }
-    return core_id;
-}
-
 Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
-    return impl->cores[CurrentPhysicalCoreIndex()];
+    u32 core_id = impl->GetCurrentHostThreadID();
+    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+    return impl->cores[core_id];
 }
 
 const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
-    return impl->cores[CurrentPhysicalCoreIndex()];
+    u32 core_id = impl->GetCurrentHostThreadID();
+    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+    return impl->cores[core_id];
 }
 
 Kernel::KScheduler* KernelCore::CurrentScheduler() {
@@ -1027,9 +1029,6 @@ void KernelCore::Suspend(bool in_suspention) {
             impl->suspend_threads[core_id]->SetState(state);
             impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
                 ThreadWaitReasonForDebugging::Suspended);
-            if (!should_suspend) {
-                impl->suspend_threads[core_id]->DisableDispatch();
-            }
         }
     }
 }
@@ -1044,11 +1043,13 @@
 }
 
 void KernelCore::EnterSVCProfile() {
-    impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
+    std::size_t core = impl->GetCurrentHostThreadID();
+    impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
 }
 
 void KernelCore::ExitSVCProfile() {
-    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
+    std::size_t core = impl->GetCurrentHostThreadID();
+    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
 }
 
 std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 57535433b..3a6db0b1c 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -146,9 +146,6 @@ public:
     /// Gets the an instance of the respective physical CPU core.
     const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
 
-    /// Gets the current physical core index for the running host thread.
-    std::size_t CurrentPhysicalCoreIndex() const;
-
     /// Gets the sole instance of the Scheduler at the current running core.
     Kernel::KScheduler* CurrentScheduler();
 
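For reference, the call sites changed in k_address_arbiter.cpp, k_condition_variable.cpp, and svc.cpp above all make the same one-line swap; the before/after below is illustrative only. The removed KernelCore::CurrentPhysicalCoreIndex() clamped non-core host threads to the last core, whereas the behaviour of Core::System::CurrentCoreIndex() is not shown in this diff.

    // Before (old side of this diff):
    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
    // After (new side of this diff):
    const auto current_core = system.CurrentCoreIndex();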
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 890c52198..62fb06c45 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -877,7 +877,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
             const u64 thread_ticks = current_thread->GetCpuTime();
 
             out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
-        } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
+        } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
             out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
         }
 