From 68eee948758eeddb4f3f091cd89c870e481b278b Mon Sep 17 00:00:00 2001
From: bunnei
Date: Fri, 6 Aug 2021 22:45:18 -0700
Subject: core: hle: kernel: Reflect non-emulated threads as core 3.

---
 src/core/hle/kernel/k_address_arbiter.cpp    | 4 ++--
 src/core/hle/kernel/k_condition_variable.cpp | 2 +-
 src/core/hle/kernel/kernel.cpp               | 8 ++++++++
 src/core/hle/kernel/kernel.h                 | 3 +++
 src/core/hle/kernel/svc.cpp                  | 2 +-
 5 files changed, 15 insertions(+), 4 deletions(-)

(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 1b429bc1e..6771ef621 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -28,7 +28,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
 
 bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
@@ -58,7 +58,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
 
 bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index ef14ad1d2..4174f35fd 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -35,7 +35,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
 bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
                       u32 new_orr_mask) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // Load the value from the address.
     const auto expected = monitor.ExclusiveRead32(current_core, address);
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 92fbc5532..b0b130719 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -824,6 +824,14 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
     return impl->cores[id];
 }
 
+size_t KernelCore::CurrentPhysicalCoreIndex() const {
+    const u32 core_id = impl->GetCurrentHostThreadID();
+    if (core_id >= Core::Hardware::NUM_CPU_CORES) {
+        return Core::Hardware::NUM_CPU_CORES - 1;
+    }
+    return core_id;
+}
+
 Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
     u32 core_id = impl->GetCurrentHostThreadID();
     ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 3a6db0b1c..57535433b 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -146,6 +146,9 @@ public:
     /// Gets the an instance of the respective physical CPU core.
     const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
 
+    /// Gets the current physical core index for the running host thread.
+    std::size_t CurrentPhysicalCoreIndex() const;
+
     /// Gets the sole instance of the Scheduler at the current running core.
     Kernel::KScheduler* CurrentScheduler();
 
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 2eb532472..a90b291da 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -877,7 +877,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
             const u64 thread_ticks = current_thread->GetCpuTime();
 
             out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
-        } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
+        } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
            out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
         }
 
--
cgit v1.2.3
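The rule this first patch introduces, stated in isolation: host threads that do not belong to one of the four emulated CPU cores (service threads, audio threads, and so on) are reported as the last core instead of tripping an assert. A minimal standalone sketch of that clamping behaviour follows; the constant and function names here are hypothetical stand-ins, only KernelCore::CurrentPhysicalCoreIndex() and Core::Hardware::NUM_CPU_CORES come from the patch itself.

#include <cstddef>

// Illustrative only: restates the clamp performed by KernelCore::CurrentPhysicalCoreIndex().
constexpr std::size_t NumCpuCores = 4; // stand-in for Core::Hardware::NUM_CPU_CORES

std::size_t ReflectHostThreadAsCore(std::size_t host_thread_id) {
    // Emulated core threads keep their IDs 0-3; any other host thread is treated as core 3.
    return (host_thread_id < NumCpuCores) ? host_thread_id : NumCpuCores - 1;
}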
From 2b9560428b6ab84fc61dd8f82e75f58cdb851c07 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Fri, 6 Aug 2021 22:58:46 -0700
Subject: core: hle: kernel: Ensure idle threads are closed before destroying scheduler.

---
 src/core/hle/kernel/k_scheduler.cpp |  6 +++++-
 src/core/hle/kernel/k_scheduler.h   |  2 ++
 src/core/hle/kernel/kernel.cpp      | 38 +++++++++++++++-----------------------
 3 files changed, 22 insertions(+), 24 deletions(-)

(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 6a7d80d03..4bae69f71 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -617,13 +617,17 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
     state.highest_priority_thread = nullptr;
 }
 
-KScheduler::~KScheduler() {
+void KScheduler::Finalize() {
     if (idle_thread) {
         idle_thread->Close();
         idle_thread = nullptr;
     }
 }
 
+KScheduler::~KScheduler() {
+    ASSERT(!idle_thread);
+}
+
 KThread* KScheduler::GetCurrentThread() const {
     if (auto result = current_thread.load(); result) {
         return result;
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 12cfae919..516e0cdba 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -33,6 +33,8 @@ public:
     explicit KScheduler(Core::System& system_, s32 core_id_);
     ~KScheduler();
 
+    void Finalize();
+
     /// Reschedules to the next available thread (call after current thread is suspended)
     void RescheduleCurrentCore();
 
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index b0b130719..6bfb55f71 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -85,8 +85,9 @@ struct KernelCore::Impl {
     }
 
     void InitializeCores() {
-        for (auto& core : cores) {
-            core.Initialize(current_process->Is64BitProcess());
+        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            cores[core_id].Initialize(current_process->Is64BitProcess());
+            system.Memory().SetCurrentPageTable(*current_process, core_id);
         }
     }
 
@@ -131,15 +132,6 @@ struct KernelCore::Impl {
         next_user_process_id = KProcess::ProcessIDMin;
         next_thread_id = 1;
 
-        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            if (suspend_threads[core_id]) {
-                suspend_threads[core_id]->Close();
-                suspend_threads[core_id] = nullptr;
-            }
-
-            schedulers[core_id].reset();
-        }
-
         cores.clear();
 
         global_handle_table->Finalize();
@@ -167,6 +159,16 @@ struct KernelCore::Impl {
         CleanupObject(time_shared_mem);
         CleanupObject(system_resource_limit);
 
+        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            if (suspend_threads[core_id]) {
+                suspend_threads[core_id]->Close();
+                suspend_threads[core_id] = nullptr;
+            }
+
+            schedulers[core_id]->Finalize();
+            schedulers[core_id].reset();
+        }
+
         // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others
         next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
 
@@ -257,14 +259,6 @@ struct KernelCore::Impl {
 
     void MakeCurrentProcess(KProcess* process) {
         current_process = process;
-        if (process == nullptr) {
-            return;
-        }
-
-        const u32 core_id = GetCurrentHostThreadID();
-        if (core_id < Core::Hardware::NUM_CPU_CORES) {
-            system.Memory().SetCurrentPageTable(*process, core_id);
-        }
     }
 
     /// Creates a new host thread ID, should only be called by GetHostThreadId
@@ -1048,13 +1042,11 @@ void KernelCore::ExceptionalExit() {
 }
 
 void KernelCore::EnterSVCProfile() {
-    std::size_t core = impl->GetCurrentHostThreadID();
-    impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
+    impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
 }
 
 void KernelCore::ExitSVCProfile() {
-    std::size_t core = impl->GetCurrentHostThreadID();
-    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
+    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
 }
 
 std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
--
cgit v1.2.3
From 01af2f4162308029e53143f0caa953bc4c59de92 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Fri, 6 Aug 2021 23:04:32 -0700
Subject: core: hle: kernel: k_thread: Add KScopedDisableDispatch.

---
 src/core/hle/kernel/k_thread.cpp | 17 ++++++++++++++++-
 src/core/hle/kernel/k_thread.h   | 31 +++++++++++++++++++++++++++++++
 2 files changed, 47 insertions(+), 1 deletion(-)

(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 9f1d3156b..89653e0cb 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -188,7 +188,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
     // Setup the stack parameters.
     StackParameters& sp = GetStackParameters();
     sp.cur_thread = this;
-    sp.disable_count = 1;
+    sp.disable_count = 0;
     SetInExceptionHandler();
 
     // Set thread ID.
@@ -970,6 +970,9 @@ ResultCode KThread::Run() {
 
         // Set our state and finish.
         SetState(ThreadState::Runnable);
+
+        DisableDispatch();
+
         return ResultSuccess;
     }
 }
@@ -1054,4 +1057,16 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
     return GetCurrentThread(kernel).GetCurrentCore();
 }
 
+KScopedDisableDispatch::~KScopedDisableDispatch() {
+    if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
+        auto scheduler = kernel.CurrentScheduler();
+
+        if (scheduler) {
+            scheduler->RescheduleCurrentCore();
+        }
+    } else {
+        GetCurrentThread(kernel).EnableDispatch();
+    }
+}
+
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index c77f44ad4..a149744c8 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -450,16 +450,35 @@ public:
         sleeping_queue = q;
     }
 
+    [[nodiscard]] bool IsKernelThread() const {
+        return GetActiveCore() == 3;
+    }
+
     [[nodiscard]] s32 GetDisableDispatchCount() const {
+        if (IsKernelThread()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return 1;
+        }
+
         return this->GetStackParameters().disable_count;
     }
 
     void DisableDispatch() {
+        if (IsKernelThread()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return;
+        }
+
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
         this->GetStackParameters().disable_count++;
     }
 
     void EnableDispatch() {
+        if (IsKernelThread()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return;
+        }
+
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
         this->GetStackParameters().disable_count--;
     }
@@ -752,4 +771,16 @@ public:
     }
 };
 
+class KScopedDisableDispatch {
+public:
+    explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
+        GetCurrentThread(kernel).DisableDispatch();
+    }
+
+    ~KScopedDisableDispatch();
+
+private:
+    KernelCore& kernel;
+};
+
 } // namespace Kernel
--
cgit v1.2.3
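For orientation, a minimal sketch of how the scoped guard added above is meant to be used. The surrounding function is hypothetical; KScopedDisableDispatch, KernelCore, and the constructor/destructor behaviour come from the patch itself, and the next patch in the series applies this same pattern to KHandleTable.

// Sketch: dispatch stays disabled for the current thread while the guard is alive.
void UpdateSchedulerSharedState(Kernel::KernelCore& kernel) {
    // Constructor calls GetCurrentThread(kernel).DisableDispatch().
    Kernel::KScopedDisableDispatch dd(kernel);

    // ... work that must not be preempted on this core ...

}   // Destructor: the outermost guard asks the current scheduler to reschedule the core;
    // a nested guard simply calls EnableDispatch() to decrement the disable count.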
From f2b0d289833be94401edb4bb3b5b891fcda713d4 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Fri, 6 Aug 2021 23:05:01 -0700
Subject: core: hle: kernel: k_handle_table: Use KScopedDisableDispatch as necessary.

---
 src/core/hle/kernel/k_handle_table.cpp | 6 ++++++
 src/core/hle/kernel/k_handle_table.h   | 2 ++
 2 files changed, 8 insertions(+)

(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp
index 6a420d5b0..d720c2dda 100644
--- a/src/core/hle/kernel/k_handle_table.cpp
+++ b/src/core/hle/kernel/k_handle_table.cpp
@@ -13,6 +13,7 @@ ResultCode KHandleTable::Finalize() {
     // Get the table and clear our record of it.
     u16 saved_table_size = 0;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         std::swap(m_table_size, saved_table_size);
@@ -43,6 +44,7 @@ bool KHandleTable::Remove(Handle handle) {
     // Find the object and free the entry.
     KAutoObject* obj = nullptr;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if (this->IsValidHandle(handle)) {
@@ -61,6 +63,7 @@ bool KHandleTable::Remove(Handle handle) {
 }
 
 ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.
@@ -83,6 +86,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
 }
 
 ResultCode KHandleTable::Reserve(Handle* out_handle) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.
@@ -93,6 +97,7 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) {
 }
 
 void KHandleTable::Unreserve(Handle handle) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.
@@ -111,6 +116,7 @@ void KHandleTable::Unreserve(Handle handle) {
 }
 
 void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index 2ff6aa160..75dcec7df 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
@@ -69,6 +69,7 @@ public:
     template <typename T>
     KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
         // Lock and look up in table.
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if constexpr (std::is_same_v<T, KAutoObject>) {
@@ -123,6 +124,7 @@ public:
         size_t num_opened;
         {
             // Lock the table.
+            KScopedDisableDispatch dd(kernel);
             KScopedSpinLock lk(m_lock);
             for (num_opened = 0; num_opened < num_handles; num_opened++) {
                 // Get the current handle.
--
cgit v1.2.3

From 7569d6774d2d445d261713b42472fd429ce782a0 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Fri, 6 Aug 2021 23:08:26 -0700
Subject: core: hle: kernel: k_process: DisableDispatch on main thread.
---
 src/core/hle/kernel/k_process.cpp | 1 +
 1 file changed, 1 insertion(+)

(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 8ead1a769..3d7e6707e 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -59,6 +59,7 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
     thread->GetContext64().cpu_registers[0] = 0;
     thread->GetContext32().cpu_registers[1] = thread_handle;
     thread->GetContext64().cpu_registers[1] = thread_handle;
+    thread->DisableDispatch();
 
     auto& kernel = system.Kernel();
     // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
--
cgit v1.2.3

From bedcf1971022129ec93ab56b9ccb186fbbb26b7f Mon Sep 17 00:00:00 2001
From: bunnei
Date: Fri, 6 Aug 2021 23:12:47 -0700
Subject: core: hle: kernel: k_scheduler: Improve Unload.

---
 src/core/hle/kernel/k_scheduler.cpp | 46 +++++++++++++++++++++++++++++++++++------------------------
 1 file changed, 29 insertions(+), 17 deletions(-)

(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 4bae69f71..5ee4b8adc 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -650,6 +650,7 @@ void KScheduler::RescheduleCurrentCore() {
     if (state.needs_scheduling.load()) {
         Schedule();
     } else {
+        GetCurrentThread()->EnableDispatch();
         guard.Unlock();
     }
 }
@@ -659,26 +660,37 @@ void KScheduler::OnThreadStart() {
 }
 
 void KScheduler::Unload(KThread* thread) {
+    ASSERT(thread);
+
+    if (!thread) {
+        return;
+    }
+
     LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
 
-    if (thread) {
-        if (thread->IsCallingSvc()) {
-            thread->ClearIsCallingSvc();
-        }
-        if (!thread->IsTerminationRequested()) {
-            prev_thread = thread;
-
-            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
-            cpu_core.SaveContext(thread->GetContext32());
-            cpu_core.SaveContext(thread->GetContext64());
-            // Save the TPIDR_EL0 system register in case it was modified.
-            thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
-            cpu_core.ClearExclusiveState();
-        } else {
-            prev_thread = nullptr;
-        }
-        thread->context_guard.Unlock();
+    if (thread->IsCallingSvc()) {
+        thread->ClearIsCallingSvc();
     }
+
+    auto& physical_core = system.Kernel().PhysicalCore(core_id);
+    if (!physical_core.IsInitialized()) {
+        return;
+    }
+
+    Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
+    cpu_core.SaveContext(thread->GetContext32());
+    cpu_core.SaveContext(thread->GetContext64());
+    // Save the TPIDR_EL0 system register in case it was modified.
+    thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+    cpu_core.ClearExclusiveState();
+
+    if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
+        prev_thread = thread;
+    } else {
+        prev_thread = nullptr;
+    }
+
+    thread->context_guard.Unlock();
 }
 
 void KScheduler::Reload(KThread* thread) {
--
cgit v1.2.3

From 77ad64b97d3d5d9d6f5189ad901e2cd98369c973 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Fri, 6 Aug 2021 23:16:12 -0700
Subject: core: hle: kernel: k_scheduler: Improve ScheduleImpl.
---
 src/core/hle/kernel/k_scheduler.cpp | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 5ee4b8adc..e523c4923 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -721,7 +721,7 @@ void KScheduler::SwitchContextStep2() {
 }
 
 void KScheduler::ScheduleImpl() {
-    KThread* previous_thread = current_thread.load();
+    KThread* previous_thread = GetCurrentThread();
     KThread* next_thread = state.highest_priority_thread;
 
     state.needs_scheduling = false;
@@ -733,10 +733,15 @@ void KScheduler::ScheduleImpl() {
 
     // If we're not actually switching thread, there's nothing to do.
     if (next_thread == current_thread.load()) {
+        previous_thread->EnableDispatch();
        guard.Unlock();
        return;
     }
 
+    if (next_thread->GetCurrentCore() != core_id) {
+        next_thread->SetCurrentCore(core_id);
+    }
+
     current_thread.store(next_thread);
 
     KProcess* const previous_process = system.Kernel().CurrentProcess();
@@ -747,11 +752,7 @@ void KScheduler::ScheduleImpl() {
     Unload(previous_thread);
 
     std::shared_ptr<Common::Fiber>* old_context;
-    if (previous_thread != nullptr) {
-        old_context = &previous_thread->GetHostContext();
-    } else {
-        old_context = &idle_thread->GetHostContext();
-    }
+    old_context = &previous_thread->GetHostContext();
 
     guard.Unlock();
     Common::Fiber::YieldTo(*old_context, *switch_fiber);
--
cgit v1.2.3

From d1c502720d9adec47047800896594886e7952693 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Fri, 6 Aug 2021 23:16:36 -0700
Subject: core: hle: kernel: k_scheduler: Remove unnecessary MakeCurrentProcess.

---
 src/core/hle/kernel/k_scheduler.cpp | 5 -----
 1 file changed, 5 deletions(-)

(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index e523c4923..f5236dfea 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -699,11 +699,6 @@ void KScheduler::Reload(KThread* thread) {
     if (thread) {
         ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
 
-        auto* const thread_owner_process = thread->GetOwnerProcess();
-        if (thread_owner_process != nullptr) {
-            system.Kernel().MakeCurrentProcess(thread_owner_process);
-        }
-
         Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
         cpu_core.LoadContext(thread->GetContext32());
         cpu_core.LoadContext(thread->GetContext64());
--
cgit v1.2.3

From 2dfb07388ac4755a2b43944cc9a3edfa531a2fee Mon Sep 17 00:00:00 2001
From: bunnei
Date: Fri, 6 Aug 2021 23:20:07 -0700
Subject: core: hle: kernel: Use CurrentPhysicalCoreIndex as appropriate.
---
 src/core/hle/kernel/kernel.cpp | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 6bfb55f71..2a984b913 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -827,15 +827,11 @@ size_t KernelCore::CurrentPhysicalCoreIndex() const {
 }
 
 Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
-    u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return impl->cores[core_id];
+    return impl->cores[CurrentPhysicalCoreIndex()];
 }
 
 const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
-    u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return impl->cores[core_id];
+    return impl->cores[CurrentPhysicalCoreIndex()];
 }
 
 Kernel::KScheduler* KernelCore::CurrentScheduler() {
--
cgit v1.2.3

From 1798c3b6b0cd4a088be821c604c7ff1617afde7b Mon Sep 17 00:00:00 2001
From: bunnei
Date: Fri, 6 Aug 2021 23:27:33 -0700
Subject: core: hle: kernel: k_scheduler: Improve DisableScheduling and EnableScheduling.

---
 src/core/hle/kernel/k_scheduler.cpp | 23 +++++++++--------------
 1 file changed, 9 insertions(+), 14 deletions(-)

(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index f5236dfea..6ddbae52c 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -376,20 +376,18 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
 }
 
 void KScheduler::DisableScheduling(KernelCore& kernel) {
-    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
-        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
-        scheduler->GetCurrentThread()->DisableDispatch();
-    }
+    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
+    GetCurrentThreadPointer(kernel)->DisableDispatch();
 }
 
 void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
-    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
-        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
-        if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
-            scheduler->GetCurrentThread()->EnableDispatch();
-        }
+    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 1);
+
+    if (GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() > 1) {
+        GetCurrentThreadPointer(kernel)->EnableDispatch();
+    } else {
+        RescheduleCores(kernel, cores_needing_scheduling);
     }
-    RescheduleCores(kernel, cores_needing_scheduling);
 }
 
 u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -646,6 +644,7 @@ void KScheduler::RescheduleCurrentCore() {
     if (phys_core.IsInterrupted()) {
         phys_core.ClearInterrupt();
     }
+
     guard.Lock();
     if (state.needs_scheduling.load()) {
         Schedule();
@@ -662,10 +661,6 @@ void KScheduler::OnThreadStart() {
 void KScheduler::Unload(KThread* thread) {
     ASSERT(thread);
 
-    if (!thread) {
-        return;
-    }
-
     LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
 
     if (thread->IsCallingSvc()) {
--
cgit v1.2.3

From 5051d3c4159adabe4c3e8b9233a524894eed6808 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Fri, 6 Aug 2021 23:43:26 -0700
Subject: core: hle: kernel: DisableDispatch on suspend threads.
---
 src/core/hle/kernel/kernel.cpp | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 2a984b913..8673384ee 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -1024,6 +1024,9 @@ void KernelCore::Suspend(bool in_suspention) {
             impl->suspend_threads[core_id]->SetState(state);
             impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
                 ThreadWaitReasonForDebugging::Suspended);
+            if (!should_suspend) {
+                impl->suspend_threads[core_id]->DisableDispatch();
+            }
         }
     }
 }
--
cgit v1.2.3

From 48a3496b9390e0e1cd0a649d941360dda7b9839b Mon Sep 17 00:00:00 2001
From: bunnei
Date: Sat, 7 Aug 2021 01:16:29 -0700
Subject: core: hle: kernel: k_auto_object: Add GetName method.

- Useful purely for debugging.
---
 src/core/hle/kernel/k_auto_object.h | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index e4fcdbc67..165b76747 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -170,6 +170,10 @@ public:
         }
     }
 
+    const std::string& GetName() const {
+        return name;
+    }
+
 private:
     void RegisterWithKernel();
     void UnregisterWithKernel();
--
cgit v1.2.3

From 5060a9721091b3cc994a73569bfd5ab573b9f47d Mon Sep 17 00:00:00 2001
From: bunnei
Date: Sat, 7 Aug 2021 12:33:31 -0700
Subject: core: hle: kernel: k_thread: Mark KScopedDisableDispatch as nodiscard.

---
 src/core/hle/kernel/k_thread.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index a149744c8..8bbf66c52 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -773,7 +773,7 @@ public:
 class KScopedDisableDispatch {
 public:
-    explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
+    [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
         GetCurrentThread(kernel).DisableDispatch();
     }
 
     ~KScopedDisableDispatch();
--
cgit v1.2.3

From aef0ca6f0d422623f46f47c15e9a4c9b5fd04dd0 Mon Sep 17 00:00:00 2001
From: bunnei
Date: Sat, 14 Aug 2021 02:14:19 -0700
Subject: core: hle: kernel: Disable dispatch count tracking on single core.

- This would have limited value, and would be a mess to handle properly.
---
 src/core/hle/kernel/k_thread.cpp |  4 +++-
 src/core/hle/kernel/k_thread.h   | 11 ++++++++---
 2 files changed, 11 insertions(+), 4 deletions(-)

(limited to 'src/core/hle/kernel')

diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 89653e0cb..0f6808ade 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -14,6 +14,7 @@
 #include "common/fiber.h"
 #include "common/logging/log.h"
 #include "common/scope_exit.h"
+#include "common/settings.h"
 #include "common/thread_queue_list.h"
 #include "core/core.h"
 #include "core/cpu_manager.h"
@@ -215,9 +216,10 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
     // Initialize the thread.
     R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
 
-    // Initialize host context.
+    // Initialize emulation parameters.
     thread->host_context =
         std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
+    thread->is_single_core = !Settings::values.use_multi_core.GetValue();
 
     return ResultSuccess;
 }
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index 8bbf66c52..e4c4c877d 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -454,8 +454,12 @@ public:
         return GetActiveCore() == 3;
     }
 
+    [[nodiscard]] bool IsDispatchTrackingDisabled() const {
+        return is_single_core || IsKernelThread();
+    }
+
     [[nodiscard]] s32 GetDisableDispatchCount() const {
-        if (IsKernelThread()) {
+        if (IsDispatchTrackingDisabled()) {
             // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
             return 1;
         }
@@ -464,7 +468,7 @@ public:
     }
 
     void DisableDispatch() {
-        if (IsKernelThread()) {
+        if (IsDispatchTrackingDisabled()) {
             // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
             return;
         }
@@ -474,7 +478,7 @@ public:
     }
 
     void EnableDispatch() {
-        if (IsKernelThread()) {
+        if (IsDispatchTrackingDisabled()) {
             // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
             return;
         }
@@ -727,6 +731,7 @@ private:
 
     // For emulation
     std::shared_ptr<Common::Fiber> host_context{};
+    bool is_single_core{};
 
     // For debugging
    std::vector<KSynchronizationObject*> wait_objects_for_debugging;
--
cgit v1.2.3