diff options
| author | 2021-12-12 22:43:25 -0800 | |
|---|---|---|
| committer | 2021-12-12 22:43:25 -0800 | |
| commit | 280c77989880e81f622440b157a0ce1b7139847b (patch) | |
| tree | 49a3ef8127d721dc44effb8315e5db7e796336f4 /src/core/hle/kernel/kernel.cpp | |
| parent | Merge pull request #7495 from FernandoS27/text-blit-fix-again (diff) | |
| parent | hle: kernel k_scheduler: EnableScheduling: Remove redundant GetCurrentThreadP... (diff) | |
| download | yuzu-280c77989880e81f622440b157a0ce1b7139847b.tar.gz yuzu-280c77989880e81f622440b157a0ce1b7139847b.tar.xz yuzu-280c77989880e81f622440b157a0ce1b7139847b.zip | |
Merge pull request #7462 from bunnei/kernel-improve-scheduling
Kernel: Improve threading & scheduling V3
Diffstat (limited to 'src/core/hle/kernel/kernel.cpp')
| -rw-r--r-- | src/core/hle/kernel/kernel.cpp | 81 |
1 file changed, 48 insertions, 33 deletions
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 45e86a677..2e4e4cb1c 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include "common/assert.h" | 14 | #include "common/assert.h" |
| 15 | #include "common/logging/log.h" | 15 | #include "common/logging/log.h" |
| 16 | #include "common/microprofile.h" | 16 | #include "common/microprofile.h" |
| 17 | #include "common/scope_exit.h" | ||
| 17 | #include "common/thread.h" | 18 | #include "common/thread.h" |
| 18 | #include "common/thread_worker.h" | 19 | #include "common/thread_worker.h" |
| 19 | #include "core/arm/arm_interface.h" | 20 | #include "core/arm/arm_interface.h" |
| @@ -83,12 +84,16 @@ struct KernelCore::Impl { | |||
| 83 | } | 84 | } |
| 84 | 85 | ||
| 85 | void InitializeCores() { | 86 | void InitializeCores() { |
| 86 | for (auto& core : cores) { | 87 | for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { |
| 87 | core.Initialize(current_process->Is64BitProcess()); | 88 | cores[core_id].Initialize(current_process->Is64BitProcess()); |
| 89 | system.Memory().SetCurrentPageTable(*current_process, core_id); | ||
| 88 | } | 90 | } |
| 89 | } | 91 | } |
| 90 | 92 | ||
| 91 | void Shutdown() { | 93 | void Shutdown() { |
| 94 | is_shutting_down.store(true, std::memory_order_relaxed); | ||
| 95 | SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); }); | ||
| 96 | |||
| 92 | process_list.clear(); | 97 | process_list.clear(); |
| 93 | 98 | ||
| 94 | // Close all open server ports. | 99 | // Close all open server ports. |
| @@ -123,15 +128,6 @@ struct KernelCore::Impl { | |||
| 123 | next_user_process_id = KProcess::ProcessIDMin; | 128 | next_user_process_id = KProcess::ProcessIDMin; |
| 124 | next_thread_id = 1; | 129 | next_thread_id = 1; |
| 125 | 130 | ||
| 126 | for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | ||
| 127 | if (suspend_threads[core_id]) { | ||
| 128 | suspend_threads[core_id]->Close(); | ||
| 129 | suspend_threads[core_id] = nullptr; | ||
| 130 | } | ||
| 131 | |||
| 132 | schedulers[core_id].reset(); | ||
| 133 | } | ||
| 134 | |||
| 135 | cores.clear(); | 131 | cores.clear(); |
| 136 | 132 | ||
| 137 | global_handle_table->Finalize(); | 133 | global_handle_table->Finalize(); |
| @@ -159,6 +155,16 @@ struct KernelCore::Impl { | |||
| 159 | CleanupObject(time_shared_mem); | 155 | CleanupObject(time_shared_mem); |
| 160 | CleanupObject(system_resource_limit); | 156 | CleanupObject(system_resource_limit); |
| 161 | 157 | ||
| 158 | for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | ||
| 159 | if (suspend_threads[core_id]) { | ||
| 160 | suspend_threads[core_id]->Close(); | ||
| 161 | suspend_threads[core_id] = nullptr; | ||
| 162 | } | ||
| 163 | |||
| 164 | schedulers[core_id]->Finalize(); | ||
| 165 | schedulers[core_id].reset(); | ||
| 166 | } | ||
| 167 | |||
| 162 | // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others | 168 | // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others |
| 163 | next_host_thread_id = Core::Hardware::NUM_CPU_CORES; | 169 | next_host_thread_id = Core::Hardware::NUM_CPU_CORES; |
| 164 | 170 | ||
| @@ -245,13 +251,11 @@ struct KernelCore::Impl { | |||
| 245 | KScopedSchedulerLock lock(kernel); | 251 | KScopedSchedulerLock lock(kernel); |
| 246 | global_scheduler_context->PreemptThreads(); | 252 | global_scheduler_context->PreemptThreads(); |
| 247 | } | 253 | } |
| 248 | const auto time_interval = std::chrono::nanoseconds{ | 254 | const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)}; |
| 249 | Core::Timing::msToCycles(std::chrono::milliseconds(10))}; | ||
| 250 | system.CoreTiming().ScheduleEvent(time_interval, preemption_event); | 255 | system.CoreTiming().ScheduleEvent(time_interval, preemption_event); |
| 251 | }); | 256 | }); |
| 252 | 257 | ||
| 253 | const auto time_interval = | 258 | const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)}; |
| 254 | std::chrono::nanoseconds{Core::Timing::msToCycles(std::chrono::milliseconds(10))}; | ||
| 255 | system.CoreTiming().ScheduleEvent(time_interval, preemption_event); | 259 | system.CoreTiming().ScheduleEvent(time_interval, preemption_event); |
| 256 | } | 260 | } |
| 257 | 261 | ||
| @@ -267,14 +271,6 @@ struct KernelCore::Impl { | |||
| 267 | 271 | ||
| 268 | void MakeCurrentProcess(KProcess* process) { | 272 | void MakeCurrentProcess(KProcess* process) { |
| 269 | current_process = process; | 273 | current_process = process; |
| 270 | if (process == nullptr) { | ||
| 271 | return; | ||
| 272 | } | ||
| 273 | |||
| 274 | const u32 core_id = GetCurrentHostThreadID(); | ||
| 275 | if (core_id < Core::Hardware::NUM_CPU_CORES) { | ||
| 276 | system.Memory().SetCurrentPageTable(*process, core_id); | ||
| 277 | } | ||
| 278 | } | 274 | } |
| 279 | 275 | ||
| 280 | static inline thread_local u32 host_thread_id = UINT32_MAX; | 276 | static inline thread_local u32 host_thread_id = UINT32_MAX; |
| @@ -344,7 +340,16 @@ struct KernelCore::Impl { | |||
| 344 | is_phantom_mode_for_singlecore = value; | 340 | is_phantom_mode_for_singlecore = value; |
| 345 | } | 341 | } |
| 346 | 342 | ||
| 343 | bool IsShuttingDown() const { | ||
| 344 | return is_shutting_down.load(std::memory_order_relaxed); | ||
| 345 | } | ||
| 346 | |||
| 347 | KThread* GetCurrentEmuThread() { | 347 | KThread* GetCurrentEmuThread() { |
| 348 | // If we are shutting down the kernel, none of this is relevant anymore. | ||
| 349 | if (IsShuttingDown()) { | ||
| 350 | return {}; | ||
| 351 | } | ||
| 352 | |||
| 348 | const auto thread_id = GetCurrentHostThreadID(); | 353 | const auto thread_id = GetCurrentHostThreadID(); |
| 349 | if (thread_id >= Core::Hardware::NUM_CPU_CORES) { | 354 | if (thread_id >= Core::Hardware::NUM_CPU_CORES) { |
| 350 | return GetHostDummyThread(); | 355 | return GetHostDummyThread(); |
| @@ -760,6 +765,7 @@ struct KernelCore::Impl { | |||
| 760 | std::vector<std::unique_ptr<KThread>> dummy_threads; | 765 | std::vector<std::unique_ptr<KThread>> dummy_threads; |
| 761 | 766 | ||
| 762 | bool is_multicore{}; | 767 | bool is_multicore{}; |
| 768 | std::atomic_bool is_shutting_down{}; | ||
| 763 | bool is_phantom_mode_for_singlecore{}; | 769 | bool is_phantom_mode_for_singlecore{}; |
| 764 | u32 single_core_thread_id{}; | 770 | u32 single_core_thread_id{}; |
| 765 | 771 | ||
| @@ -845,16 +851,20 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const { | |||
| 845 | return impl->cores[id]; | 851 | return impl->cores[id]; |
| 846 | } | 852 | } |
| 847 | 853 | ||
| 854 | size_t KernelCore::CurrentPhysicalCoreIndex() const { | ||
| 855 | const u32 core_id = impl->GetCurrentHostThreadID(); | ||
| 856 | if (core_id >= Core::Hardware::NUM_CPU_CORES) { | ||
| 857 | return Core::Hardware::NUM_CPU_CORES - 1; | ||
| 858 | } | ||
| 859 | return core_id; | ||
| 860 | } | ||
| 861 | |||
| 848 | Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() { | 862 | Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() { |
| 849 | u32 core_id = impl->GetCurrentHostThreadID(); | 863 | return impl->cores[CurrentPhysicalCoreIndex()]; |
| 850 | ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); | ||
| 851 | return impl->cores[core_id]; | ||
| 852 | } | 864 | } |
| 853 | 865 | ||
| 854 | const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const { | 866 | const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const { |
| 855 | u32 core_id = impl->GetCurrentHostThreadID(); | 867 | return impl->cores[CurrentPhysicalCoreIndex()]; |
| 856 | ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); | ||
| 857 | return impl->cores[core_id]; | ||
| 858 | } | 868 | } |
| 859 | 869 | ||
| 860 | Kernel::KScheduler* KernelCore::CurrentScheduler() { | 870 | Kernel::KScheduler* KernelCore::CurrentScheduler() { |
| @@ -1057,6 +1067,9 @@ void KernelCore::Suspend(bool in_suspention) { | |||
| 1057 | impl->suspend_threads[core_id]->SetState(state); | 1067 | impl->suspend_threads[core_id]->SetState(state); |
| 1058 | impl->suspend_threads[core_id]->SetWaitReasonForDebugging( | 1068 | impl->suspend_threads[core_id]->SetWaitReasonForDebugging( |
| 1059 | ThreadWaitReasonForDebugging::Suspended); | 1069 | ThreadWaitReasonForDebugging::Suspended); |
| 1070 | if (!should_suspend) { | ||
| 1071 | impl->suspend_threads[core_id]->DisableDispatch(); | ||
| 1072 | } | ||
| 1060 | } | 1073 | } |
| 1061 | } | 1074 | } |
| 1062 | } | 1075 | } |
| @@ -1065,19 +1078,21 @@ bool KernelCore::IsMulticore() const { | |||
| 1065 | return impl->is_multicore; | 1078 | return impl->is_multicore; |
| 1066 | } | 1079 | } |
| 1067 | 1080 | ||
| 1081 | bool KernelCore::IsShuttingDown() const { | ||
| 1082 | return impl->IsShuttingDown(); | ||
| 1083 | } | ||
| 1084 | |||
| 1068 | void KernelCore::ExceptionalExit() { | 1085 | void KernelCore::ExceptionalExit() { |
| 1069 | exception_exited = true; | 1086 | exception_exited = true; |
| 1070 | Suspend(true); | 1087 | Suspend(true); |
| 1071 | } | 1088 | } |
| 1072 | 1089 | ||
| 1073 | void KernelCore::EnterSVCProfile() { | 1090 | void KernelCore::EnterSVCProfile() { |
| 1074 | std::size_t core = impl->GetCurrentHostThreadID(); | 1091 | impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC)); |
| 1075 | impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC)); | ||
| 1076 | } | 1092 | } |
| 1077 | 1093 | ||
| 1078 | void KernelCore::ExitSVCProfile() { | 1094 | void KernelCore::ExitSVCProfile() { |
| 1079 | std::size_t core = impl->GetCurrentHostThreadID(); | 1095 | MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]); |
| 1080 | MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]); | ||
| 1081 | } | 1096 | } |
| 1082 | 1097 | ||
| 1083 | std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) { | 1098 | std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) { |