summary | refs | log | tree | commit | diff
path: root/src/core/hle/kernel
diff options
context:
space:
mode:
author: bunnei 2021-08-06 22:58:46 -0700
committer: bunnei 2021-12-06 16:39:16 -0800
commit: 3bd5d4b6f8887ffe302a73e3ad595f58409b5c9e (patch)
tree: dca57ef24acc7094d5237694b351e38ae7676043 /src/core/hle/kernel
parent: core: hle: kernel: Reflect non-emulated threads as core 3. (diff)
download: yuzu-3bd5d4b6f8887ffe302a73e3ad595f58409b5c9e.tar.gz
yuzu-3bd5d4b6f8887ffe302a73e3ad595f58409b5c9e.tar.xz
yuzu-3bd5d4b6f8887ffe302a73e3ad595f58409b5c9e.zip
core: hle: kernel: Ensure idle threads are closed before destroying scheduler.
Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--src/core/hle/kernel/k_scheduler.cpp6
-rw-r--r--src/core/hle/kernel/k_scheduler.h2
-rw-r--r--src/core/hle/kernel/kernel.cpp38
3 files changed, 22 insertions, 24 deletions
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 6a7d80d03..4bae69f71 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -617,13 +617,17 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
617 state.highest_priority_thread = nullptr; 617 state.highest_priority_thread = nullptr;
618} 618}
619 619
620KScheduler::~KScheduler() { 620void KScheduler::Finalize() {
621 if (idle_thread) { 621 if (idle_thread) {
622 idle_thread->Close(); 622 idle_thread->Close();
623 idle_thread = nullptr; 623 idle_thread = nullptr;
624 } 624 }
625} 625}
626 626
627KScheduler::~KScheduler() {
628 ASSERT(!idle_thread);
629}
630
627KThread* KScheduler::GetCurrentThread() const { 631KThread* KScheduler::GetCurrentThread() const {
628 if (auto result = current_thread.load(); result) { 632 if (auto result = current_thread.load(); result) {
629 return result; 633 return result;
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 7df288438..82fcd99e7 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -33,6 +33,8 @@ public:
33 explicit KScheduler(Core::System& system_, s32 core_id_); 33 explicit KScheduler(Core::System& system_, s32 core_id_);
34 ~KScheduler(); 34 ~KScheduler();
35 35
36 void Finalize();
37
36 /// Reschedules to the next available thread (call after current thread is suspended) 38 /// Reschedules to the next available thread (call after current thread is suspended)
37 void RescheduleCurrentCore(); 39 void RescheduleCurrentCore();
38 40
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 04926a291..275fee0d8 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -83,8 +83,9 @@ struct KernelCore::Impl {
83 } 83 }
84 84
85 void InitializeCores() { 85 void InitializeCores() {
86 for (auto& core : cores) { 86 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
87 core.Initialize(current_process->Is64BitProcess()); 87 cores[core_id].Initialize(current_process->Is64BitProcess());
88 system.Memory().SetCurrentPageTable(*current_process, core_id);
88 } 89 }
89 } 90 }
90 91
@@ -123,15 +124,6 @@ struct KernelCore::Impl {
123 next_user_process_id = KProcess::ProcessIDMin; 124 next_user_process_id = KProcess::ProcessIDMin;
124 next_thread_id = 1; 125 next_thread_id = 1;
125 126
126 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
127 if (suspend_threads[core_id]) {
128 suspend_threads[core_id]->Close();
129 suspend_threads[core_id] = nullptr;
130 }
131
132 schedulers[core_id].reset();
133 }
134
135 cores.clear(); 127 cores.clear();
136 128
137 global_handle_table->Finalize(); 129 global_handle_table->Finalize();
@@ -159,6 +151,16 @@ struct KernelCore::Impl {
159 CleanupObject(time_shared_mem); 151 CleanupObject(time_shared_mem);
160 CleanupObject(system_resource_limit); 152 CleanupObject(system_resource_limit);
161 153
154 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
155 if (suspend_threads[core_id]) {
156 suspend_threads[core_id]->Close();
157 suspend_threads[core_id] = nullptr;
158 }
159
160 schedulers[core_id]->Finalize();
161 schedulers[core_id].reset();
162 }
163
162 // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others 164
163 next_host_thread_id = Core::Hardware::NUM_CPU_CORES; 165 next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
164 166
@@ -267,14 +269,6 @@ struct KernelCore::Impl {
267 269
268 void MakeCurrentProcess(KProcess* process) { 270 void MakeCurrentProcess(KProcess* process) {
269 current_process = process; 271 current_process = process;
270 if (process == nullptr) {
271 return;
272 }
273
274 const u32 core_id = GetCurrentHostThreadID();
275 if (core_id < Core::Hardware::NUM_CPU_CORES) {
276 system.Memory().SetCurrentPageTable(*process, core_id);
277 }
278 } 272 }
279 273
280 static inline thread_local u32 host_thread_id = UINT32_MAX; 274 static inline thread_local u32 host_thread_id = UINT32_MAX;
@@ -1079,13 +1073,11 @@ void KernelCore::ExceptionalExit() {
1079} 1073}
1080 1074
1081void KernelCore::EnterSVCProfile() { 1075void KernelCore::EnterSVCProfile() {
1082 std::size_t core = impl->GetCurrentHostThreadID(); 1076 impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
1083 impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
1084} 1077}
1085 1078
1086void KernelCore::ExitSVCProfile() { 1079void KernelCore::ExitSVCProfile() {
1087 std::size_t core = impl->GetCurrentHostThreadID(); 1080 MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
1088 MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
1089} 1081}
1090 1082
1091std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) { 1083std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {