summaryrefslogtreecommitdiff
path: root/src/core
diff options
context:
space:
mode:
author: bunnei 2021-08-06 22:58:46 -0700
committer: bunnei 2021-08-07 12:18:47 -0700
commit2b9560428b6ab84fc61dd8f82e75f58cdb851c07 (patch)
treeaae3ac2f04eb73f67dd09f6bd79b7c6898c39679 /src/core
parentcore: hle: kernel: Reflect non-emulated threads as core 3. (diff)
downloadyuzu-2b9560428b6ab84fc61dd8f82e75f58cdb851c07.tar.gz
yuzu-2b9560428b6ab84fc61dd8f82e75f58cdb851c07.tar.xz
yuzu-2b9560428b6ab84fc61dd8f82e75f58cdb851c07.zip
core: hle: kernel: Ensure idle threads are closed before destroying scheduler.
Diffstat (limited to 'src/core')
-rw-r--r--src/core/hle/kernel/k_scheduler.cpp6
-rw-r--r--src/core/hle/kernel/k_scheduler.h2
-rw-r--r--src/core/hle/kernel/kernel.cpp38
3 files changed, 22 insertions, 24 deletions
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 6a7d80d03..4bae69f71 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -617,13 +617,17 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
617 state.highest_priority_thread = nullptr; 617 state.highest_priority_thread = nullptr;
618} 618}
619 619
620KScheduler::~KScheduler() { 620void KScheduler::Finalize() {
621 if (idle_thread) { 621 if (idle_thread) {
622 idle_thread->Close(); 622 idle_thread->Close();
623 idle_thread = nullptr; 623 idle_thread = nullptr;
624 } 624 }
625} 625}
626 626
627KScheduler::~KScheduler() {
628 ASSERT(!idle_thread);
629}
630
627KThread* KScheduler::GetCurrentThread() const { 631KThread* KScheduler::GetCurrentThread() const {
628 if (auto result = current_thread.load(); result) { 632 if (auto result = current_thread.load(); result) {
629 return result; 633 return result;
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 12cfae919..516e0cdba 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -33,6 +33,8 @@ public:
33 explicit KScheduler(Core::System& system_, s32 core_id_); 33 explicit KScheduler(Core::System& system_, s32 core_id_);
34 ~KScheduler(); 34 ~KScheduler();
35 35
36 void Finalize();
37
36 /// Reschedules to the next available thread (call after current thread is suspended) 38 /// Reschedules to the next available thread (call after current thread is suspended)
37 void RescheduleCurrentCore(); 39 void RescheduleCurrentCore();
38 40
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index b0b130719..6bfb55f71 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -85,8 +85,9 @@ struct KernelCore::Impl {
85 } 85 }
86 86
87 void InitializeCores() { 87 void InitializeCores() {
88 for (auto& core : cores) { 88 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
89 core.Initialize(current_process->Is64BitProcess()); 89 cores[core_id].Initialize(current_process->Is64BitProcess());
90 system.Memory().SetCurrentPageTable(*current_process, core_id);
90 } 91 }
91 } 92 }
92 93
@@ -131,15 +132,6 @@ struct KernelCore::Impl {
131 next_user_process_id = KProcess::ProcessIDMin; 132 next_user_process_id = KProcess::ProcessIDMin;
132 next_thread_id = 1; 133 next_thread_id = 1;
133 134
134 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
135 if (suspend_threads[core_id]) {
136 suspend_threads[core_id]->Close();
137 suspend_threads[core_id] = nullptr;
138 }
139
140 schedulers[core_id].reset();
141 }
142
143 cores.clear(); 135 cores.clear();
144 136
145 global_handle_table->Finalize(); 137 global_handle_table->Finalize();
@@ -167,6 +159,16 @@ struct KernelCore::Impl {
167 CleanupObject(time_shared_mem); 159 CleanupObject(time_shared_mem);
168 CleanupObject(system_resource_limit); 160 CleanupObject(system_resource_limit);
169 161
162 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
163 if (suspend_threads[core_id]) {
164 suspend_threads[core_id]->Close();
165 suspend_threads[core_id] = nullptr;
166 }
167
168 schedulers[core_id]->Finalize();
169 schedulers[core_id].reset();
170 }
171
170 // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others 172
171 next_host_thread_id = Core::Hardware::NUM_CPU_CORES; 173 next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
172 174
@@ -257,14 +259,6 @@ struct KernelCore::Impl {
257 259
258 void MakeCurrentProcess(KProcess* process) { 260 void MakeCurrentProcess(KProcess* process) {
259 current_process = process; 261 current_process = process;
260 if (process == nullptr) {
261 return;
262 }
263
264 const u32 core_id = GetCurrentHostThreadID();
265 if (core_id < Core::Hardware::NUM_CPU_CORES) {
266 system.Memory().SetCurrentPageTable(*process, core_id);
267 }
268 } 262 }
269 263
270 /// Creates a new host thread ID, should only be called by GetHostThreadId 264 /// Creates a new host thread ID, should only be called by GetHostThreadId
@@ -1048,13 +1042,11 @@ void KernelCore::ExceptionalExit() {
1048} 1042}
1049 1043
1050void KernelCore::EnterSVCProfile() { 1044void KernelCore::EnterSVCProfile() {
1051 std::size_t core = impl->GetCurrentHostThreadID(); 1045 impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
1052 impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
1053} 1046}
1054 1047
1055void KernelCore::ExitSVCProfile() { 1048void KernelCore::ExitSVCProfile() {
1056 std::size_t core = impl->GetCurrentHostThreadID(); 1049 MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
1057 MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
1058} 1050}
1059 1051
1060std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) { 1052std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {