author     Fernando Sahmkow    2020-03-10 11:50:33 -0400
committer  Fernando Sahmkow    2020-06-27 11:35:43 -0400
commit     a439cdf22ea50f0e39cb51f6dff15fee3b495d16
tree       2c88310d7cca08ef451107d9ae6bd5191e7d72e5 /src
parent     Synchronization: Correct wide Assertion.
CPU_Manager: Unload/Reload threads on preemption on SingleCore
Diffstat (limited to 'src')
 -rw-r--r--  src/core/cpu_manager.cpp            14
 -rw-r--r--  src/core/cpu_manager.h               5
 -rw-r--r--  src/core/hle/kernel/scheduler.cpp   42
 -rw-r--r--  src/core/hle/kernel/scheduler.h     10
4 files changed, 64 insertions, 7 deletions
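
For readers skimming the diffs below: in single-core mode the CPU manager now runs an explicit unload / yield / reload sequence on every preemption, instead of yielding straight from one guest thread's host context into another's. The following is only an illustrative, self-contained model of that ordering; ToyScheduler, ToyCpuManager and everything else in it are invented stand-ins, not yuzu types.

// Minimal stand-in model of the new single-core preemption ordering.
#include <array>
#include <atomic>
#include <cstddef>
#include <iostream>

constexpr std::size_t NUM_CPU_CORES = 4;

struct ToyScheduler {
    bool thread_loaded = true;
    void Unload() { thread_loaded = false; }  // stands in for saving the guest context
    void Reload() { thread_loaded = true; }   // stands in for restoring the guest context
};

struct ToyCpuManager {
    std::array<ToyScheduler, NUM_CPU_CORES> schedulers{};
    std::atomic<std::size_t> current_core{0};

    void PreemptSingleCore() {
        const std::size_t old_core = current_core.load();
        current_core.store((old_core + 1) % NUM_CPU_CORES);

        schedulers[old_core].Unload();             // 1. unload the outgoing core's thread
        // 2. the real code yields to the next scheduler's control fiber here
        schedulers[current_core.load()].Reload();  // 3. reload before the guest resumes
    }
};

int main() {
    ToyCpuManager mgr;
    mgr.PreemptSingleCore();
    std::cout << "active core index: " << mgr.current_core.load() << '\n';  // prints 1
}

In the real code the yield goes through Common::Fiber::YieldTo into the destination scheduler's switch fiber, which is what the new Scheduler::ControlContext() accessor exposes.
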
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index e72f89808..95842aad1 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -225,7 +225,7 @@ void CpuManager::SingleCoreRunGuestLoop() {
         }
         physical_core.ClearExclusive();
         PreemptSingleCore();
-        auto& scheduler = physical_core.Scheduler();
+        auto& scheduler = kernel.Scheduler(current_core);
         scheduler.TryDoContextSwitch();
     }
 }
@@ -260,11 +260,15 @@ void CpuManager::SingleCoreRunSuspendThread() {
 void CpuManager::PreemptSingleCore() {
     preemption_count = 0;
     std::size_t old_core = current_core;
-    current_core = (current_core + 1) % Core::Hardware::NUM_CPU_CORES;
+    current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
     auto& scheduler = system.Kernel().Scheduler(old_core);
-    Kernel::Thread* current_thread = system.Kernel().Scheduler(old_core).GetCurrentThread();
-    Kernel::Thread* next_thread = system.Kernel().Scheduler(current_core).GetCurrentThread();
-    Common::Fiber::YieldTo(current_thread->GetHostContext(), next_thread->GetHostContext());
+    Kernel::Thread* current_thread = scheduler.GetCurrentThread();
+    scheduler.Unload();
+    auto& next_scheduler = system.Kernel().Scheduler(current_core);
+    Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext());
+    /// May have changed scheduler
+    auto& current_scheduler = system.Kernel().Scheduler(current_core);
+    current_scheduler.Reload();
 }
 
 void CpuManager::SingleCorePause(bool paused) {
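
One detail worth calling out in the hunk above: after Common::Fiber::YieldTo returns, the code re-reads current_core and fetches the scheduler again (hence the "May have changed scheduler" comment) instead of reusing next_scheduler, because further preemptions can advance the core index while this fiber is parked. Below is a tiny standalone illustration of that re-read pattern; preempt_step and the lambda standing in for the yield are hypothetical, not yuzu code.

#include <atomic>
#include <cstddef>
#include <functional>
#include <iostream>

// `yield` stands in for Common::Fiber::YieldTo, which may run arbitrary work
// (including more preemptions) before control comes back here.
void preempt_step(std::atomic<std::size_t>& current_core, std::size_t num_cores,
                  const std::function<void()>& yield) {
    const std::size_t old_core = current_core.load();
    current_core.store((old_core + 1) % num_cores);

    yield();  // control leaves this fiber; the core index can keep rotating

    // Re-read instead of trusting the value computed before the yield.
    std::cout << "resumed on core " << current_core.load() << '\n';
}

int main() {
    std::atomic<std::size_t> core{0};
    // Simulate two more preemptions happening while we were yielded away.
    preempt_step(core, 4, [&] { core.store((core.load() + 2) % 4); });
}
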
diff --git a/src/core/cpu_manager.h b/src/core/cpu_manager.h
index 1e81481ec..ff1935d5c 100644
--- a/src/core/cpu_manager.h
+++ b/src/core/cpu_manager.h
@@ -5,6 +5,7 @@
 #pragma once
 
 #include <array>
+#include <atomic>
 #include <functional>
 #include <memory>
 #include <thread>
@@ -45,7 +46,7 @@ public:
     void* GetStartFuncParamater();
 
     std::size_t CurrentCore() const {
-        return current_core;
+        return current_core.load();
     }
 
 private:
@@ -88,7 +89,7 @@ private:
     std::array<CoreData, Core::Hardware::NUM_CPU_CORES> core_data{};
 
     bool is_multicore{};
-    std::size_t current_core{};
+    std::atomic<std::size_t> current_core{};
     std::size_t preemption_count{};
     static constexpr std::size_t max_cycle_runs = 5;
 
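
The header changes above turn current_core into std::atomic<std::size_t> because PreemptSingleCore() now stores to it while CurrentCore() can be read from other host threads; with a plain std::size_t that concurrent access would be a data race. The snippet below is a minimal, unrelated demonstration of the same load/store pattern, not yuzu code.

#include <atomic>
#include <cstddef>
#include <iostream>
#include <thread>

int main() {
    std::atomic<std::size_t> current_core{0};

    // Writer: rotates the core index, the way PreemptSingleCore() does.
    std::thread writer([&] {
        for (int i = 0; i < 1000; ++i) {
            current_core.store((current_core.load() + 1) % 4);
        }
    });

    // Reader: what CurrentCore() callers on other host threads would observe.
    std::thread reader([&] {
        std::size_t last = 0;
        for (int i = 0; i < 1000; ++i) {
            last = current_core.load();
        }
        std::cout << "last observed core: " << last << '\n';
    });

    writer.join();
    reader.join();
}
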
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index d68d86cdf..00322d997 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -602,6 +602,48 @@ void Scheduler::OnThreadStart() {
     SwitchContextStep2();
 }
 
+void Scheduler::Unload() {
+    Thread* thread = current_thread.get();
+    if (thread) {
+        thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
+        thread->SetIsRunning(false);
+        if (!thread->IsHLEThread()) {
+            auto& cpu_core = system.ArmInterface(core_id);
+            cpu_core.SaveContext(thread->GetContext32());
+            cpu_core.SaveContext(thread->GetContext64());
+            // Save the TPIDR_EL0 system register in case it was modified.
+            thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+            cpu_core.ClearExclusiveState();
+        }
+        thread->context_guard.unlock();
+    }
+}
+
+void Scheduler::Reload() {
+    Thread* thread = current_thread.get();
+    if (thread) {
+        ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
+                   "Thread must be runnable.");
+
+        // Cancel any outstanding wakeup events for this thread
+        thread->SetIsRunning(true);
+        thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
+
+        auto* const thread_owner_process = thread->GetOwnerProcess();
+        if (thread_owner_process != nullptr) {
+            system.Kernel().MakeCurrentProcess(thread_owner_process);
+        }
+        if (!thread->IsHLEThread()) {
+            auto& cpu_core = system.ArmInterface(core_id);
+            cpu_core.LoadContext(thread->GetContext32());
+            cpu_core.LoadContext(thread->GetContext64());
+            cpu_core.SetTlsAddress(thread->GetTLSAddress());
+            cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+            cpu_core.ClearExclusiveState();
+        }
+    }
+}
+
 void Scheduler::SwitchContextStep2() {
     Thread* previous_thread = current_thread_prev.get();
     Thread* new_thread = selected_thread.get();
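
To make the new Unload()/Reload() pair easier to follow: Unload snapshots the guest register state (plus TPIDR_EL0) and marks the thread as not running, while Reload asserts the thread is still runnable and pushes the snapshot back into the core before the guest resumes. The reduced standalone model below mirrors only that save/restore shape; ToyContext, ToyCore and ToyThread are invented stand-ins for ARM_Interface and Kernel::Thread.

#include <array>
#include <cassert>
#include <cstdint>
#include <iostream>

struct ToyContext {
    std::array<std::uint64_t, 31> regs{};
    std::uint64_t tpidr_el0 = 0;
};

struct ToyCore {
    ToyContext live;
    void SaveContext(ToyContext& out) const { out = live; }
    void LoadContext(const ToyContext& in) { live = in; }
};

struct ToyThread {
    ToyContext ctx;
    bool running = false;
    bool runnable = true;
};

void Unload(ToyThread& thread, ToyCore& core) {
    thread.running = false;
    core.SaveContext(thread.ctx);  // snapshot the registers (and TPIDR_EL0 here)
}

void Reload(ToyThread& thread, ToyCore& core) {
    assert(thread.runnable && "Thread must be runnable.");
    thread.running = true;
    core.LoadContext(thread.ctx);  // restore the snapshot before resuming the guest
}

int main() {
    ToyCore core;
    ToyThread thread;
    core.live.regs[0] = 42;        // pretend the guest wrote a register
    Unload(thread, core);
    core.live.regs[0] = 0;         // something else ran on this core meanwhile
    Reload(thread, core);
    std::cout << "x0 after reload: " << core.live.regs[0] << '\n';  // prints 42
}
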
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 5e062bf59..f63cc5085 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -210,6 +210,12 @@ public:
     /// Reschedules to the next available thread (call after current thread is suspended)
     void TryDoContextSwitch();
 
+    /// The next two are for SingleCore Only.
+    /// Unload current thread before preempting core.
+    void Unload();
+    /// Reload current thread after core preemption.
+    void Reload();
+
     /// Gets the current running thread
     Thread* GetCurrentThread() const;
 
@@ -230,6 +236,10 @@ public:
 
     void OnThreadStart();
 
+    std::shared_ptr<Common::Fiber> ControlContext() {
+        return switch_fiber;
+    }
+
 private:
     friend class GlobalScheduler;
 
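
The new ControlContext() accessor simply hands out the scheduler's switch fiber so PreemptSingleCore() has a target to yield into. The short model below sketches only that accessor pattern; ToyFiber and ToyScheduler are stand-ins for Common::Fiber and Kernel::Scheduler, not the real classes.

#include <iostream>
#include <memory>
#include <string>
#include <utility>

struct ToyFiber {
    std::string name;
};

class ToyScheduler {
public:
    explicit ToyScheduler(std::string fiber_name)
        : switch_fiber{std::make_shared<ToyFiber>(ToyFiber{std::move(fiber_name)})} {}

    // Mirrors the shape of Scheduler::ControlContext(): expose the switch fiber
    // so the CPU manager can yield into this scheduler's control loop.
    std::shared_ptr<ToyFiber> ControlContext() {
        return switch_fiber;
    }

private:
    std::shared_ptr<ToyFiber> switch_fiber;
};

int main() {
    ToyScheduler scheduler{"core1-switch-fiber"};
    std::cout << scheduler.ControlContext()->name << '\n';
}
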