Diffstat (limited to 'src/core/cpu_manager.cpp')
-rw-r--r--  src/core/cpu_manager.cpp | 368
1 file changed, 330 insertions(+), 38 deletions(-)
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 70ddbdcca..32afcf3ae 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -2,80 +2,372 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include "common/fiber.h"
+#include "common/microprofile.h"
+#include "common/thread.h"
 #include "core/arm/exclusive_monitor.h"
 #include "core/core.h"
-#include "core/core_manager.h"
 #include "core/core_timing.h"
 #include "core/cpu_manager.h"
 #include "core/gdbstub/gdbstub.h"
+#include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/physical_core.h"
+#include "core/hle/kernel/scheduler.h"
+#include "core/hle/kernel/thread.h"
+#include "video_core/gpu.h"
 
 namespace Core {
 
 CpuManager::CpuManager(System& system) : system{system} {}
 CpuManager::~CpuManager() = default;
 
+void CpuManager::ThreadStart(CpuManager& cpu_manager, std::size_t core) {
+    cpu_manager.RunThread(core);
+}
+
 void CpuManager::Initialize() {
-    for (std::size_t index = 0; index < core_managers.size(); ++index) {
-        core_managers[index] = std::make_unique<CoreManager>(system, index);
+    running_mode = true;
+    if (is_multicore) {
+        for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+            core_data[core].host_thread =
+                std::make_unique<std::thread>(ThreadStart, std::ref(*this), core);
+        }
+    } else {
+        core_data[0].host_thread = std::make_unique<std::thread>(ThreadStart, std::ref(*this), 0);
     }
 }
 
 void CpuManager::Shutdown() {
-    for (auto& cpu_core : core_managers) {
-        cpu_core.reset();
+    running_mode = false;
+    Pause(false);
+    if (is_multicore) {
+        for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+            core_data[core].host_thread->join();
+            core_data[core].host_thread.reset();
+        }
+    } else {
+        core_data[0].host_thread->join();
+        core_data[0].host_thread.reset();
     }
 }
 
-CoreManager& CpuManager::GetCoreManager(std::size_t index) {
-    return *core_managers.at(index);
+std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() {
+    return std::function<void(void*)>(GuestThreadFunction);
 }
 
-const CoreManager& CpuManager::GetCoreManager(std::size_t index) const {
-    return *core_managers.at(index);
+std::function<void(void*)> CpuManager::GetIdleThreadStartFunc() {
+    return std::function<void(void*)>(IdleThreadFunction);
 }
 
-CoreManager& CpuManager::GetCurrentCoreManager() {
-    // Otherwise, use single-threaded mode active_core variable
-    return *core_managers[active_core];
+std::function<void(void*)> CpuManager::GetSuspendThreadStartFunc() {
+    return std::function<void(void*)>(SuspendThreadFunction);
 }
 
-const CoreManager& CpuManager::GetCurrentCoreManager() const {
-    // Otherwise, use single-threaded mode active_core variable
-    return *core_managers[active_core];
+void CpuManager::GuestThreadFunction(void* cpu_manager_) {
+    CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
+    if (cpu_manager->is_multicore) {
+        cpu_manager->MultiCoreRunGuestThread();
+    } else {
+        cpu_manager->SingleCoreRunGuestThread();
+    }
 }
 
-void CpuManager::RunLoop(bool tight_loop) {
-    if (GDBStub::IsServerEnabled()) {
-        GDBStub::HandlePacket();
+void CpuManager::GuestRewindFunction(void* cpu_manager_) {
+    CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
+    if (cpu_manager->is_multicore) {
+        cpu_manager->MultiCoreRunGuestLoop();
+    } else {
+        cpu_manager->SingleCoreRunGuestLoop();
+    }
+}
 
-        // If the loop is halted and we want to step, use a tiny (1) number of instructions to
-        // execute. Otherwise, get out of the loop function.
-        if (GDBStub::GetCpuHaltFlag()) {
-            if (GDBStub::GetCpuStepFlag()) {
-                tight_loop = false;
-            } else {
-                return;
+void CpuManager::IdleThreadFunction(void* cpu_manager_) {
+    CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
+    if (cpu_manager->is_multicore) {
+        cpu_manager->MultiCoreRunIdleThread();
+    } else {
+        cpu_manager->SingleCoreRunIdleThread();
+    }
+}
+
+void CpuManager::SuspendThreadFunction(void* cpu_manager_) {
+    CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
+    if (cpu_manager->is_multicore) {
+        cpu_manager->MultiCoreRunSuspendThread();
+    } else {
+        cpu_manager->SingleCoreRunSuspendThread();
+    }
+}
+
+void* CpuManager::GetStartFuncParamater() {
+    return static_cast<void*>(this);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///                             MultiCore                                   ///
+///////////////////////////////////////////////////////////////////////////////
+
+void CpuManager::MultiCoreRunGuestThread() {
+    auto& kernel = system.Kernel();
+    {
+        auto& sched = kernel.CurrentScheduler();
+        sched.OnThreadStart();
+    }
+    MultiCoreRunGuestLoop();
+}
+
+void CpuManager::MultiCoreRunGuestLoop() {
+    auto& kernel = system.Kernel();
+    auto* thread = kernel.CurrentScheduler().GetCurrentThread();
+    while (true) {
+        auto* physical_core = &kernel.CurrentPhysicalCore();
+        auto& arm_interface = thread->ArmInterface();
+        system.EnterDynarmicProfile();
+        while (!physical_core->IsInterrupted()) {
+            arm_interface.Run();
+            physical_core = &kernel.CurrentPhysicalCore();
+        }
+        system.ExitDynarmicProfile();
+        arm_interface.ClearExclusiveState();
+        auto& scheduler = kernel.CurrentScheduler();
+        scheduler.TryDoContextSwitch();
+    }
+}
+
+void CpuManager::MultiCoreRunIdleThread() {
+    auto& kernel = system.Kernel();
+    while (true) {
+        auto& physical_core = kernel.CurrentPhysicalCore();
+        physical_core.Idle();
+        auto& scheduler = kernel.CurrentScheduler();
+        scheduler.TryDoContextSwitch();
+    }
+}
+
+void CpuManager::MultiCoreRunSuspendThread() {
+    auto& kernel = system.Kernel();
+    {
+        auto& sched = kernel.CurrentScheduler();
+        sched.OnThreadStart();
+    }
+    while (true) {
+        auto core = kernel.GetCurrentHostThreadID();
+        auto& scheduler = kernel.CurrentScheduler();
+        Kernel::Thread* current_thread = scheduler.GetCurrentThread();
+        Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context);
+        ASSERT(scheduler.ContextSwitchPending());
+        ASSERT(core == kernel.GetCurrentHostThreadID());
+        scheduler.TryDoContextSwitch();
+    }
+}
+
+void CpuManager::MultiCorePause(bool paused) {
+    if (!paused) {
+        bool all_not_barrier = false;
+        while (!all_not_barrier) {
+            all_not_barrier = true;
+            for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+                all_not_barrier &=
+                    !core_data[core].is_running.load() && core_data[core].initialized.load();
+            }
+        }
+        for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+            core_data[core].enter_barrier->Set();
+        }
+        if (paused_state.load()) {
+            bool all_barrier = false;
+            while (!all_barrier) {
+                all_barrier = true;
+                for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+                    all_barrier &=
+                        core_data[core].is_paused.load() && core_data[core].initialized.load();
+                }
+            }
+            for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+                core_data[core].exit_barrier->Set();
+            }
+        }
+    } else {
+        /// Wait until all cores are paused.
+        bool all_barrier = false;
+        while (!all_barrier) {
+            all_barrier = true;
+            for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+                all_barrier &=
+                    core_data[core].is_paused.load() && core_data[core].initialized.load();
             }
         }
+        /// Don't release the barrier
     }
+    paused_state = paused;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+///                             SingleCore                                  ///
+///////////////////////////////////////////////////////////////////////////////
 
-    auto& core_timing = system.CoreTiming();
-    core_timing.ResetRun();
-    bool keep_running{};
-    do {
-        keep_running = false;
-        for (active_core = 0; active_core < NUM_CPU_CORES; ++active_core) {
-            core_timing.SwitchContext(active_core);
-            if (core_timing.CanCurrentContextRun()) {
-                core_managers[active_core]->RunLoop(tight_loop);
+void CpuManager::SingleCoreRunGuestThread() {
+    auto& kernel = system.Kernel();
+    {
+        auto& sched = kernel.CurrentScheduler();
+        sched.OnThreadStart();
+    }
+    SingleCoreRunGuestLoop();
+}
+
+void CpuManager::SingleCoreRunGuestLoop() {
+    auto& kernel = system.Kernel();
+    auto* thread = kernel.CurrentScheduler().GetCurrentThread();
+    while (true) {
+        auto* physical_core = &kernel.CurrentPhysicalCore();
+        auto& arm_interface = thread->ArmInterface();
+        system.EnterDynarmicProfile();
+        if (!physical_core->IsInterrupted()) {
+            arm_interface.Run();
+            physical_core = &kernel.CurrentPhysicalCore();
+        }
+        system.ExitDynarmicProfile();
+        thread->SetPhantomMode(true);
+        system.CoreTiming().Advance();
+        thread->SetPhantomMode(false);
+        arm_interface.ClearExclusiveState();
+        PreemptSingleCore();
+        auto& scheduler = kernel.Scheduler(current_core);
+        scheduler.TryDoContextSwitch();
+    }
+}
+
+void CpuManager::SingleCoreRunIdleThread() {
+    auto& kernel = system.Kernel();
+    while (true) {
+        auto& physical_core = kernel.CurrentPhysicalCore();
+        PreemptSingleCore(false);
+        system.CoreTiming().AddTicks(1000U);
+        idle_count++;
+        auto& scheduler = physical_core.Scheduler();
+        scheduler.TryDoContextSwitch();
+    }
+}
+
+void CpuManager::SingleCoreRunSuspendThread() {
+    auto& kernel = system.Kernel();
+    {
+        auto& sched = kernel.CurrentScheduler();
+        sched.OnThreadStart();
+    }
+    while (true) {
+        auto core = kernel.GetCurrentHostThreadID();
+        auto& scheduler = kernel.CurrentScheduler();
+        Kernel::Thread* current_thread = scheduler.GetCurrentThread();
+        Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context);
+        ASSERT(scheduler.ContextSwitchPending());
+        ASSERT(core == kernel.GetCurrentHostThreadID());
+        scheduler.TryDoContextSwitch();
+    }
+}
+
+void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
+    std::size_t old_core = current_core;
+    auto& scheduler = system.Kernel().Scheduler(old_core);
+    Kernel::Thread* current_thread = scheduler.GetCurrentThread();
+    if (idle_count >= 4 || from_running_enviroment) {
+        if (!from_running_enviroment) {
+            system.CoreTiming().Idle();
+            idle_count = 0;
+        }
+        current_thread->SetPhantomMode(true);
+        system.CoreTiming().Advance();
+        current_thread->SetPhantomMode(false);
+    }
+    current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
+    system.CoreTiming().ResetTicks();
+    scheduler.Unload();
+    auto& next_scheduler = system.Kernel().Scheduler(current_core);
+    Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext());
+    /// May have changed scheduler
+    auto& current_scheduler = system.Kernel().Scheduler(current_core);
+    current_scheduler.Reload();
+    auto* currrent_thread2 = current_scheduler.GetCurrentThread();
+    if (!currrent_thread2->IsIdleThread()) {
+        idle_count = 0;
+    }
+}
+
+void CpuManager::SingleCorePause(bool paused) {
+    if (!paused) {
+        bool all_not_barrier = false;
+        while (!all_not_barrier) {
+            all_not_barrier = !core_data[0].is_running.load() && core_data[0].initialized.load();
+        }
+        core_data[0].enter_barrier->Set();
+        if (paused_state.load()) {
+            bool all_barrier = false;
+            while (!all_barrier) {
+                all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
             }
-            keep_running |= core_timing.CanCurrentContextRun();
+            core_data[0].exit_barrier->Set();
         }
-    } while (keep_running);
+    } else {
+        /// Wait until all cores are paused.
+        bool all_barrier = false;
+        while (!all_barrier) {
+            all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
+        }
+        /// Don't release the barrier
+    }
+    paused_state = paused;
+}
+
+void CpuManager::Pause(bool paused) {
+    if (is_multicore) {
+        MultiCorePause(paused);
+    } else {
+        SingleCorePause(paused);
+    }
+}
 
-    if (GDBStub::IsServerEnabled()) {
-        GDBStub::SetCpuStepFlag(false);
+void CpuManager::RunThread(std::size_t core) {
+    /// Initialization
+    system.RegisterCoreThread(core);
+    std::string name;
+    if (is_multicore) {
+        name = "yuzu:CoreCPUThread_" + std::to_string(core);
+    } else {
+        name = "yuzu:CPUThread";
+    }
+    MicroProfileOnThreadCreate(name.c_str());
+    Common::SetCurrentThreadName(name.c_str());
+    Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
+    auto& data = core_data[core];
+    data.enter_barrier = std::make_unique<Common::Event>();
+    data.exit_barrier = std::make_unique<Common::Event>();
+    data.host_context = Common::Fiber::ThreadToFiber();
+    data.is_running = false;
+    data.initialized = true;
+    const bool sc_sync = !is_async_gpu && !is_multicore;
+    bool sc_sync_first_use = sc_sync;
+    /// Running
+    while (running_mode) {
+        data.is_running = false;
+        data.enter_barrier->Wait();
+        if (sc_sync_first_use) {
+            system.GPU().ObtainContext();
+            sc_sync_first_use = false;
+        }
+        auto& scheduler = system.Kernel().CurrentScheduler();
+        Kernel::Thread* current_thread = scheduler.GetCurrentThread();
+        data.is_running = true;
+        Common::Fiber::YieldTo(data.host_context, current_thread->GetHostContext());
+        data.is_running = false;
+        data.is_paused = true;
+        data.exit_barrier->Wait();
+        data.is_paused = false;
     }
+    /// Time to cleanup
+    data.host_context->Exit();
+    data.enter_barrier.reset();
+    data.exit_barrier.reset();
+    data.initialized = false;
 }
 
 } // namespace Core