author    bunnei  2022-06-13 15:48:03 -0700
committer GitHub  2022-06-13 15:48:03 -0700
commit    741da9c8bfaa77f96d4c7ddbc82346b2322491db (patch)
tree      d309493a62422a627f7b42aa5f582ce55b68e123 /src/core/cpu_manager.cpp
parent    Merge pull request #8446 from liamwhite/cmd-gdb (diff)
parent    CpuManager: simplify pausing (diff)
Merge pull request #8388 from liamwhite/simpler-pause
CpuManager: simplify pausing
Diffstat (limited to 'src/core/cpu_manager.cpp')
-rw-r--r--  src/core/cpu_manager.cpp | 116
1 file changed, 32 insertions(+), 84 deletions(-)
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 09d9c5163..b4718fbbe 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -16,7 +16,8 @@
 
 namespace Core {
 
-CpuManager::CpuManager(System& system_) : system{system_} {}
+CpuManager::CpuManager(System& system_)
+    : pause_barrier{std::make_unique<Common::Barrier>(1)}, system{system_} {}
 CpuManager::~CpuManager() = default;
 
 void CpuManager::ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager,
@@ -30,8 +31,10 @@ void CpuManager::Initialize() {
         for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
             core_data[core].host_thread = std::jthread(ThreadStart, std::ref(*this), core);
         }
+        pause_barrier = std::make_unique<Common::Barrier>(Core::Hardware::NUM_CPU_CORES + 1);
     } else {
         core_data[0].host_thread = std::jthread(ThreadStart, std::ref(*this), 0);
+        pause_barrier = std::make_unique<Common::Barrier>(2);
     }
 }
 
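Note on the sizing above: Initialize() counts every participant in the pause rendezvous, i.e. all emulated-core host threads plus the one thread that calls Pause(), hence NUM_CPU_CORES + 1 in multicore mode and 2 (one core thread plus the caller) in single-core mode. The constructor's size-1 barrier presumably just makes a Pause() issued before Initialize() a harmless no-op. A minimal sketch of the same sizing rule using C++20's std::barrier (Common::Barrier is yuzu's own primitive; std::barrier and MakePauseBarrier here are illustrative stand-ins):

    #include <barrier>
    #include <cstddef>
    #include <memory>

    // Illustrative stand-in for the pause barrier: every worker thread plus
    // the single controlling thread must arrive before a phase completes.
    std::unique_ptr<std::barrier<>> MakePauseBarrier(std::size_t num_cores) {
        return std::make_unique<std::barrier<>>(
            static_cast<std::ptrdiff_t>(num_cores) + 1);
    }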
@@ -138,51 +141,14 @@ void CpuManager::MultiCoreRunSuspendThread() {
         auto core = kernel.CurrentPhysicalCoreIndex();
         auto& scheduler = *kernel.CurrentScheduler();
         Kernel::KThread* current_thread = scheduler.GetCurrentThread();
+        current_thread->DisableDispatch();
+
         Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
-        ASSERT(scheduler.ContextSwitchPending());
         ASSERT(core == kernel.CurrentPhysicalCoreIndex());
         scheduler.RescheduleCurrentCore();
     }
 }
 
-void CpuManager::MultiCorePause(bool paused) {
-    if (!paused) {
-        bool all_not_barrier = false;
-        while (!all_not_barrier) {
-            all_not_barrier = true;
-            for (const auto& data : core_data) {
-                all_not_barrier &= !data.is_running.load() && data.initialized.load();
-            }
-        }
-        for (auto& data : core_data) {
-            data.enter_barrier->Set();
-        }
-        if (paused_state.load()) {
-            bool all_barrier = false;
-            while (!all_barrier) {
-                all_barrier = true;
-                for (const auto& data : core_data) {
-                    all_barrier &= data.is_paused.load() && data.initialized.load();
-                }
-            }
-            for (auto& data : core_data) {
-                data.exit_barrier->Set();
-            }
-        }
-    } else {
-        /// Wait until all cores are paused.
-        bool all_barrier = false;
-        while (!all_barrier) {
-            all_barrier = true;
-            for (const auto& data : core_data) {
-                all_barrier &= data.is_paused.load() && data.initialized.load();
-            }
-        }
-        /// Don't release the barrier
-    }
-    paused_state = paused;
-}
-
 ///////////////////////////////////////////////////////////////////////////////
 /// SingleCore                                                               ///
 ///////////////////////////////////////////////////////////////////////////////
@@ -235,8 +201,9 @@ void CpuManager::SingleCoreRunSuspendThread() {
         auto core = kernel.GetCurrentHostThreadID();
         auto& scheduler = *kernel.CurrentScheduler();
         Kernel::KThread* current_thread = scheduler.GetCurrentThread();
+        current_thread->DisableDispatch();
+
         Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[0].host_context);
-        ASSERT(scheduler.ContextSwitchPending());
         ASSERT(core == kernel.GetCurrentHostThreadID());
         scheduler.RescheduleCurrentCore();
     }
@@ -274,37 +241,21 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
     }
 }
 
-void CpuManager::SingleCorePause(bool paused) {
-    if (!paused) {
-        bool all_not_barrier = false;
-        while (!all_not_barrier) {
-            all_not_barrier = !core_data[0].is_running.load() && core_data[0].initialized.load();
-        }
-        core_data[0].enter_barrier->Set();
-        if (paused_state.load()) {
-            bool all_barrier = false;
-            while (!all_barrier) {
-                all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
-            }
-            core_data[0].exit_barrier->Set();
-        }
-    } else {
-        /// Wait until all cores are paused.
-        bool all_barrier = false;
-        while (!all_barrier) {
-            all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
-        }
-        /// Don't release the barrier
-    }
-    paused_state = paused;
-}
-
 void CpuManager::Pause(bool paused) {
-    if (is_multicore) {
-        MultiCorePause(paused);
-    } else {
-        SingleCorePause(paused);
-    }
+    std::scoped_lock lk{pause_lock};
+
+    if (pause_state == paused) {
+        return;
+    }
+
+    // Set the new state
+    pause_state.store(paused);
+
+    // Wake up any waiting threads
+    pause_state.notify_all();
+
+    // Wait for all threads to successfully change state before returning
+    pause_barrier->Sync();
 }
 
 void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
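The rewritten Pause() above replaces the old per-core spin loops (polling is_running/is_paused plus per-core enter/exit events) with one handshake built from a C++20 atomic flag and a shared barrier: publish the new state, wake any workers blocked in an atomic wait, then rendezvous so every worker has acknowledged the change before Pause() returns. A minimal controller-side sketch, under the assumption that Common::Barrier::Sync() behaves like std::barrier::arrive_and_wait() (PauseController and its members are invented names, not yuzu API):

    #include <atomic>
    #include <barrier>
    #include <mutex>

    // Invented illustration of the controller side of the pause protocol.
    class PauseController {
    public:
        explicit PauseController(std::ptrdiff_t participants) : barrier{participants} {}

        void SetPaused(bool paused) {
            std::scoped_lock lk{lock};     // serialize concurrent pause requests
            if (state.load() == paused) {
                return;                    // already in the requested state
            }
            state.store(paused);           // publish the new state
            state.notify_all();            // wake workers blocked in state.wait(...)
            barrier.arrive_and_wait();     // block until every worker acknowledged
        }

    private:
        std::mutex lock;
        std::atomic<bool> state{false};
        std::barrier<> barrier;
    };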
@@ -320,27 +271,29 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
     Common::SetCurrentThreadName(name.c_str());
     Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
     auto& data = core_data[core];
-    data.enter_barrier = std::make_unique<Common::Event>();
-    data.exit_barrier = std::make_unique<Common::Event>();
     data.host_context = Common::Fiber::ThreadToFiber();
-    data.is_running = false;
-    data.initialized = true;
     const bool sc_sync = !is_async_gpu && !is_multicore;
     bool sc_sync_first_use = sc_sync;
 
     // Cleanup
     SCOPE_EXIT({
         data.host_context->Exit();
-        data.enter_barrier.reset();
-        data.exit_barrier.reset();
-        data.initialized = false;
         MicroProfileOnThreadExit();
     });
 
     /// Running
     while (running_mode) {
-        data.is_running = false;
-        data.enter_barrier->Wait();
+        if (pause_state.load(std::memory_order_relaxed)) {
+            // Wait for caller to acknowledge pausing
+            pause_barrier->Sync();
+
+            // Wait until unpaused
+            pause_state.wait(true, std::memory_order_relaxed);
+
+            // Wait for caller to acknowledge unpausing
+            pause_barrier->Sync();
+        }
+
         if (sc_sync_first_use) {
             system.GPU().ObtainContext();
             sc_sync_first_use = false;
@@ -352,12 +305,7 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
         }
 
         auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
-        data.is_running = true;
         Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
-        data.is_running = false;
-        data.is_paused = true;
-        data.exit_barrier->Wait();
-        data.is_paused = false;
     }
 }
 
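Taken together, the worker side in RunThread and the controller side in Pause() form a two-phase rendezvous: one Sync() when a worker acknowledges entering the paused state, and a second when it acknowledges leaving it. A self-contained sketch of the whole protocol with standard C++20 primitives (all names here are illustrative; the real code uses Common::Barrier and the CpuManager members shown in the diff):

    #include <atomic>
    #include <barrier>
    #include <chrono>
    #include <cstddef>
    #include <cstdio>
    #include <thread>
    #include <vector>

    namespace {

    constexpr std::ptrdiff_t num_workers = 4;
    std::atomic<bool> pause_state{false};
    std::atomic<bool> running{true};
    std::barrier<> pause_barrier{num_workers + 1}; // workers + the pausing thread

    void WorkerLoop() {
        while (running.load()) {
            if (pause_state.load(std::memory_order_relaxed)) {
                pause_barrier.arrive_and_wait();                   // acknowledge pausing
                pause_state.wait(true, std::memory_order_relaxed); // sleep until unpaused
                pause_barrier.arrive_and_wait();                   // acknowledge unpausing
            }
            // Emulated core work would run here.
            std::this_thread::sleep_for(std::chrono::milliseconds(1));
        }
    }

    void SetPaused(bool paused) {
        pause_state.store(paused);
        pause_state.notify_all();        // wake workers parked in pause_state.wait(true)
        pause_barrier.arrive_and_wait(); // return only after all workers acknowledged
    }

    } // namespace

    int main() {
        std::vector<std::jthread> workers;
        for (std::ptrdiff_t i = 0; i < num_workers; ++i) {
            workers.emplace_back(WorkerLoop);
        }

        SetPaused(true);
        std::puts("all workers paused");
        SetPaused(false);
        std::puts("all workers resumed");

        running.store(false); // workers exit their loops; jthread joins on destruction
    }

Unlike the removed MultiCorePause/SingleCorePause, a wakeup here cannot be missed: the atomic wait rechecks pause_state, and the barrier guarantees neither side returns before the other has observed the new state.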