author     Fernando Sahmkow         2019-09-11 12:14:37 -0400
committer  FernandoS27              2019-10-15 11:55:16 -0400
commit     0cf26cee593c3c6abe909f3db52d972f846b13a9 (patch)
tree       6e8e4b08271d1c3bd2348ef2bdd3cf5c4912dc9f /src/core/hle/kernel/scheduler.cpp
parent     Scheduler: Corrections to YieldAndBalanceLoad and Yield bombing protection. (diff)
Scheduler: Implement Yield Count and Core migration on Thread Preemption.
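The preemption pass now tracks redundant yields and rebalances work across cores. When a core has
threads queued at its preemption priority, the thread at the front has its yield count incremented
before the queue is rotated, and the thread that takes its place is counted as well. The pass then
scans the core's suggested queue for a thread at the preemption priority that ran no more recently
than the core's current front thread, unloads it from its source core if it is running there, and
transfers it to this core. If the core would still end up running work below the preemption
priority, a second scan migrates a suitable suggested thread at or above that priority.
AskForReselectionOrMarkRedundant likewise increments the yield count of a thread whose yield
re-selected itself, replacing the old TODO. An illustrative, simplified sketch of this flow (using
stand-in types, not yuzu's own) follows the diff below.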
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  81
1 file changed, 76 insertions, 5 deletions
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 78463cef5..5581c43bf 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -241,10 +241,83 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
 void GlobalScheduler::PreemptThreads() {
     for (std::size_t core_id = 0; core_id < NUM_CPU_CORES; core_id++) {
         const u32 priority = preemption_priorities[core_id];
-        if (scheduled_queue[core_id].size(priority) > 1) {
+
+        if (scheduled_queue[core_id].size(priority) > 0) {
+            scheduled_queue[core_id].front(priority)->IncrementYieldCount();
             scheduled_queue[core_id].yield(priority);
-            reselection_pending.store(true, std::memory_order_release);
+            if (scheduled_queue[core_id].size(priority) > 1) {
+                scheduled_queue[core_id].front(priority)->IncrementYieldCount();
+            }
         }
+
+        Thread* current_thread =
+            scheduled_queue[core_id].empty() ? nullptr : scheduled_queue[core_id].front();
+        Thread* winner = nullptr;
+        for (auto& thread : suggested_queue[core_id]) {
+            const s32 source_core = thread->GetProcessorID();
+            if (thread->GetPriority() != priority) {
+                continue;
+            }
+            if (source_core >= 0) {
+                Thread* next_thread = scheduled_queue[source_core].empty()
+                                          ? nullptr
+                                          : scheduled_queue[source_core].front();
+                if (next_thread != nullptr && next_thread->GetPriority() < 2) {
+                    break;
+                }
+                if (next_thread == thread) {
+                    continue;
+                }
+            }
+            if (current_thread != nullptr &&
+                current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
+                winner = thread;
+                break;
+            }
+        }
+
+        if (winner != nullptr) {
+            if (winner->IsRunning()) {
+                UnloadThread(winner->GetProcessorID());
+            }
+            TransferToCore(winner->GetPriority(), core_id, winner);
+            current_thread = winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
+        }
+
+        if (current_thread != nullptr && current_thread->GetPriority() > priority) {
+            for (auto& thread : suggested_queue[core_id]) {
+                const s32 source_core = thread->GetProcessorID();
+                if (thread->GetPriority() > priority) {
+                    continue;
+                }
+                if (source_core >= 0) {
+                    Thread* next_thread = scheduled_queue[source_core].empty()
+                                              ? nullptr
+                                              : scheduled_queue[source_core].front();
+                    if (next_thread != nullptr && next_thread->GetPriority() < 2) {
+                        break;
+                    }
+                    if (next_thread == thread) {
+                        continue;
+                    }
+                }
+                if (current_thread != nullptr &&
+                    current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
+                    winner = thread;
+                    break;
+                }
+            }
+
+            if (winner != nullptr) {
+                if (winner->IsRunning()) {
+                    UnloadThread(winner->GetProcessorID());
+                }
+                TransferToCore(winner->GetPriority(), core_id, winner);
+                current_thread = winner;
+            }
+        }
+
+        reselection_pending.store(true, std::memory_order_release);
     }
 }
 
@@ -260,9 +333,7 @@ void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) {
 
 bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
     if (current_thread == winner) {
-        // TODO(blinkhawk): manage redundant operations, this is not implemented.
-        // as its mostly an optimization.
-        // current_thread->SetRedundantSchedulerOperation();
+        current_thread->IncrementYieldCount();
         return true;
     } else {
         reselection_pending.store(true, std::memory_order_release);
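
For readers skimming the patch, the following is a minimal, self-contained sketch of the
preemption idea above. FakeThread, FakeScheduler, PreemptCore, and the plain std::deque /
std::vector queues are hypothetical stand-ins invented for illustration; the real implementation
uses yuzu's Thread, MultiLevelQueue, UnloadThread and TransferToCore shown in the diff, and
additionally runs the second, lower-priority migration pass that this sketch omits.

// Hypothetical, simplified model of the pass added to PreemptThreads() above.
// FakeThread and FakeScheduler are illustration-only stand-ins, not yuzu types.
#include <array>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <iostream>
#include <vector>

struct FakeThread {
    int id;
    std::uint32_t priority;        // lower value = higher priority
    std::uint64_t last_run_ticks;  // tick of the thread's last run
    std::uint64_t yield_count = 0; // bumped when a yield/preemption passes it over
};

constexpr std::size_t NUM_CORES = 4;

struct FakeScheduler {
    std::array<std::deque<FakeThread*>, NUM_CORES> scheduled;  // runnable threads per core
    std::array<std::vector<FakeThread*>, NUM_CORES> suggested; // migration candidates per core

    // Rough analogue of the new preemption logic for a single core.
    void PreemptCore(std::size_t core_id, std::uint32_t preempt_priority) {
        auto& queue = scheduled[core_id];

        // Rotate the thread at the preemption priority and count the yields,
        // mirroring IncrementYieldCount() around scheduled_queue.yield(priority).
        if (!queue.empty() && queue.front()->priority == preempt_priority) {
            FakeThread* rotated = queue.front();
            rotated->yield_count++;
            queue.pop_front();
            queue.push_back(rotated);
            if (queue.front() != rotated) {
                queue.front()->yield_count++;
            }
        }

        FakeThread* current = queue.empty() ? nullptr : queue.front();

        // Pick a suggested thread of the preemption priority that ran no more
        // recently than the core's current front thread.
        FakeThread* winner = nullptr;
        for (FakeThread* candidate : suggested[core_id]) {
            if (candidate->priority != preempt_priority) {
                continue;
            }
            if (current != nullptr && current->last_run_ticks >= candidate->last_run_ticks) {
                winner = candidate;
                break;
            }
        }

        // Migrate the winner onto this core; the real code also unloads it from
        // its source core if it is currently running there.
        if (winner != nullptr) {
            queue.push_front(winner);
        }
    }
};

int main() {
    FakeScheduler sched;
    FakeThread a{1, 59, 100};
    FakeThread b{2, 59, 90};
    FakeThread c{3, 59, 10}; // stale thread suggested for core 0
    sched.scheduled[0] = {&a, &b};
    sched.suggested[0] = {&c};

    sched.PreemptCore(0, 59);

    std::cout << "front of core 0: thread " << sched.scheduled[0].front()->id << '\n';
    std::cout << "thread 1 yield count: " << a.yield_count << '\n';
}

Built as a standalone program (for example with g++ -std=c++17), the sketch reports thread 3 at
the front of core 0 and a yield count of 1 for thread 1, showing the stale suggested thread being
migrated in while the rotated thread's yield is recorded.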