author     David                 2019-10-28 10:53:27 +1100
committer  GitHub                2019-10-28 10:53:27 +1100
commit     4c5731c34f0915457a31c60c9f70a2f169ea575d
tree       7f03a7f892370b59e56ae06c6c74514f1cc44998 /src/core/hle/kernel/thread.cpp
parent     Merge pull request #3034 from ReinUsesLisp/w4244-maxwell3d
parent     Kernel Thread: Cleanup THREADPROCESSORID_DONT_UPDATE.
Merge pull request #2971 from FernandoS27/new-scheduler-v2
Kernel: Implement a New Thread Scheduler V2
Diffstat (limited to 'src/core/hle/kernel/thread.cpp')
-rw-r--r--  src/core/hle/kernel/thread.cpp  | 252
1 file changed, 190 insertions(+), 62 deletions(-)
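
Before the diff itself, a minimal self-contained sketch of the direction of the change, for orientation: thread status updates are now translated into a scheduling status and pushed into a single global scheduler instead of per-core schedulers. The GlobalScheduler and Thread types below are simplified stand-ins written purely for illustration; only the status-to-scheduling-status mapping and the Schedule/Unschedule call pattern are taken from the diff.

// Illustrative sketch only: a pared-down model of the new flow
// Thread::SetStatus -> SetSchedulingStatus -> global Schedule/Unschedule.
// None of these types are the real yuzu classes.
#include <array>
#include <cstdint>
#include <iostream>
#include <list>

enum class ThreadStatus { Ready, Running, Dormant, Dead, WaitSleep };
enum class ThreadSchedStatus { None, Paused, Runnable, Exited };

struct Thread;

// Stand-in for the global scheduler's per-core ready queues.
struct GlobalScheduler {
    static constexpr std::size_t NUM_CPU_CORES = 4;
    std::array<std::list<Thread*>, NUM_CPU_CORES> ready_queues{};

    void Schedule(std::uint32_t /*priority*/, std::int32_t core, Thread* t) {
        ready_queues[static_cast<std::size_t>(core)].push_back(t);
    }
    void Unschedule(std::uint32_t /*priority*/, std::int32_t core, Thread* t) {
        ready_queues[static_cast<std::size_t>(core)].remove(t);
    }
};

struct Thread {
    GlobalScheduler* scheduler = nullptr;
    std::int32_t processor_id = 0;
    std::uint32_t current_priority = 44;
    ThreadSchedStatus sched_status = ThreadSchedStatus::None;

    // Mirrors the switch added to Thread::SetStatus in the diff: the HLE
    // thread status is mapped onto a scheduling status first.
    void SetStatus(ThreadStatus new_status) {
        switch (new_status) {
        case ThreadStatus::Ready:
        case ThreadStatus::Running:
            SetSchedulingStatus(ThreadSchedStatus::Runnable);
            break;
        case ThreadStatus::Dormant:
            SetSchedulingStatus(ThreadSchedStatus::None);
            break;
        case ThreadStatus::Dead:
            SetSchedulingStatus(ThreadSchedStatus::Exited);
            break;
        default:
            SetSchedulingStatus(ThreadSchedStatus::Paused);
            break;
        }
    }

    // Simplified stand-in for AdjustSchedulingOnStatus: only transitions
    // into or out of Runnable touch the global ready queues.
    void SetSchedulingStatus(ThreadSchedStatus new_status) {
        const ThreadSchedStatus old_status = sched_status;
        sched_status = new_status;
        if (old_status == new_status) {
            return;
        }
        if (old_status == ThreadSchedStatus::Runnable) {
            scheduler->Unschedule(current_priority, processor_id, this);
        } else if (new_status == ThreadSchedStatus::Runnable) {
            scheduler->Schedule(current_priority, processor_id, this);
        }
    }
};

int main() {
    GlobalScheduler scheduler;
    Thread thread;
    thread.scheduler = &scheduler;

    thread.SetStatus(ThreadStatus::Ready);                  // enqueued on core 0
    std::cout << scheduler.ready_queues[0].size() << '\n';  // 1
    thread.SetStatus(ThreadStatus::WaitSleep);              // dequeued again
    std::cout << scheduler.ready_queues[0].size() << '\n';  // 0
}

The real change additionally handles affinity masks, per-priority queues, and suggested cores (Suggest/Unsuggest), as the hunks below show.
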
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index ec529e7f2..962530d2d 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -45,15 +45,7 @@ void Thread::Stop() {
                                                              callback_handle);
     kernel.ThreadWakeupCallbackHandleTable().Close(callback_handle);
     callback_handle = 0;
-
-    // Clean up thread from ready queue
-    // This is only needed when the thread is terminated forcefully (SVC TerminateProcess)
-    if (status == ThreadStatus::Ready || status == ThreadStatus::Paused) {
-        scheduler->UnscheduleThread(this, current_priority);
-    }
-
-    status = ThreadStatus::Dead;
-
+    SetStatus(ThreadStatus::Dead);
     WakeupAllWaitingThreads();

     // Clean up any dangling references in objects that this thread was waiting for
@@ -132,17 +124,16 @@ void Thread::ResumeFromWait() {
     wakeup_callback = nullptr;

     if (activity == ThreadActivity::Paused) {
-        status = ThreadStatus::Paused;
+        SetStatus(ThreadStatus::Paused);
         return;
     }

-    status = ThreadStatus::Ready;
-
-    ChangeScheduler();
+    SetStatus(ThreadStatus::Ready);
 }

 void Thread::CancelWait() {
     ASSERT(GetStatus() == ThreadStatus::WaitSynch);
+    ClearWaitObjects();
     SetWaitSynchronizationResult(ERR_SYNCHRONIZATION_CANCELED);
     ResumeFromWait();
 }
@@ -205,9 +196,9 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
     thread->name = std::move(name);
     thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap();
     thread->owner_process = &owner_process;
+    auto& scheduler = kernel.GlobalScheduler();
+    scheduler.AddThread(thread);
     thread->tls_address = thread->owner_process->CreateTLSRegion();
-    thread->scheduler = &system.Scheduler(processor_id);
-    thread->scheduler->AddThread(thread);

     thread->owner_process->RegisterThread(thread.get());

@@ -250,6 +241,22 @@ void Thread::SetStatus(ThreadStatus new_status) {
         return;
     }

+    switch (new_status) {
+    case ThreadStatus::Ready:
+    case ThreadStatus::Running:
+        SetSchedulingStatus(ThreadSchedStatus::Runnable);
+        break;
+    case ThreadStatus::Dormant:
+        SetSchedulingStatus(ThreadSchedStatus::None);
+        break;
+    case ThreadStatus::Dead:
+        SetSchedulingStatus(ThreadSchedStatus::Exited);
+        break;
+    default:
+        SetSchedulingStatus(ThreadSchedStatus::Paused);
+        break;
+    }
+
     if (status == ThreadStatus::Running) {
         last_running_ticks = Core::System::GetInstance().CoreTiming().GetTicks();
     }
@@ -311,8 +318,7 @@ void Thread::UpdatePriority() {
         return;
     }

-    scheduler->SetThreadPriority(this, new_priority);
-    current_priority = new_priority;
+    SetCurrentPriority(new_priority);

     if (!lock_owner) {
         return;
@@ -328,47 +334,7 @@ void Thread::UpdatePriority() {
 }

 void Thread::ChangeCore(u32 core, u64 mask) {
-    ideal_core = core;
-    affinity_mask = mask;
-    ChangeScheduler();
-}
-
-void Thread::ChangeScheduler() {
-    if (status != ThreadStatus::Ready) {
-        return;
-    }
-
-    auto& system = Core::System::GetInstance();
-    std::optional<s32> new_processor_id{GetNextProcessorId(affinity_mask)};
-
-    if (!new_processor_id) {
-        new_processor_id = processor_id;
-    }
-    if (ideal_core != -1 && system.Scheduler(ideal_core).GetCurrentThread() == nullptr) {
-        new_processor_id = ideal_core;
-    }
-
-    ASSERT(*new_processor_id < 4);
-
-    // Add thread to new core's scheduler
-    auto& next_scheduler = system.Scheduler(*new_processor_id);
-
-    if (*new_processor_id != processor_id) {
-        // Remove thread from previous core's scheduler
-        scheduler->RemoveThread(this);
-        next_scheduler.AddThread(this);
-    }
-
-    processor_id = *new_processor_id;
-
-    // If the thread was ready, unschedule from the previous core and schedule on the new core
-    scheduler->UnscheduleThread(this, current_priority);
-    next_scheduler.ScheduleThread(this, current_priority);
-
-    // Change thread's scheduler
-    scheduler = &next_scheduler;
-
-    system.CpuCore(processor_id).PrepareReschedule();
+    SetCoreAndAffinityMask(core, mask);
 }

 bool Thread::AllWaitObjectsReady() const {
@@ -388,10 +354,8 @@ void Thread::SetActivity(ThreadActivity value) {

     if (value == ThreadActivity::Paused) {
         // Set status if not waiting
-        if (status == ThreadStatus::Ready) {
-            status = ThreadStatus::Paused;
-        } else if (status == ThreadStatus::Running) {
-            status = ThreadStatus::Paused;
+        if (status == ThreadStatus::Ready || status == ThreadStatus::Running) {
+            SetStatus(ThreadStatus::Paused);
             Core::System::GetInstance().CpuCore(processor_id).PrepareReschedule();
         }
     } else if (status == ThreadStatus::Paused) {
@@ -408,6 +372,170 @@ void Thread::Sleep(s64 nanoseconds) {
     WakeAfterDelay(nanoseconds);
 }

+bool Thread::YieldSimple() {
+    auto& scheduler = kernel.GlobalScheduler();
+    return scheduler.YieldThread(this);
+}
+
+bool Thread::YieldAndBalanceLoad() {
+    auto& scheduler = kernel.GlobalScheduler();
+    return scheduler.YieldThreadAndBalanceLoad(this);
+}
+
+bool Thread::YieldAndWaitForLoadBalancing() {
+    auto& scheduler = kernel.GlobalScheduler();
+    return scheduler.YieldThreadAndWaitForLoadBalancing(this);
+}
+
+void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
+    const u32 old_flags = scheduling_state;
+    scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
+                       static_cast<u32>(new_status);
+    AdjustSchedulingOnStatus(old_flags);
+}
+
+void Thread::SetCurrentPriority(u32 new_priority) {
+    const u32 old_priority = std::exchange(current_priority, new_priority);
+    AdjustSchedulingOnPriority(old_priority);
+}
+
+ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
+    const auto HighestSetCore = [](u64 mask, u32 max_cores) {
+        for (s32 core = max_cores - 1; core >= 0; core--) {
+            if (((mask >> core) & 1) != 0) {
+                return core;
+            }
+        }
+        return -1;
+    };
+
+    const bool use_override = affinity_override_count != 0;
+    if (new_core == THREADPROCESSORID_DONT_UPDATE) {
+        new_core = use_override ? ideal_core_override : ideal_core;
+        if ((new_affinity_mask & (1ULL << new_core)) == 0) {
+            return ERR_INVALID_COMBINATION;
+        }
+    }
+    if (use_override) {
+        ideal_core_override = new_core;
+        affinity_mask_override = new_affinity_mask;
+    } else {
+        const u64 old_affinity_mask = std::exchange(affinity_mask, new_affinity_mask);
+        ideal_core = new_core;
+        if (old_affinity_mask != new_affinity_mask) {
+            const s32 old_core = processor_id;
+            if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
+                if (ideal_core < 0) {
+                    processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES);
+                } else {
+                    processor_id = ideal_core;
+                }
+            }
+            AdjustSchedulingOnAffinity(old_affinity_mask, old_core);
+        }
+    }
+    return RESULT_SUCCESS;
+}
+
+void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
+    if (old_flags == scheduling_state) {
+        return;
+    }
+
+    auto& scheduler = kernel.GlobalScheduler();
+    if (static_cast<ThreadSchedStatus>(old_flags & static_cast<u32>(ThreadSchedMasks::LowMask)) ==
+        ThreadSchedStatus::Runnable) {
+        // In this case the thread was running, now it's pausing/exiting
+        if (processor_id >= 0) {
+            scheduler.Unschedule(current_priority, processor_id, this);
+        }
+
+        for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+            if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
+                scheduler.Unsuggest(current_priority, static_cast<u32>(core), this);
+            }
+        }
+    } else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) {
+        // The thread is now set to running from being stopped
+        if (processor_id >= 0) {
+            scheduler.Schedule(current_priority, processor_id, this);
+        }
+
+        for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+            if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
+                scheduler.Suggest(current_priority, static_cast<u32>(core), this);
+            }
+        }
+    }
+
+    scheduler.SetReselectionPending();
+}
+
+void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
+    if (GetSchedulingStatus() != ThreadSchedStatus::Runnable) {
+        return;
+    }
+    auto& scheduler = Core::System::GetInstance().GlobalScheduler();
+    if (processor_id >= 0) {
+        scheduler.Unschedule(old_priority, processor_id, this);
+    }
+
+    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+        if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
+            scheduler.Unsuggest(old_priority, core, this);
+        }
+    }
+
+    // Add thread to the new priority queues.
+    Thread* current_thread = GetCurrentThread();
+
+    if (processor_id >= 0) {
+        if (current_thread == this) {
+            scheduler.SchedulePrepend(current_priority, processor_id, this);
+        } else {
+            scheduler.Schedule(current_priority, processor_id, this);
+        }
+    }
+
+    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+        if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
+            scheduler.Suggest(current_priority, core, this);
+        }
+    }
+
+    scheduler.SetReselectionPending();
+}
+
+void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
+    auto& scheduler = Core::System::GetInstance().GlobalScheduler();
+    if (GetSchedulingStatus() != ThreadSchedStatus::Runnable ||
+        current_priority >= THREADPRIO_COUNT) {
+        return;
+    }
+
+    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+        if (((old_affinity_mask >> core) & 1) != 0) {
+            if (core == old_core) {
+                scheduler.Unschedule(current_priority, core, this);
+            } else {
+                scheduler.Unsuggest(current_priority, core, this);
+            }
+        }
+    }
+
+    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+        if (((affinity_mask >> core) & 1) != 0) {
+            if (core == processor_id) {
+                scheduler.Schedule(current_priority, core, this);
+            } else {
+                scheduler.Suggest(current_priority, core, this);
+            }
+        }
+    }
+
+    scheduler.SetReselectionPending();
+}
+
 ////////////////////////////////////////////////////////////////////////////////////////////////////

 /**