author     Fernando Sahmkow    2019-03-29 17:01:46 -0400
committer  FernandoS27         2019-10-15 11:55:06 -0400
commit     a1ac0c6cb47e10863b0bfbb1a6aadc71ccc513ab (patch)
tree       d4476f115b69c74f543f7992006f8e5548cd7f54 /src/core/hle/kernel/thread.cpp
parent     Implement a new Core Scheduler (diff)
download   yuzu-a1ac0c6cb47e10863b0bfbb1a6aadc71ccc513ab.tar.gz
           yuzu-a1ac0c6cb47e10863b0bfbb1a6aadc71ccc513ab.tar.xz
           yuzu-a1ac0c6cb47e10863b0bfbb1a6aadc71ccc513ab.zip
Adapt thread class to the new Scheduler
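Thread status, priority, and core/affinity updates now go through SetStatus/SetSchedulingStatus, SetCurrentPriority and SetCoreAndAffinityMask, which keep the GlobalScheduler's per-core, per-priority queues in sync via Schedule/Unschedule and Suggest/Unsuggest. New threads register with the kernel's GlobalScheduler instead of a per-core Scheduler, the old ChangeScheduler migration path is removed, and YieldType0/1/2 forward to the GlobalScheduler's yield variants.

The core re-selection rule used by SetCoreAndAffinityMask can be read in isolation; a minimal standalone sketch (PickCore, kNumCores and the example masks are illustrative names and values, not code from this change):

    #include <cstdint>
    #include <cstdio>

    constexpr std::int32_t kNumCores = 4; // stands in for GlobalScheduler::NUM_CPU_CORES

    // Highest core allowed by the affinity mask, or -1 if the mask is empty.
    std::int32_t HighestSetCore(std::uint64_t mask, std::int32_t max_cores) {
        for (std::int32_t core = max_cores - 1; core >= 0; core--) {
            if (((mask >> core) & 1) != 0) {
                return core;
            }
        }
        return -1;
    }

    // Core the thread ends up on after its affinity mask changes: it only moves
    // when its current core is no longer allowed, preferring the ideal core and
    // falling back to the highest allowed core when the ideal core is -1.
    std::int32_t PickCore(std::int32_t current_core, std::int32_t ideal_core,
                          std::uint64_t new_mask) {
        if (current_core >= 0 && ((new_mask >> current_core) & 1) == 0) {
            return ideal_core < 0 ? HighestSetCore(new_mask, kNumCores) : ideal_core;
        }
        return current_core;
    }

    int main() {
        std::printf("%d\n", PickCore(3, -1, 0b0011)); // core 3 no longer allowed: moves to core 1
        std::printf("%d\n", PickCore(0, 2, 0b0101));  // core 0 still allowed: stays on core 0
        return 0;
    }

A thread only migrates when its current core is no longer allowed by the new mask; otherwise it keeps its current core.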
Diffstat (limited to 'src/core/hle/kernel/thread.cpp')
 -rw-r--r--  src/core/hle/kernel/thread.cpp  | 242
 1 file changed, 183 insertions(+), 59 deletions(-)
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index ec529e7f2..d0fa7b370 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -45,15 +45,7 @@ void Thread::Stop() {
                                                              callback_handle);
     kernel.ThreadWakeupCallbackHandleTable().Close(callback_handle);
     callback_handle = 0;
-
-    // Clean up thread from ready queue
-    // This is only needed when the thread is terminated forcefully (SVC TerminateProcess)
-    if (status == ThreadStatus::Ready || status == ThreadStatus::Paused) {
-        scheduler->UnscheduleThread(this, current_priority);
-    }
-
-    status = ThreadStatus::Dead;
-
+    SetStatus(ThreadStatus::Dead);
     WakeupAllWaitingThreads();
 
     // Clean up any dangling references in objects that this thread was waiting for
@@ -132,13 +124,11 @@ void Thread::ResumeFromWait() {
     wakeup_callback = nullptr;
 
     if (activity == ThreadActivity::Paused) {
-        status = ThreadStatus::Paused;
+        SetStatus(ThreadStatus::Paused);
         return;
     }
 
-    status = ThreadStatus::Ready;
-
-    ChangeScheduler();
+    SetStatus(ThreadStatus::Ready);
 }
 
 void Thread::CancelWait() {
@@ -205,9 +195,9 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
     thread->name = std::move(name);
     thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap();
     thread->owner_process = &owner_process;
+    auto& scheduler = kernel.GlobalScheduler();
+    scheduler.AddThread(thread);
     thread->tls_address = thread->owner_process->CreateTLSRegion();
-    thread->scheduler = &system.Scheduler(processor_id);
-    thread->scheduler->AddThread(thread);
 
     thread->owner_process->RegisterThread(thread.get());
 
@@ -250,6 +240,22 @@ void Thread::SetStatus(ThreadStatus new_status) {
         return;
     }
 
+    switch (new_status) {
+    case ThreadStatus::Ready:
+    case ThreadStatus::Running:
+        SetSchedulingStatus(ThreadSchedStatus::Runnable);
+        break;
+    case ThreadStatus::Dormant:
+        SetSchedulingStatus(ThreadSchedStatus::None);
+        break;
+    case ThreadStatus::Dead:
+        SetSchedulingStatus(ThreadSchedStatus::Exited);
+        break;
+    default:
+        SetSchedulingStatus(ThreadSchedStatus::Paused);
+        break;
+    }
+
     if (status == ThreadStatus::Running) {
         last_running_ticks = Core::System::GetInstance().CoreTiming().GetTicks();
     }
@@ -311,8 +317,7 @@ void Thread::UpdatePriority() {
         return;
     }
 
-    scheduler->SetThreadPriority(this, new_priority);
-    current_priority = new_priority;
+    SetCurrentPriority(new_priority);
 
     if (!lock_owner) {
         return;
@@ -328,47 +333,7 @@ void Thread::UpdatePriority() {
 }
 
 void Thread::ChangeCore(u32 core, u64 mask) {
-    ideal_core = core;
-    affinity_mask = mask;
-    ChangeScheduler();
-}
-
-void Thread::ChangeScheduler() {
-    if (status != ThreadStatus::Ready) {
-        return;
-    }
-
-    auto& system = Core::System::GetInstance();
-    std::optional<s32> new_processor_id{GetNextProcessorId(affinity_mask)};
-
-    if (!new_processor_id) {
-        new_processor_id = processor_id;
-    }
-    if (ideal_core != -1 && system.Scheduler(ideal_core).GetCurrentThread() == nullptr) {
-        new_processor_id = ideal_core;
-    }
-
-    ASSERT(*new_processor_id < 4);
-
-    // Add thread to new core's scheduler
-    auto& next_scheduler = system.Scheduler(*new_processor_id);
-
-    if (*new_processor_id != processor_id) {
-        // Remove thread from previous core's scheduler
-        scheduler->RemoveThread(this);
-        next_scheduler.AddThread(this);
-    }
-
-    processor_id = *new_processor_id;
-
-    // If the thread was ready, unschedule from the previous core and schedule on the new core
-    scheduler->UnscheduleThread(this, current_priority);
-    next_scheduler.ScheduleThread(this, current_priority);
-
-    // Change thread's scheduler
-    scheduler = &next_scheduler;
-
-    system.CpuCore(processor_id).PrepareReschedule();
+    SetCoreAndAffinityMask(core, mask);
 }
 
 bool Thread::AllWaitObjectsReady() const {
@@ -391,7 +356,7 @@ void Thread::SetActivity(ThreadActivity value) {
         if (status == ThreadStatus::Ready) {
             status = ThreadStatus::Paused;
         } else if (status == ThreadStatus::Running) {
-            status = ThreadStatus::Paused;
+            SetStatus(ThreadStatus::Paused);
             Core::System::GetInstance().CpuCore(processor_id).PrepareReschedule();
         }
     } else if (status == ThreadStatus::Paused) {
@@ -408,6 +373,165 @@ void Thread::Sleep(s64 nanoseconds) {
     WakeAfterDelay(nanoseconds);
 }
 
+void Thread::YieldType0() {
+    auto& scheduler = kernel.GlobalScheduler();
+    scheduler.YieldThread(this);
+}
+
+void Thread::YieldType1() {
+    auto& scheduler = kernel.GlobalScheduler();
+    scheduler.YieldThreadAndBalanceLoad(this);
+}
+
+void Thread::YieldType2() {
+    auto& scheduler = kernel.GlobalScheduler();
+    scheduler.YieldThreadAndWaitForLoadBalancing(this);
+}
+
+void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
+    u32 old_flags = scheduling_state;
+    scheduling_state =
+        (scheduling_state & ThreadSchedMasks::HighMask) | static_cast<u32>(new_status);
+    AdjustSchedulingOnStatus(old_flags);
+}
+
+void Thread::SetCurrentPriority(u32 new_priority) {
+    u32 old_priority = current_priority;
+    current_priority = new_priority;
+    AdjustSchedulingOnPriority(old_priority);
+}
+
+ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
+    auto HighestSetCore = [](u64 mask, u32 max_cores) {
+        for (s32 core = max_cores - 1; core >= 0; core--) {
+            if (((mask >> core) & 1) != 0)
+                return core;
+        }
+        return -1;
+    };
+    bool use_override = affinity_override_count != 0;
+    // The value -3 is "do not change the ideal core".
+    if (new_core == -3) {
+        new_core = use_override ? ideal_core_override : ideal_core;
+        if ((new_affinity_mask & (1 << new_core)) == 0) {
+            return ERR_INVALID_COMBINATION;
+        }
+    }
+    if (use_override) {
+        ideal_core_override = new_core;
+        affinity_mask_override = new_affinity_mask;
+    } else {
+        u64 old_affinity_mask = affinity_mask;
+        ideal_core = new_core;
+        affinity_mask = new_affinity_mask;
+        if (old_affinity_mask != new_affinity_mask) {
+            s32 old_core = processor_id;
+            if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
+                if (ideal_core < 0) {
+                    processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES);
+                } else {
+                    processor_id = ideal_core;
+                }
+            }
+            AdjustSchedulingOnAffinity(old_affinity_mask, old_core);
+        }
+    }
+    return RESULT_SUCCESS;
+}
+
+void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
+    if (old_flags == scheduling_state)
+        return;
+
+    auto& scheduler = kernel.GlobalScheduler();
+    if (static_cast<ThreadSchedStatus>(old_flags & ThreadSchedMasks::LowMask) ==
+        ThreadSchedStatus::Runnable) {
+        // In this case the thread was running, now it's pausing/exitting
+        if (processor_id >= 0)
+            scheduler.Unschedule(current_priority, processor_id, this);
+
+        for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+            if (core != processor_id && ((affinity_mask >> core) & 1) != 0)
+                scheduler.Unsuggest(current_priority, core, this);
+        }
+    } else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) {
+        // The thread is now set to running from being stopped
+        if (processor_id >= 0)
+            scheduler.Schedule(current_priority, processor_id, this);
+
+        for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+            if (core != processor_id && ((affinity_mask >> core) & 1) != 0)
+                scheduler.Suggest(current_priority, core, this);
+        }
+    }
+
+    scheduler.SetReselectionPending();
+}
+
+void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
+    if (GetSchedulingStatus() != ThreadSchedStatus::Runnable) {
+        return;
+    }
+    auto& scheduler = Core::System::GetInstance().GlobalScheduler();
+    if (processor_id >= 0) {
+        scheduler.Unschedule(old_priority, processor_id, this);
+    }
+
+    for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+        if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
+            scheduler.Unsuggest(old_priority, core, this);
+        }
+    }
+
+    // Add thread to the new priority queues.
+    Thread* current_thread = GetCurrentThread();
+
+    if (processor_id >= 0) {
+        if (current_thread == this) {
+            scheduler.SchedulePrepend(current_priority, processor_id, this);
+        } else {
+            scheduler.Schedule(current_priority, processor_id, this);
+        }
+    }
+
+    for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+        if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
+            scheduler.Suggest(current_priority, core, this);
+        }
+    }
+
+    scheduler.SetReselectionPending();
+}
+
+void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
+    auto& scheduler = Core::System::GetInstance().GlobalScheduler();
+    if (GetSchedulingStatus() != ThreadSchedStatus::Runnable ||
+        current_priority >= THREADPRIO_COUNT)
+        return;
+
+    for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+        if (((old_affinity_mask >> core) & 1) != 0) {
+            if (core == old_core) {
+                scheduler.Unschedule(current_priority, core, this);
+            } else {
+                scheduler.Unsuggest(current_priority, core, this);
+            }
+        }
+    }
+
+    for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+        if (((affinity_mask >> core) & 1) != 0) {
+            if (core == processor_id) {
+                scheduler.Schedule(current_priority, core, this);
+            } else {
+                scheduler.Suggest(current_priority, core, this);
+            }
+        }
+    }
+
+    scheduler.SetReselectionPending();
+}
+
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 
 /**
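A note on the scheduling_state handling above: SetSchedulingStatus rewrites only the low status bits and preserves the high flag bits. A minimal standalone sketch of that masking, assuming a low/high split like the one ThreadSchedMasks describes (the concrete mask and enum values here are placeholders, not taken from this commit):

    #include <cstdint>
    #include <cstdio>

    // Assumed layout: low bits hold the scheduling status, high bits hold flags.
    constexpr std::uint32_t kLowMask = 0xFu;
    constexpr std::uint32_t kHighMask = ~kLowMask;

    enum class SchedStatus : std::uint32_t { None = 0, Paused = 1, Runnable = 2, Exited = 3 };

    // Mirrors the masking in Thread::SetSchedulingStatus: keep the flag bits,
    // replace only the status bits.
    std::uint32_t WithStatus(std::uint32_t state, SchedStatus new_status) {
        return (state & kHighMask) | static_cast<std::uint32_t>(new_status);
    }

    int main() {
        std::uint32_t state = 0x10u | static_cast<std::uint32_t>(SchedStatus::Paused);
        state = WithStatus(state, SchedStatus::Runnable);
        std::printf("0x%X\n", state); // 0x12: the 0x10 flag survives, status is now Runnable
        return 0;
    }

AdjustSchedulingOnStatus then compares the old and new packed values and only requeues the thread on the GlobalScheduler when it actually enters or leaves the Runnable state.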