summaryrefslogtreecommitdiff
path: root/src/core/hle/kernel/thread.cpp
diff options
context:
space:
mode:
authorGravatar bunnei2020-12-20 20:57:54 -0800
committerGravatar GitHub2020-12-20 20:57:54 -0800
commit1279c7ce7afd3d1bf2b4e33aa922158acf2cd060 (patch)
tree6db8088caed2bd957187e4730f51424325038fa5 /src/core/hle/kernel/thread.cpp
parentMerge pull request #5201 from ameerj/bufferq-refactor (diff)
parenthle: kernel: Process: Various style fixes based on code review feedback. (diff)
downloadyuzu-1279c7ce7afd3d1bf2b4e33aa922158acf2cd060.tar.gz
yuzu-1279c7ce7afd3d1bf2b4e33aa922158acf2cd060.tar.xz
yuzu-1279c7ce7afd3d1bf2b4e33aa922158acf2cd060.zip
Merge pull request #5131 from bunnei/scheduler-rewrite
Rewrite Kernel scheduler based on Atmosphere
Diffstat (limited to 'src/core/hle/kernel/thread.cpp')
-rw-r--r--src/core/hle/kernel/thread.cpp79
1 file changed, 28 insertions, 51 deletions
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 7d1eb2c6e..a4f9e0d97 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -17,10 +17,11 @@
17#include "core/hardware_properties.h" 17#include "core/hardware_properties.h"
18#include "core/hle/kernel/errors.h" 18#include "core/hle/kernel/errors.h"
19#include "core/hle/kernel/handle_table.h" 19#include "core/hle/kernel/handle_table.h"
20#include "core/hle/kernel/k_scheduler.h"
21#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
20#include "core/hle/kernel/kernel.h" 22#include "core/hle/kernel/kernel.h"
21#include "core/hle/kernel/object.h" 23#include "core/hle/kernel/object.h"
22#include "core/hle/kernel/process.h" 24#include "core/hle/kernel/process.h"
23#include "core/hle/kernel/scheduler.h"
24#include "core/hle/kernel/thread.h" 25#include "core/hle/kernel/thread.h"
25#include "core/hle/kernel/time_manager.h" 26#include "core/hle/kernel/time_manager.h"
26#include "core/hle/result.h" 27#include "core/hle/result.h"
@@ -50,7 +51,7 @@ Thread::~Thread() = default;
50 51
51void Thread::Stop() { 52void Thread::Stop() {
52 { 53 {
53 SchedulerLock lock(kernel); 54 KScopedSchedulerLock lock(kernel);
54 SetStatus(ThreadStatus::Dead); 55 SetStatus(ThreadStatus::Dead);
55 Signal(); 56 Signal();
56 kernel.GlobalHandleTable().Close(global_handle); 57 kernel.GlobalHandleTable().Close(global_handle);
@@ -67,7 +68,7 @@ void Thread::Stop() {
67} 68}
68 69
69void Thread::ResumeFromWait() { 70void Thread::ResumeFromWait() {
70 SchedulerLock lock(kernel); 71 KScopedSchedulerLock lock(kernel);
71 switch (status) { 72 switch (status) {
72 case ThreadStatus::Paused: 73 case ThreadStatus::Paused:
73 case ThreadStatus::WaitSynch: 74 case ThreadStatus::WaitSynch:
@@ -99,19 +100,18 @@ void Thread::ResumeFromWait() {
99} 100}
100 101
101void Thread::OnWakeUp() { 102void Thread::OnWakeUp() {
102 SchedulerLock lock(kernel); 103 KScopedSchedulerLock lock(kernel);
103
104 SetStatus(ThreadStatus::Ready); 104 SetStatus(ThreadStatus::Ready);
105} 105}
106 106
107ResultCode Thread::Start() { 107ResultCode Thread::Start() {
108 SchedulerLock lock(kernel); 108 KScopedSchedulerLock lock(kernel);
109 SetStatus(ThreadStatus::Ready); 109 SetStatus(ThreadStatus::Ready);
110 return RESULT_SUCCESS; 110 return RESULT_SUCCESS;
111} 111}
112 112
113void Thread::CancelWait() { 113void Thread::CancelWait() {
114 SchedulerLock lock(kernel); 114 KScopedSchedulerLock lock(kernel);
115 if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) { 115 if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) {
116 is_sync_cancelled = true; 116 is_sync_cancelled = true;
117 return; 117 return;
@@ -186,12 +186,14 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
186 thread->status = ThreadStatus::Dormant; 186 thread->status = ThreadStatus::Dormant;
187 thread->entry_point = entry_point; 187 thread->entry_point = entry_point;
188 thread->stack_top = stack_top; 188 thread->stack_top = stack_top;
189 thread->disable_count = 1;
189 thread->tpidr_el0 = 0; 190 thread->tpidr_el0 = 0;
190 thread->nominal_priority = thread->current_priority = priority; 191 thread->nominal_priority = thread->current_priority = priority;
191 thread->last_running_ticks = 0; 192 thread->schedule_count = -1;
193 thread->last_scheduled_tick = 0;
192 thread->processor_id = processor_id; 194 thread->processor_id = processor_id;
193 thread->ideal_core = processor_id; 195 thread->ideal_core = processor_id;
194 thread->affinity_mask = 1ULL << processor_id; 196 thread->affinity_mask.SetAffinity(processor_id, true);
195 thread->wait_objects = nullptr; 197 thread->wait_objects = nullptr;
196 thread->mutex_wait_address = 0; 198 thread->mutex_wait_address = 0;
197 thread->condvar_wait_address = 0; 199 thread->condvar_wait_address = 0;
@@ -201,7 +203,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
201 thread->owner_process = owner_process; 203 thread->owner_process = owner_process;
202 thread->type = type_flags; 204 thread->type = type_flags;
203 if ((type_flags & THREADTYPE_IDLE) == 0) { 205 if ((type_flags & THREADTYPE_IDLE) == 0) {
204 auto& scheduler = kernel.GlobalScheduler(); 206 auto& scheduler = kernel.GlobalSchedulerContext();
205 scheduler.AddThread(thread); 207 scheduler.AddThread(thread);
206 } 208 }
207 if (owner_process) { 209 if (owner_process) {
@@ -225,7 +227,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
225} 227}
226 228
227void Thread::SetPriority(u32 priority) { 229void Thread::SetPriority(u32 priority) {
228 SchedulerLock lock(kernel); 230 KScopedSchedulerLock lock(kernel);
229 ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST, 231 ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
230 "Invalid priority value."); 232 "Invalid priority value.");
231 nominal_priority = priority; 233 nominal_priority = priority;
@@ -362,7 +364,7 @@ bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
362} 364}
363 365
364ResultCode Thread::SetActivity(ThreadActivity value) { 366ResultCode Thread::SetActivity(ThreadActivity value) {
365 SchedulerLock lock(kernel); 367 KScopedSchedulerLock lock(kernel);
366 368
367 auto sched_status = GetSchedulingStatus(); 369 auto sched_status = GetSchedulingStatus();
368 370
@@ -391,7 +393,7 @@ ResultCode Thread::SetActivity(ThreadActivity value) {
391ResultCode Thread::Sleep(s64 nanoseconds) { 393ResultCode Thread::Sleep(s64 nanoseconds) {
392 Handle event_handle{}; 394 Handle event_handle{};
393 { 395 {
394 SchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds); 396 KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
395 SetStatus(ThreadStatus::WaitSleep); 397 SetStatus(ThreadStatus::WaitSleep);
396 } 398 }
397 399
@@ -402,39 +404,12 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
402 return RESULT_SUCCESS; 404 return RESULT_SUCCESS;
403} 405}
404 406
405std::pair<ResultCode, bool> Thread::YieldSimple() {
406 bool is_redundant = false;
407 {
408 SchedulerLock lock(kernel);
409 is_redundant = kernel.GlobalScheduler().YieldThread(this);
410 }
411 return {RESULT_SUCCESS, is_redundant};
412}
413
414std::pair<ResultCode, bool> Thread::YieldAndBalanceLoad() {
415 bool is_redundant = false;
416 {
417 SchedulerLock lock(kernel);
418 is_redundant = kernel.GlobalScheduler().YieldThreadAndBalanceLoad(this);
419 }
420 return {RESULT_SUCCESS, is_redundant};
421}
422
423std::pair<ResultCode, bool> Thread::YieldAndWaitForLoadBalancing() {
424 bool is_redundant = false;
425 {
426 SchedulerLock lock(kernel);
427 is_redundant = kernel.GlobalScheduler().YieldThreadAndWaitForLoadBalancing(this);
428 }
429 return {RESULT_SUCCESS, is_redundant};
430}
431
432void Thread::AddSchedulingFlag(ThreadSchedFlags flag) { 407void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
433 const u32 old_state = scheduling_state; 408 const u32 old_state = scheduling_state;
434 pausing_state |= static_cast<u32>(flag); 409 pausing_state |= static_cast<u32>(flag);
435 const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus()); 410 const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
436 scheduling_state = base_scheduling | pausing_state; 411 scheduling_state = base_scheduling | pausing_state;
437 kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state); 412 KScheduler::OnThreadStateChanged(kernel, this, old_state);
438} 413}
439 414
440void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) { 415void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
@@ -442,23 +417,24 @@ void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
442 pausing_state &= ~static_cast<u32>(flag); 417 pausing_state &= ~static_cast<u32>(flag);
443 const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus()); 418 const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
444 scheduling_state = base_scheduling | pausing_state; 419 scheduling_state = base_scheduling | pausing_state;
445 kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state); 420 KScheduler::OnThreadStateChanged(kernel, this, old_state);
446} 421}
447 422
448void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) { 423void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
449 const u32 old_state = scheduling_state; 424 const u32 old_state = scheduling_state;
450 scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) | 425 scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
451 static_cast<u32>(new_status); 426 static_cast<u32>(new_status);
452 kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state); 427 KScheduler::OnThreadStateChanged(kernel, this, old_state);
453} 428}
454 429
455void Thread::SetCurrentPriority(u32 new_priority) { 430void Thread::SetCurrentPriority(u32 new_priority) {
456 const u32 old_priority = std::exchange(current_priority, new_priority); 431 const u32 old_priority = std::exchange(current_priority, new_priority);
457 kernel.GlobalScheduler().AdjustSchedulingOnPriority(this, old_priority); 432 KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(),
433 old_priority);
458} 434}
459 435
460ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { 436ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
461 SchedulerLock lock(kernel); 437 KScopedSchedulerLock lock(kernel);
462 const auto HighestSetCore = [](u64 mask, u32 max_cores) { 438 const auto HighestSetCore = [](u64 mask, u32 max_cores) {
463 for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) { 439 for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
464 if (((mask >> core) & 1) != 0) { 440 if (((mask >> core) & 1) != 0) {
@@ -479,20 +455,21 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
479 } 455 }
480 if (use_override) { 456 if (use_override) {
481 ideal_core_override = new_core; 457 ideal_core_override = new_core;
482 affinity_mask_override = new_affinity_mask;
483 } else { 458 } else {
484 const u64 old_affinity_mask = std::exchange(affinity_mask, new_affinity_mask); 459 const auto old_affinity_mask = affinity_mask;
460 affinity_mask.SetAffinityMask(new_affinity_mask);
485 ideal_core = new_core; 461 ideal_core = new_core;
486 if (old_affinity_mask != new_affinity_mask) { 462 if (old_affinity_mask.GetAffinityMask() != new_affinity_mask) {
487 const s32 old_core = processor_id; 463 const s32 old_core = processor_id;
488 if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) { 464 if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) {
489 if (static_cast<s32>(ideal_core) < 0) { 465 if (static_cast<s32>(ideal_core) < 0) {
490 processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES); 466 processor_id = HighestSetCore(affinity_mask.GetAffinityMask(),
467 Core::Hardware::NUM_CPU_CORES);
491 } else { 468 } else {
492 processor_id = ideal_core; 469 processor_id = ideal_core;
493 } 470 }
494 } 471 }
495 kernel.GlobalScheduler().AdjustSchedulingOnAffinity(this, old_affinity_mask, old_core); 472 KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_affinity_mask, old_core);
496 } 473 }
497 } 474 }
498 return RESULT_SUCCESS; 475 return RESULT_SUCCESS;