| field | value |
|---|---|
| author | 2018-11-22 00:33:53 -0500 |
| committer | 2018-11-22 00:33:53 -0500 |
| commit | 820d81b9a5392951c18daa5a47d6c0ffd28baa9b (patch) |
| tree | dab20f1ff49ab76cdcd511e189799f4d6e40677e /src/core |
| parent | svc: Implement yield types 0 and -1 (diff) |
| download | yuzu-820d81b9a5392951c18daa5a47d6c0ffd28baa9b.tar.gz, yuzu-820d81b9a5392951c18daa5a47d6c0ffd28baa9b.tar.xz, yuzu-820d81b9a5392951c18daa5a47d6c0ffd28baa9b.zip |
scheduler: Add explanations for YieldWith and WithoutLoadBalancing
Diffstat (limited to 'src/core')

 src/core/hle/kernel/scheduler.cpp | 61
 src/core/hle/kernel/scheduler.h   | 70
 src/core/hle/kernel/svc.cpp       | 21
 src/core/hle/kernel/thread.cpp    | 60
 src/core/hle/kernel/thread.h      |  4
 5 files changed, 139 insertions(+), 77 deletions(-)
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index fb5e14950..624c841ad 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -9,6 +9,7 @@
 #include "common/logging/log.h"
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
+#include "core/core_cpu.h"
 #include "core/core_timing.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/process.h"
@@ -169,7 +170,7 @@ void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
     ready_queue.remove(priority, thread);
 }
 
-void Scheduler::RescheduleThread(Thread* thread, u32 priority) {
+void Scheduler::MoveThreadToBackOfPriorityQueue(Thread* thread, u32 priority) {
     std::lock_guard<std::mutex> lock(scheduler_mutex);
 
     // Thread is not in queue
@@ -189,12 +190,64 @@ void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
     ready_queue.prepare(priority);
 }
 
-Thread* Scheduler::GetNextSuggestedThread(u32 core) {
+Thread* Scheduler::GetNextSuggestedThread(u32 core) const {
     std::lock_guard<std::mutex> lock(scheduler_mutex);
 
-    const auto mask = 1 << core;
+    const u32 mask = 1U << core;
     return ready_queue.get_first_filter(
-        [&mask](Thread* thread) { return (thread->GetAffinityMask() & mask) != 0; });
+        [mask](Thread const* thread) { return (thread->GetAffinityMask() & mask) != 0; });
+}
+
+void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
+    ASSERT(thread != nullptr);
+    // Avoid yielding if the thread isn't even running.
+    ASSERT(thread->GetStatus() == ThreadStatus::Running);
+
+    // Sanity check that the priority is valid
+    ASSERT(thread->GetPriority() < THREADPRIO_COUNT);
+
+    // Yield this thread
+    MoveThreadToBackOfPriorityQueue(thread, thread->GetPriority());
+    Reschedule();
+}
+
+void Scheduler::YieldWithLoadBalancing(Thread* thread) {
+    ASSERT(thread != nullptr);
+    const auto priority = thread->GetPriority();
+    const auto core = static_cast<u32>(thread->GetProcessorID());
+
+    // Avoid yielding if the thread isn't even running.
+    ASSERT(thread->GetStatus() == ThreadStatus::Running);
+
+    // Sanity check that the priority is valid
+    ASSERT(priority < THREADPRIO_COUNT);
+
+    // Reschedule thread to end of queue.
+    MoveThreadToBackOfPriorityQueue(thread, priority);
+
+    Thread* suggested_thread = nullptr;
+
+    // Search through all of the cpu cores (except this one) for a suggested thread.
+    // Take the first non-nullptr one
+    for (unsigned cur_core = 0; cur_core < Core::NUM_CPU_CORES; ++cur_core) {
+        if (cur_core == core)
+            continue;
+
+        const auto res =
+            Core::System::GetInstance().CpuCore(cur_core).Scheduler().GetNextSuggestedThread(core);
+        if (res != nullptr) {
+            suggested_thread = res;
+            break;
+        }
+    }
+
+    // If a suggested thread was found, queue that for this core
+    if (suggested_thread != nullptr)
+        suggested_thread->ChangeCore(core, suggested_thread->GetAffinityMask());
+}
+
+void Scheduler::YieldAndWaitForLoadBalancing(Thread* thread) {
+    UNIMPLEMENTED_MSG("Wait for load balancing thread yield type is not implemented!");
 }
 
 } // namespace Kernel
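The interesting part of the new `YieldWithLoadBalancing` is the suggested-thread search: it asks every other core's scheduler for the first ready thread whose affinity mask allows it to run on the yielding core. The sketch below is illustration only, not yuzu code -- `FakeThread`, `NextSuggested`, and `FindMigrationCandidate` are invented stand-ins -- but the filter mirrors `GetNextSuggestedThread` and the outer loop mirrors the per-core scan above.

```cpp
// Toy model of the suggested-thread search (not yuzu APIs).
#include <array>
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

constexpr std::uint32_t kNumCores = 4;

struct FakeThread {
    const char* name;
    std::uint32_t affinity_mask; // bit n set => may run on core n
};

using ReadyQueue = std::vector<FakeThread>;

// Mirrors GetNextSuggestedThread: first thread in the queue whose affinity
// mask includes the requesting core.
std::optional<FakeThread> NextSuggested(const ReadyQueue& queue, std::uint32_t core) {
    const std::uint32_t mask = 1U << core;
    for (const FakeThread& t : queue) {
        if ((t.affinity_mask & mask) != 0) {
            return t;
        }
    }
    return std::nullopt;
}

// Mirrors the YieldWithLoadBalancing loop: scan every other core's queue and
// take the first candidate that could migrate to the yielding core.
std::optional<FakeThread> FindMigrationCandidate(const std::array<ReadyQueue, kNumCores>& queues,
                                                 std::uint32_t yielding_core) {
    for (std::uint32_t core = 0; core < kNumCores; ++core) {
        if (core == yielding_core) {
            continue;
        }
        if (auto t = NextSuggested(queues[core], yielding_core)) {
            return t;
        }
    }
    return std::nullopt;
}

int main() {
    std::array<ReadyQueue, kNumCores> queues{};
    // Core 1 holds ThreadD (core-1 only) and ThreadC (may run on core 0 or 1).
    queues[1] = {{"ThreadD", 0b0010}, {"ThreadC", 0b0011}};
    if (auto t = FindMigrationCandidate(queues, 0)) {
        std::cout << t->name << " would be migrated to core 0\n"; // prints ThreadC
    }
}
```

Running it prints `ThreadC would be migrated to core 0`, which matches the dual-core walkthrough in the scheduler.h comment below.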
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 8444afdbc..71b32589a 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -49,13 +49,79 @@ public:
     void UnscheduleThread(Thread* thread, u32 priority);
 
     /// Moves a thread to the back of the current priority queue
-    void RescheduleThread(Thread* thread, u32 priority);
+    void MoveThreadToBackOfPriorityQueue(Thread* thread, u32 priority);
 
     /// Sets the priority of a thread in the scheduler
     void SetThreadPriority(Thread* thread, u32 priority);
 
     /// Gets the next suggested thread for load balancing
-    Thread* GetNextSuggestedThread(u32 core);
+    Thread* GetNextSuggestedThread(u32 core) const;
+
+    /**
+     * YieldWithoutLoadBalancing -- analogous to normal yield on a system
+     * Moves the thread to the end of the ready queue for its priority, and then reschedules the
+     * system to the new head of the queue.
+     *
+     * Example (Single Core -- but can be extrapolated to multi):
+     * ready_queue[prio=0]: ThreadA, ThreadB, ThreadC (->exec order->)
+     * Currently Running: ThreadR
+     *
+     * ThreadR calls YieldWithoutLoadBalancing
+     *
+     * ThreadR is moved to the end of ready_queue[prio=0]:
+     * ready_queue[prio=0]: ThreadA, ThreadB, ThreadC, ThreadR (->exec order->)
+     * Currently Running: Nothing
+     *
+     * System is rescheduled (ThreadA is popped off of queue):
+     * ready_queue[prio=0]: ThreadB, ThreadC, ThreadR (->exec order->)
+     * Currently Running: ThreadA
+     *
+     * If the queue is empty at time of call, no yielding occurs. This does not cross between cores
+     * or priorities at all.
+     */
+    void YieldWithoutLoadBalancing(Thread* thread);
+
+    /**
+     * YieldWithLoadBalancing -- yield but with better selection of the new running thread
+     * Moves the current thread to the end of the ready queue for its priority, then selects a
+     * 'suggested thread' (a thread on a different core that could run on this core) from the
+     * scheduler, changes its core, and reschedules the current core to that thread.
+     *
+     * Example (Dual Core -- can be extrapolated to Quad Core, this is just normal yield if it were
+     * single core):
+     * ready_queue[core=0][prio=0]: ThreadA, ThreadB (affinities not pictured as irrelevant)
+     * ready_queue[core=1][prio=0]: ThreadC[affinity=both], ThreadD[affinity=core1only]
+     * Currently Running: ThreadQ on Core 0 || ThreadP on Core 1
+     *
+     * ThreadQ calls YieldWithLoadBalancing
+     *
+     * ThreadQ is moved to the end of ready_queue[core=0][prio=0]:
+     * ready_queue[core=0][prio=0]: ThreadA, ThreadB
+     * ready_queue[core=1][prio=0]: ThreadC[affinity=both], ThreadD[affinity=core1only]
+     * Currently Running: ThreadQ on Core 0 || ThreadP on Core 1
+     *
+     * A list of suggested threads for each core is compiled
+     * Suggested Threads: {ThreadC on Core 1}
+     * If this were quad core (as the switch is), there could be between 0 and 3 threads in this
+     * list. If there are more than one, the thread is selected by highest prio.
+     *
+     * ThreadC is core changed to Core 0:
+     * ready_queue[core=0][prio=0]: ThreadC, ThreadA, ThreadB, ThreadQ
+     * ready_queue[core=1][prio=0]: ThreadD
+     * Currently Running: None on Core 0 || ThreadP on Core 1
+     *
+     * System is rescheduled (ThreadC is popped off of queue):
+     * ready_queue[core=0][prio=0]: ThreadA, ThreadB, ThreadQ
+     * ready_queue[core=1][prio=0]: ThreadD
+     * Currently Running: ThreadC on Core 0 || ThreadP on Core 1
+     *
+     * If no suggested threads can be found this will behave just as normal yield. If there are
+     * multiple candidates for the suggested thread on a core, the highest prio is taken.
+     */
+    void YieldWithLoadBalancing(Thread* thread);
+
+    /// Currently unknown -- asserts as unimplemented on call
+    void YieldAndWaitForLoadBalancing(Thread* thread);
 
     /// Returns a list of all threads managed by the scheduler
     const std::vector<SharedPtr<Thread>>& GetThreadList() const {
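The `YieldWithoutLoadBalancing` comment added above boils down to a move-to-back on a single priority queue. Here is a rough model of that walkthrough using a toy one-priority queue rather than yuzu's actual scheduler types (every name in this sketch is invented for illustration):

```cpp
// Toy single-priority ready queue; models the ThreadA/ThreadB/ThreadC/ThreadR example.
#include <deque>
#include <iostream>
#include <string>

struct ToyQueue {
    std::deque<std::string> ready; // one priority level, front runs next
    std::string running;

    // Move the running thread to the back of its queue, then run the new head.
    void YieldWithoutLoadBalancing() {
        if (ready.empty()) {
            return; // nothing else ready: the yield is a no-op, as documented
        }
        ready.push_back(running);
        running = ready.front();
        ready.pop_front();
    }
};

int main() {
    ToyQueue q{{"ThreadA", "ThreadB", "ThreadC"}, "ThreadR"};
    q.YieldWithoutLoadBalancing();
    std::cout << "Running: " << q.running << '\n'; // ThreadA
    for (const auto& t : q.ready) {
        std::cout << "Ready: " << t << '\n'; // ThreadB, ThreadC, ThreadR
    }
}
```

After the yield, `ThreadA` is running and the ready order is `ThreadB, ThreadC, ThreadR`, exactly as in the doc comment; calling it with an empty queue leaves the running thread in place.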
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 467575c93..205706033 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -965,16 +965,23 @@ static void SleepThread(s64 nanoseconds) {
     if (!Core::System::GetInstance().CurrentScheduler().HaveReadyThreads())
         return;
 
+    enum class SleepType : s64 {
+        YieldWithoutLoadBalancing = 0,
+        YieldWithLoadBalancing = -1,
+        YieldAndWaitForLoadBalancing = -2,
+    };
+
     if (nanoseconds <= 0) {
-        switch (nanoseconds) {
-        case 0:
-            GetCurrentThread()->YieldNormal();
+        auto& scheduler{Core::System::GetInstance().CurrentScheduler()};
+        switch (static_cast<SleepType>(nanoseconds)) {
+        case SleepType::YieldWithoutLoadBalancing:
+            scheduler.YieldWithoutLoadBalancing(GetCurrentThread());
             break;
-        case -1:
-            GetCurrentThread()->YieldWithLoadBalancing();
+        case SleepType::YieldWithLoadBalancing:
+            scheduler.YieldWithLoadBalancing(GetCurrentThread());
             break;
-        case -2:
-            GetCurrentThread()->YieldAndWaitForLoadBalancing();
+        case SleepType::YieldAndWaitForLoadBalancing:
+            scheduler.YieldAndWaitForLoadBalancing(GetCurrentThread());
             break;
         default:
             UNREACHABLE_MSG(
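As the svc.cpp hunk shows, `svcSleepThread` overloads its `nanoseconds` argument: positive values are real sleeps, while 0, -1, and -2 select the three yield variants. A small self-contained sketch of that dispatch follows; the `HandleSleep` helper and the printed messages are invented for illustration, whereas the real code calls into the scheduler as above.

```cpp
// Standalone model of the sleep/yield dispatch on the nanoseconds value.
#include <cstdint>
#include <iostream>

enum class SleepType : std::int64_t {
    YieldWithoutLoadBalancing = 0,
    YieldWithLoadBalancing = -1,
    YieldAndWaitForLoadBalancing = -2,
};

void HandleSleep(std::int64_t nanoseconds) {
    if (nanoseconds > 0) {
        std::cout << "sleep for " << nanoseconds << " ns\n";
        return;
    }
    switch (static_cast<SleepType>(nanoseconds)) {
    case SleepType::YieldWithoutLoadBalancing:
        std::cout << "yield without load balancing\n";
        break;
    case SleepType::YieldWithLoadBalancing:
        std::cout << "yield with load balancing\n";
        break;
    case SleepType::YieldAndWaitForLoadBalancing:
        std::cout << "yield and wait for load balancing\n";
        break;
    default:
        std::cout << "unexpected negative timeout\n";
        break;
    }
}

int main() {
    HandleSleep(1'000'000); // ordinary 1 ms sleep
    HandleSleep(0);         // yield type 0
    HandleSleep(-1);        // yield type -1
    HandleSleep(-2);        // yield type -2
}
```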
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index ddc4da1c0..4ffb76818 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -388,66 +388,6 @@ bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> t
     return wakeup_callback(reason, std::move(thread), std::move(object), index);
 }
 
-void Thread::YieldNormal() {
-    // Avoid yielding if the thread isn't even running.
-    if (status != ThreadStatus::Running) {
-        return;
-    }
-
-    if (nominal_priority < THREADPRIO_COUNT) {
-        scheduler->RescheduleThread(this, nominal_priority);
-        scheduler->Reschedule();
-    }
-}
-
-void Thread::YieldWithLoadBalancing() {
-    auto priority = nominal_priority;
-    auto core = processor_id;
-
-    // Avoid yielding if the thread isn't even running.
-    if (status != ThreadStatus::Running) {
-        Core::System::GetInstance().CpuCore(processor_id).PrepareReschedule();
-        return;
-    }
-
-    SharedPtr<Thread> next;
-    const auto& threads = scheduler->GetThreadList();
-
-    if (priority < THREADPRIO_COUNT) {
-        // Reschedule thread to end of queue.
-        scheduler->RescheduleThread(this, priority);
-
-        const auto iter = std::find_if(threads.begin(), threads.end(),
-                                       [&priority](const SharedPtr<Thread>& thread) {
-                                           return thread->GetNominalPriority() == priority;
-                                       });
-
-        if (iter != threads.end())
-            next = iter->get();
-    }
-
-    Thread* suggested_thread = nullptr;
-
-    for (int i = 0; i < 4; ++i) {
-        if (i == core)
-            continue;
-
-        const auto res =
-            Core::System::GetInstance().CpuCore(i).Scheduler().GetNextSuggestedThread(core);
-        if (res != nullptr) {
-            suggested_thread = res;
-            break;
-        }
-    }
-
-    if (suggested_thread != nullptr)
-        suggested_thread->ChangeCore(core, suggested_thread->GetAffinityMask());
-}
-
-void Thread::YieldAndWaitForLoadBalancing() {
-    UNIMPLEMENTED_MSG("Wait for load balancing thread yield type is not implemented!");
-}
-
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 
 /**
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index e97434dd8..77aec099a 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -371,10 +371,6 @@ public:
         return affinity_mask;
     }
 
-    void YieldNormal();
-    void YieldWithLoadBalancing();
-    void YieldAndWaitForLoadBalancing();
-
 private:
     explicit Thread(KernelCore& kernel);
    ~Thread() override;