author     2018-11-18 23:44:19 -0500
committer  2018-11-18 23:44:19 -0500
commit     409dcf0e0aecfdb676fd3b64223a25e47c1b1c1a (patch)
tree       ccb9eae7c7e8b93760f3087fb6e67a13cbb24f2c /src
parent     Merge pull request #1717 from FreddyFunk/swizzle-gob (diff)
svc: Implement yield types 0 and -1
Diffstat (limited to 'src')
 src/common/thread_queue_list.h    | 16
 src/core/hle/kernel/scheduler.cpp | 18
 src/core/hle/kernel/scheduler.h   |  6
 src/core/hle/kernel/svc.cpp       | 27
 src/core/hle/kernel/thread.cpp    | 60
 src/core/hle/kernel/thread.h      |  5
 6 files changed, 130 insertions(+), 2 deletions(-)
diff --git a/src/common/thread_queue_list.h b/src/common/thread_queue_list.h
index 133122c5f..323eab97c 100644
--- a/src/common/thread_queue_list.h
+++ b/src/common/thread_queue_list.h
@@ -6,6 +6,7 @@
 
 #include <array>
 #include <deque>
+#include <functional>
 #include <boost/range/algorithm_ext/erase.hpp>
 
 namespace Common {
@@ -49,6 +50,21 @@ struct ThreadQueueList {
         return T();
     }
 
+    T get_first_filter(std::function<bool(T)> filter) const {
+        const Queue* cur = first;
+        while (cur != nullptr) {
+            if (!cur->data.empty()) {
+                for (const auto& item : cur->data) {
+                    if (filter(item))
+                        return item;
+                }
+            }
+            cur = cur->next_nonempty;
+        }
+
+        return T();
+    }
+
     T pop_first() {
         Queue* cur = first;
         while (cur != nullptr) {
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 5a5f4cef1..fb5e14950 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -169,6 +169,16 @@ void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
     ready_queue.remove(priority, thread);
 }
 
+void Scheduler::RescheduleThread(Thread* thread, u32 priority) {
+    std::lock_guard<std::mutex> lock(scheduler_mutex);
+
+    // Thread is not in queue
+    ASSERT(ready_queue.contains(thread) != -1);
+
+    ready_queue.remove(priority, thread);
+    ready_queue.push_back(priority, thread);
+}
+
 void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
     std::lock_guard<std::mutex> lock(scheduler_mutex);
 
@@ -179,4 +189,12 @@ void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
     ready_queue.prepare(priority);
 }
 
+Thread* Scheduler::GetNextSuggestedThread(u32 core) {
+    std::lock_guard<std::mutex> lock(scheduler_mutex);
+
+    const auto mask = 1 << core;
+    return ready_queue.get_first_filter(
+        [&mask](Thread* thread) { return (thread->GetAffinityMask() & mask) != 0; });
+}
+
 } // namespace Kernel
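
GetNextSuggestedThread builds a single-core bitmask and asks the ready queue for the first thread whose affinity mask allows that core. A standalone sketch of just that mask test follows; CanRunOnCore is a hypothetical stand-in, not a yuzu function.

#include <cstdint>
#include <iostream>

// Hypothetical stand-in for the affinity check used by GetNextSuggestedThread:
// bit N of the affinity mask set means the thread may run on core N.
bool CanRunOnCore(std::uint64_t affinity_mask, std::uint32_t core) {
    const std::uint64_t mask = std::uint64_t{1} << core;
    return (affinity_mask & mask) != 0;
}

int main() {
    const std::uint64_t affinity = 0b1010; // allowed on cores 1 and 3
    for (std::uint32_t core = 0; core < 4; ++core) {
        std::cout << "core " << core << ": " << (CanRunOnCore(affinity, core) ? "yes" : "no")
                  << '\n';
    }
    return 0;
}
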
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index c63032b7d..8444afdbc 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -48,9 +48,15 @@ public:
     /// Unschedules a thread that was already scheduled
    void UnscheduleThread(Thread* thread, u32 priority);
 
+    /// Moves a thread to the back of the current priority queue
+    void RescheduleThread(Thread* thread, u32 priority);
+
     /// Sets the priority of a thread in the scheduler
     void SetThreadPriority(Thread* thread, u32 priority);
 
+    /// Gets the next suggested thread for load balancing
+    Thread* GetNextSuggestedThread(u32 core);
+
     /// Returns a list of all threads managed by the scheduler
     const std::vector<SharedPtr<Thread>>& GetThreadList() const {
         return thread_list;
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 75dbfc31d..467575c93 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -962,16 +962,39 @@ static void SleepThread(s64 nanoseconds) {
 
     // Don't attempt to yield execution if there are no available threads to run,
     // this way we avoid a useless reschedule to the idle thread.
-    if (nanoseconds == 0 && !Core::System::GetInstance().CurrentScheduler().HaveReadyThreads())
+    if (!Core::System::GetInstance().CurrentScheduler().HaveReadyThreads())
         return;
 
+    if (nanoseconds <= 0) {
+        switch (nanoseconds) {
+        case 0:
+            GetCurrentThread()->YieldNormal();
+            break;
+        case -1:
+            GetCurrentThread()->YieldWithLoadBalancing();
+            break;
+        case -2:
+            GetCurrentThread()->YieldAndWaitForLoadBalancing();
+            break;
+        default:
+            UNREACHABLE_MSG(
+                "Unimplemented sleep yield type '{:016X}'! Falling back to forced reschedule...",
+                nanoseconds);
+        }
+
+        nanoseconds = 0;
+    }
+
     // Sleep current thread and check for next thread to schedule
     WaitCurrentThread_Sleep();
 
     // Create an event to wake the thread up after the specified nanosecond delay has passed
     GetCurrentThread()->WakeAfterDelay(nanoseconds);
 
-    Core::System::GetInstance().PrepareReschedule();
+    Core::System::GetInstance().CpuCore(0).PrepareReschedule();
+    Core::System::GetInstance().CpuCore(1).PrepareReschedule();
+    Core::System::GetInstance().CpuCore(2).PrepareReschedule();
+    Core::System::GetInstance().CpuCore(3).PrepareReschedule();
 }
 
 /// Wait process wide key atomic
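
With this change, non-positive svcSleepThread arguments are treated as yield requests rather than timed sleeps: 0 dispatches to YieldNormal, -1 to YieldWithLoadBalancing, and -2 to YieldAndWaitForLoadBalancing (still unimplemented in this commit). A minimal sketch of the same dispatch shape follows; the free functions and SleepOrYield are hypothetical stand-ins for the Thread methods and SVC handler.

#include <cstdint>
#include <iostream>

// Hypothetical handlers standing in for Thread::YieldNormal, Thread::YieldWithLoadBalancing
// and Thread::YieldAndWaitForLoadBalancing.
void YieldNormal() { std::cout << "yield within the current core's priority queue\n"; }
void YieldWithLoadBalancing() { std::cout << "yield, allowing migration from other cores\n"; }
void YieldAndWaitForLoadBalancing() { std::cout << "yield and wait (not yet implemented)\n"; }

// Sketch of the dispatch: non-positive sleep durations select a yield type,
// positive durations remain ordinary timed sleeps.
void SleepOrYield(std::int64_t nanoseconds) {
    if (nanoseconds > 0) {
        std::cout << "sleep for " << nanoseconds << " ns\n";
        return;
    }
    switch (nanoseconds) {
    case 0:
        YieldNormal();
        break;
    case -1:
        YieldWithLoadBalancing();
        break;
    case -2:
        YieldAndWaitForLoadBalancing();
        break;
    default:
        std::cout << "unexpected yield type " << nanoseconds << '\n';
        break;
    }
}

int main() {
    SleepOrYield(1'000'000); // ordinary 1 ms sleep
    SleepOrYield(0);
    SleepOrYield(-1);
    SleepOrYield(-2);
    return 0;
}
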
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 4ffb76818..ddc4da1c0 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -388,6 +388,66 @@ bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> t
     return wakeup_callback(reason, std::move(thread), std::move(object), index);
 }
 
+void Thread::YieldNormal() {
+    // Avoid yielding if the thread isn't even running.
+    if (status != ThreadStatus::Running) {
+        return;
+    }
+
+    if (nominal_priority < THREADPRIO_COUNT) {
+        scheduler->RescheduleThread(this, nominal_priority);
+        scheduler->Reschedule();
+    }
+}
+
+void Thread::YieldWithLoadBalancing() {
+    auto priority = nominal_priority;
+    auto core = processor_id;
+
+    // Avoid yielding if the thread isn't even running.
+    if (status != ThreadStatus::Running) {
+        Core::System::GetInstance().CpuCore(processor_id).PrepareReschedule();
+        return;
+    }
+
+    SharedPtr<Thread> next;
+    const auto& threads = scheduler->GetThreadList();
+
+    if (priority < THREADPRIO_COUNT) {
+        // Reschedule thread to end of queue.
+        scheduler->RescheduleThread(this, priority);
+
+        const auto iter = std::find_if(threads.begin(), threads.end(),
+                                       [&priority](const SharedPtr<Thread>& thread) {
+                                           return thread->GetNominalPriority() == priority;
+                                       });
+
+        if (iter != threads.end())
+            next = iter->get();
+    }
+
+    Thread* suggested_thread = nullptr;
+
+    for (int i = 0; i < 4; ++i) {
+        if (i == core)
+            continue;
+
+        const auto res =
+            Core::System::GetInstance().CpuCore(i).Scheduler().GetNextSuggestedThread(core);
+        if (res != nullptr) {
+            suggested_thread = res;
+            break;
+        }
+    }
+
+    if (suggested_thread != nullptr)
+        suggested_thread->ChangeCore(core, suggested_thread->GetAffinityMask());
+}
+
+void Thread::YieldAndWaitForLoadBalancing() {
+    UNIMPLEMENTED_MSG("Wait for load balancing thread yield type is not implemented!");
+}
+
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 
 /**
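
YieldWithLoadBalancing pushes the current thread to the back of its priority queue, then polls every other core's scheduler for a suggested thread and, if one is found, migrates it to the current core. A simplified, self-contained sketch of that polling loop follows; FakeThread and FakeScheduler are hypothetical simplifications, not yuzu's classes.

#include <array>
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

constexpr std::uint32_t NUM_CORES = 4;

// Hypothetical stand-ins used only to show the shape of the load-balancing loop in
// Thread::YieldWithLoadBalancing.
struct FakeThread {
    std::string name;
    std::uint64_t affinity_mask; // bit N set => may run on core N
};

struct FakeScheduler {
    std::optional<FakeThread> ready_thread;

    // Mirrors Scheduler::GetNextSuggestedThread: suggest a ready thread only if its
    // affinity mask permits the requesting core.
    std::optional<FakeThread> GetNextSuggestedThread(std::uint32_t core) const {
        if (ready_thread && (ready_thread->affinity_mask & (std::uint64_t{1} << core)) != 0)
            return ready_thread;
        return std::nullopt;
    }
};

int main() {
    std::array<FakeScheduler, NUM_CORES> schedulers{};
    schedulers[2].ready_thread = FakeThread{"worker", 0b1111}; // runnable on any core

    const std::uint32_t current_core = 0;
    std::optional<FakeThread> suggested;

    // Poll the other cores' schedulers in order and take the first suggestion, as the
    // yield-with-load-balancing path does.
    for (std::uint32_t i = 0; i < NUM_CORES; ++i) {
        if (i == current_core)
            continue;
        if (auto candidate = schedulers[i].GetNextSuggestedThread(current_core)) {
            suggested = candidate;
            break;
        }
    }

    if (suggested)
        std::cout << "migrating '" << suggested->name << "' to core " << current_core << '\n';
    else
        std::cout << "no candidate thread to migrate\n";
    return 0;
}
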
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index d384d50db..e97434dd8 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -26,6 +26,7 @@ enum ThreadPriority : u32 {
     THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps
     THREADPRIO_DEFAULT = 44,      ///< Default thread priority for userland apps
     THREADPRIO_LOWEST = 63,       ///< Lowest thread priority
+    THREADPRIO_COUNT = 64,        ///< Total number of possible thread priorities.
 };
 
 enum ThreadProcessorId : s32 {
@@ -370,6 +371,10 @@ public:
         return affinity_mask;
     }
 
+    void YieldNormal();
+    void YieldWithLoadBalancing();
+    void YieldAndWaitForLoadBalancing();
+
 private:
     explicit Thread(KernelCore& kernel);
     ~Thread() override;