author     Zach Hilman  2018-12-03 17:29:21 -0500
committer  Zach Hilman  2018-12-03 17:29:30 -0500
commit     b5af41a07bebc0a378428e7d7ddc68c9c750d2d1 (patch)
tree       97d4e53060fda78d6445ddd99ad24b36b38cec14 /src/core/hle/kernel/scheduler.cpp
parent     svc: Avoid performance-degrading unnecessary reschedule (diff)
scheduler: Only work steal higher priority threads from other cores
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  38
1 file changed, 17 insertions(+), 21 deletions(-)
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index efe3551e2..c6b7d5232 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -170,16 +170,6 @@ void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
     ready_queue.remove(priority, thread);
 }
 
-void Scheduler::MoveThreadToBackOfPriorityQueue(Thread* thread, u32 priority) {
-    std::lock_guard<std::mutex> lock(scheduler_mutex);
-
-    // Thread is not in queue
-    ASSERT(ready_queue.contains(thread) != -1);
-
-    ready_queue.remove(priority, thread);
-    ready_queue.push_back(priority, thread);
-}
-
 void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
     std::lock_guard<std::mutex> lock(scheduler_mutex);
 
@@ -190,12 +180,13 @@ void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
     ready_queue.prepare(priority);
 }
 
-Thread* Scheduler::GetNextSuggestedThread(u32 core) const {
+Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
     std::lock_guard<std::mutex> lock(scheduler_mutex);
 
     const u32 mask = 1U << core;
-    return ready_queue.get_first_filter(
-        [mask](Thread const* thread) { return (thread->GetAffinityMask() & mask) != 0; });
+    return ready_queue.get_first_filter([mask, maximum_priority](Thread const* thread) {
+        return (thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority;
+    });
 }
 
 void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
@@ -206,9 +197,10 @@ void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
     // Sanity check that the priority is valid
     ASSERT(thread->GetPriority() < THREADPRIO_COUNT);
 
-    // Yield this thread
+    // Yield this thread -- sleep for zero time and force reschedule to different thread
+    WaitCurrentThread_Sleep();
+    GetCurrentThread()->WakeAfterDelay(0);
     Reschedule();
-    MoveThreadToBackOfPriorityQueue(thread, thread->GetPriority());
 }
 
 void Scheduler::YieldWithLoadBalancing(Thread* thread) {
@@ -222,9 +214,9 @@ void Scheduler::YieldWithLoadBalancing(Thread* thread) {
     // Sanity check that the priority is valid
     ASSERT(priority < THREADPRIO_COUNT);
 
-    // Reschedule thread to end of queue.
-    Reschedule();
-    MoveThreadToBackOfPriorityQueue(thread, priority);
+    // Sleep for zero time to be able to force reschedule to different thread
+    WaitCurrentThread_Sleep();
+    GetCurrentThread()->WakeAfterDelay(0);
 
     Thread* suggested_thread = nullptr;
 
@@ -235,16 +227,20 @@ void Scheduler::YieldWithLoadBalancing(Thread* thread) {
             continue;
 
         const auto res =
-            Core::System::GetInstance().CpuCore(cur_core).Scheduler().GetNextSuggestedThread(core);
-        if (res != nullptr) {
+            Core::System::GetInstance().CpuCore(cur_core).Scheduler().GetNextSuggestedThread(
+                core, priority);
+        if (res != nullptr &&
+            (suggested_thread == nullptr || suggested_thread->GetPriority() > res->GetPriority())) {
             suggested_thread = res;
-            break;
         }
     }
 
     // If a suggested thread was found, queue that for this core
     if (suggested_thread != nullptr)
         suggested_thread->ChangeCore(core, suggested_thread->GetAffinityMask());
+
+    // Perform actual yielding.
+    Reschedule();
 }
 
 void Scheduler::YieldAndWaitForLoadBalancing(Thread* thread) {
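
For reference, the sketch below is a standalone illustration of the policy this commit introduces: when a thread yields with load balancing, other cores' ready queues are only searched for threads that are allowed to run on the yielding core and have strictly higher priority (a numerically lower priority value) than the yielding thread, and the loop now keeps the best candidate across all cores instead of stopping at the first match. The types and helpers here (SimpleThread, ReadyQueue, PickStolenThread) are simplified stand-ins for illustration only, not the actual yuzu kernel API.

// Simplified stand-ins; not the actual yuzu kernel types or API.
#include <array>
#include <cstdint>
#include <iostream>
#include <vector>

namespace sketch {

constexpr std::uint32_t NUM_CPU_CORES = 4;

struct SimpleThread {
    std::uint32_t priority;      // lower value == higher priority
    std::uint32_t affinity_mask; // bit n set == thread may run on core n
};

// One core's ready queue, assumed to be ordered from highest to lowest priority.
using ReadyQueue = std::vector<SimpleThread*>;

// Mirrors the new GetNextSuggestedThread(core, maximum_priority) filter: only
// suggest a thread that can run on `core` and whose priority value is strictly
// below (i.e. better than) `maximum_priority`.
SimpleThread* GetNextSuggestedThread(const ReadyQueue& queue, std::uint32_t core,
                                     std::uint32_t maximum_priority) {
    const std::uint32_t mask = 1U << core;
    for (SimpleThread* thread : queue) {
        if ((thread->affinity_mask & mask) != 0 && thread->priority < maximum_priority) {
            return thread;
        }
    }
    return nullptr;
}

// Mirrors the updated selection loop in YieldWithLoadBalancing: scan every
// other core and keep the best candidate (lowest priority value) rather than
// taking the first non-null suggestion.
SimpleThread* PickStolenThread(const std::array<ReadyQueue, NUM_CPU_CORES>& queues,
                               std::uint32_t yielding_core, std::uint32_t yielding_priority) {
    SimpleThread* suggested = nullptr;
    for (std::uint32_t cur_core = 0; cur_core < NUM_CPU_CORES; ++cur_core) {
        if (cur_core == yielding_core) {
            continue;
        }
        SimpleThread* res =
            GetNextSuggestedThread(queues[cur_core], yielding_core, yielding_priority);
        if (res != nullptr && (suggested == nullptr || suggested->priority > res->priority)) {
            suggested = res;
        }
    }
    return suggested;
}

} // namespace sketch

int main() {
    using namespace sketch;
    SimpleThread low{/*priority=*/44, /*affinity_mask=*/0b1111};  // lower priority than the yielder
    SimpleThread high{/*priority=*/28, /*affinity_mask=*/0b0001}; // higher priority, may run on core 0
    std::array<ReadyQueue, NUM_CPU_CORES> queues{};
    queues[1].push_back(&low);
    queues[2].push_back(&high);

    // A priority-32 thread yielding on core 0 steals only `high`; `low` stays put,
    // matching the behaviour change described by the commit title.
    SimpleThread* stolen = PickStolenThread(queues, /*yielding_core=*/0, /*yielding_priority=*/32);
    std::cout << (stolen == &high) << '\n'; // prints 1
    return 0;
}

Built with any C++14 compiler, this prints 1, showing that only a strictly higher priority thread is pulled over to the yielding core.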