author     bunnei    2020-06-28 12:37:50 -0400
committer  GitHub    2020-06-28 12:37:50 -0400
commit     b05795d704e0c194215f815a5703db09e524b59a (patch)
tree       ecf4023b4ee0c91555c1d8263762fcb9dcb04a17 /src/core/hle/kernel/scheduler.cpp
parent     Merge pull request #4196 from ogniK5377/nrr-nro-fixes (diff)
parent     Core/Common: Address Feedback. (diff)
Merge pull request #3955 from FernandoS27/prometheus-2b
Remake Kernel Scheduling, CPU Management & Boot Management (Prometheus)
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
-rw-r--r--    src/core/hle/kernel/scheduler.cpp    576
1 file changed, 443 insertions(+), 133 deletions(-)
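
The heart of the new SelectThreads() in this diff is an idle-core bitmask walk: every core whose schedule queue has no top thread sets a bit in idle_cores, and the loop then visits only those cores via Common::CountTrailingZeroes32 instead of rescanning all of them. A minimal, self-contained sketch of that pattern (hypothetical example code, using C++20 std::countr_zero in place of yuzu's Common::CountTrailingZeroes32):

#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
    // Pretend cores 0, 1 and 3 had empty scheduling queues.
    std::uint32_t idle_cores = 0b1011;
    while (idle_cores != 0) {
        // Lowest set bit = lowest-numbered idle core, as in SelectThreads().
        const auto core_id = static_cast<std::uint32_t>(std::countr_zero(idle_cores));
        std::printf("core %u is idle; try to migrate a suggested thread here\n", core_id);
        idle_cores &= ~(1u << core_id); // done with this core
    }
    return 0;
}

The same visit-lowest-set-bit-and-clear-it loop reappears below in EnableInterruptAndSchedule() for the cores_pending_reschedule mask.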
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 1140c72a3..2b12c0dbf 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -11,11 +11,15 @@
 #include <utility>
 
 #include "common/assert.h"
+#include "common/bit_util.h"
+#include "common/fiber.h"
 #include "common/logging/log.h"
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
 #include "core/core_timing.h"
+#include "core/cpu_manager.h"
 #include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/physical_core.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/time_manager.h"
@@ -27,103 +31,151 @@ GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {}
 GlobalScheduler::~GlobalScheduler() = default;
 
 void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) {
+    global_list_guard.lock();
     thread_list.push_back(std::move(thread));
+    global_list_guard.unlock();
 }
 
 void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
+    global_list_guard.lock();
     thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                       thread_list.end());
+    global_list_guard.unlock();
 }
 
-void GlobalScheduler::UnloadThread(std::size_t core) {
-    Scheduler& sched = kernel.Scheduler(core);
-    sched.UnloadThread();
-}
-
-void GlobalScheduler::SelectThread(std::size_t core) {
+u32 GlobalScheduler::SelectThreads() {
+    ASSERT(is_locked);
     const auto update_thread = [](Thread* thread, Scheduler& sched) {
-        if (thread != sched.selected_thread.get()) {
+        sched.guard.lock();
+        if (thread != sched.selected_thread_set.get()) {
             if (thread == nullptr) {
                 ++sched.idle_selection_count;
             }
-            sched.selected_thread = SharedFrom(thread);
+            sched.selected_thread_set = SharedFrom(thread);
         }
-        sched.is_context_switch_pending = sched.selected_thread != sched.current_thread;
+        const bool reschedule_pending =
+            sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread);
+        sched.is_context_switch_pending = reschedule_pending;
         std::atomic_thread_fence(std::memory_order_seq_cst);
+        sched.guard.unlock();
+        return reschedule_pending;
     };
-    Scheduler& sched = kernel.Scheduler(core);
-    Thread* current_thread = nullptr;
-    // Step 1: Get top thread in schedule queue.
-    current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
-    if (current_thread) {
-        update_thread(current_thread, sched);
-        return;
+    if (!is_reselection_pending.load()) {
+        return 0;
     }
-    // Step 2: Try selecting a suggested thread.
-    Thread* winner = nullptr;
-    std::set<s32> sug_cores;
-    for (auto thread : suggested_queue[core]) {
-        s32 this_core = thread->GetProcessorID();
-        Thread* thread_on_core = nullptr;
-        if (this_core >= 0) {
-            thread_on_core = scheduled_queue[this_core].front();
-        }
-        if (this_core < 0 || thread != thread_on_core) {
-            winner = thread;
-            break;
+    std::array<Thread*, Core::Hardware::NUM_CPU_CORES> top_threads{};
+
+    u32 idle_cores{};
+
+    // Step 1: Get top thread in schedule queue.
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+        Thread* top_thread =
+            scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
+        if (top_thread != nullptr) {
+            // TODO(Blinkhawk): Implement Thread Pinning
+        } else {
+            idle_cores |= (1ul << core);
         }
-        sug_cores.insert(this_core);
+        top_threads[core] = top_thread;
     }
-    // if we got a suggested thread, select it, else do a second pass.
-    if (winner && winner->GetPriority() > 2) {
-        if (winner->IsRunning()) {
-            UnloadThread(static_cast<u32>(winner->GetProcessorID()));
+
+    while (idle_cores != 0) {
+        u32 core_id = Common::CountTrailingZeroes32(idle_cores);
+
+        if (!suggested_queue[core_id].empty()) {
+            std::array<s32, Core::Hardware::NUM_CPU_CORES> migration_candidates{};
+            std::size_t num_candidates = 0;
+            auto iter = suggested_queue[core_id].begin();
+            Thread* suggested = nullptr;
+            // Step 2: Try selecting a suggested thread.
+            while (iter != suggested_queue[core_id].end()) {
+                suggested = *iter;
+                iter++;
+                s32 suggested_core_id = suggested->GetProcessorID();
+                Thread* top_thread =
+                    suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr;
+                if (top_thread != suggested) {
+                    if (top_thread != nullptr &&
+                        top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
+                        suggested = nullptr;
+                        break;
+                        // There's a too high thread to do core migration, cancel
+                    }
+                    TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested);
+                    break;
+                }
+                suggested = nullptr;
+                migration_candidates[num_candidates++] = suggested_core_id;
+            }
+            // Step 3: Select a suggested thread from another core
+            if (suggested == nullptr) {
+                for (std::size_t i = 0; i < num_candidates; i++) {
+                    s32 candidate_core = migration_candidates[i];
+                    suggested = top_threads[candidate_core];
+                    auto it = scheduled_queue[candidate_core].begin();
+                    it++;
+                    Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
+                    if (next != nullptr) {
+                        TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
+                                       suggested);
+                        top_threads[candidate_core] = next;
+                        break;
+                    } else {
+                        suggested = nullptr;
+                    }
+                }
+            }
+            top_threads[core_id] = suggested;
         }
-        TransferToCore(winner->GetPriority(), static_cast<s32>(core), winner);
-        update_thread(winner, sched);
-        return;
+
+        idle_cores &= ~(1ul << core_id);
     }
-    // Step 3: Select a suggested thread from another core
-    for (auto& src_core : sug_cores) {
-        auto it = scheduled_queue[src_core].begin();
-        it++;
-        if (it != scheduled_queue[src_core].end()) {
-            Thread* thread_on_core = scheduled_queue[src_core].front();
-            Thread* to_change = *it;
-            if (thread_on_core->IsRunning() || to_change->IsRunning()) {
-                UnloadThread(static_cast<u32>(src_core));
-            }
-            TransferToCore(thread_on_core->GetPriority(), static_cast<s32>(core), thread_on_core);
-            current_thread = thread_on_core;
-            break;
+    u32 cores_needing_context_switch{};
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+        Scheduler& sched = kernel.Scheduler(core);
+        ASSERT(top_threads[core] == nullptr || top_threads[core]->GetProcessorID() == core);
+        if (update_thread(top_threads[core], sched)) {
+            cores_needing_context_switch |= (1ul << core);
         }
     }
-    update_thread(current_thread, sched);
+    return cores_needing_context_switch;
 }
 
 bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
+    ASSERT(is_locked);
     // Note: caller should use critical section, etc.
+    if (!yielding_thread->IsRunnable()) {
+        // Normally this case shouldn't happen except for SetThreadActivity.
+        is_reselection_pending.store(true, std::memory_order_release);
+        return false;
+    }
     const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
     const u32 priority = yielding_thread->GetPriority();
 
     // Yield the thread
-    const Thread* const winner = scheduled_queue[core_id].front(priority);
-    ASSERT_MSG(yielding_thread == winner, "Thread yielding without being in front");
-    scheduled_queue[core_id].yield(priority);
+    Reschedule(priority, core_id, yielding_thread);
+    const Thread* const winner = scheduled_queue[core_id].front();
+    if (kernel.GetCurrentHostThreadID() != core_id) {
+        is_reselection_pending.store(true, std::memory_order_release);
+    }
 
     return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
 
 bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
+    ASSERT(is_locked);
     // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
     // etc.
+    if (!yielding_thread->IsRunnable()) {
+        // Normally this case shouldn't happen except for SetThreadActivity.
+        is_reselection_pending.store(true, std::memory_order_release);
+        return false;
+    }
     const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
     const u32 priority = yielding_thread->GetPriority();
 
     // Yield the thread
-    ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority),
-               "Thread yielding without being in front");
-    scheduled_queue[core_id].yield(priority);
+    Reschedule(priority, core_id, yielding_thread);
 
     std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
     for (std::size_t i = 0; i < current_threads.size(); i++) {
@@ -153,21 +205,28 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
 
     if (winner != nullptr) {
         if (winner != yielding_thread) {
-            if (winner->IsRunning()) {
-                UnloadThread(static_cast<u32>(winner->GetProcessorID()));
-            }
             TransferToCore(winner->GetPriority(), s32(core_id), winner);
         }
     } else {
         winner = next_thread;
     }
 
+    if (kernel.GetCurrentHostThreadID() != core_id) {
+        is_reselection_pending.store(true, std::memory_order_release);
+    }
+
     return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
 
 bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
+    ASSERT(is_locked);
     // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
     // etc.
+    if (!yielding_thread->IsRunnable()) {
+        // Normally this case shouldn't happen except for SetThreadActivity.
+        is_reselection_pending.store(true, std::memory_order_release);
+        return false;
+    }
     Thread* winner = nullptr;
     const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
 
@@ -195,25 +254,31 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
         }
         if (winner != nullptr) {
             if (winner != yielding_thread) {
-                if (winner->IsRunning()) {
-                    UnloadThread(static_cast<u32>(winner->GetProcessorID()));
-                }
                 TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
             }
         } else {
             winner = yielding_thread;
         }
+    } else {
+        winner = scheduled_queue[core_id].front();
+    }
+
+    if (kernel.GetCurrentHostThreadID() != core_id) {
+        is_reselection_pending.store(true, std::memory_order_release);
     }
 
     return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
 
 void GlobalScheduler::PreemptThreads() {
+    ASSERT(is_locked);
     for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
         const u32 priority = preemption_priorities[core_id];
 
         if (scheduled_queue[core_id].size(priority) > 0) {
-            scheduled_queue[core_id].front(priority)->IncrementYieldCount();
+            if (scheduled_queue[core_id].size(priority) > 1) {
+                scheduled_queue[core_id].front(priority)->IncrementYieldCount();
+            }
             scheduled_queue[core_id].yield(priority);
             if (scheduled_queue[core_id].size(priority) > 1) {
                 scheduled_queue[core_id].front(priority)->IncrementYieldCount();
@@ -247,9 +312,6 @@ void GlobalScheduler::PreemptThreads() {
         }
 
         if (winner != nullptr) {
-            if (winner->IsRunning()) {
-                UnloadThread(static_cast<u32>(winner->GetProcessorID()));
-            }
             TransferToCore(winner->GetPriority(), s32(core_id), winner);
             current_thread =
                 winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
@@ -280,9 +342,6 @@ void GlobalScheduler::PreemptThreads() {
         }
 
         if (winner != nullptr) {
-            if (winner->IsRunning()) {
-                UnloadThread(static_cast<u32>(winner->GetProcessorID()));
-            }
             TransferToCore(winner->GetPriority(), s32(core_id), winner);
             current_thread = winner;
         }
@@ -292,34 +351,65 @@ void GlobalScheduler::PreemptThreads() {
     }
 }
 
+void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
+                                                 Core::EmuThreadHandle global_thread) {
+    u32 current_core = global_thread.host_handle;
+    bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
+                               (current_core < Core::Hardware::NUM_CPU_CORES);
+    while (cores_pending_reschedule != 0) {
+        u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
+        ASSERT(core < Core::Hardware::NUM_CPU_CORES);
+        if (!must_context_switch || core != current_core) {
+            auto& phys_core = kernel.PhysicalCore(core);
+            phys_core.Interrupt();
+        } else {
+            must_context_switch = true;
+        }
+        cores_pending_reschedule &= ~(1ul << core);
+    }
+    if (must_context_switch) {
+        auto& core_scheduler = kernel.CurrentScheduler();
+        kernel.ExitSVCProfile();
+        core_scheduler.TryDoContextSwitch();
+        kernel.EnterSVCProfile();
+    }
+}
+
 void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     suggested_queue[core].add(thread, priority);
 }
 
 void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     suggested_queue[core].remove(thread, priority);
 }
 
 void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
     scheduled_queue[core].add(thread, priority);
 }
 
 void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
     scheduled_queue[core].add(thread, priority, false);
 }
 
 void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     scheduled_queue[core].remove(thread, priority);
     scheduled_queue[core].add(thread, priority);
 }
 
 void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     scheduled_queue[core].remove(thread, priority);
 }
 
 void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
+    ASSERT(is_locked);
     const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
     const s32 source_core = thread->GetProcessorID();
     if (source_core == destination_core || !schedulable) {
@@ -349,6 +439,108 @@ bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread,
     }
 }
 
+void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
+    if (old_flags == thread->scheduling_state) {
+        return;
+    }
+    ASSERT(is_locked);
+
+    if (old_flags == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+        // In this case the thread was running, now it's pausing/exitting
+        if (thread->processor_id >= 0) {
+            Unschedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
+        }
+
+        for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+            if (core != static_cast<u32>(thread->processor_id) &&
+                ((thread->affinity_mask >> core) & 1) != 0) {
+                Unsuggest(thread->current_priority, core, thread);
+            }
+        }
+    } else if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
+        // The thread is now set to running from being stopped
+        if (thread->processor_id >= 0) {
+            Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
+        }
+
+        for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+            if (core != static_cast<u32>(thread->processor_id) &&
+                ((thread->affinity_mask >> core) & 1) != 0) {
+                Suggest(thread->current_priority, core, thread);
+            }
+        }
+    }
+
+    SetReselectionPending();
+}
+
+void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priority) {
+    if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable)) {
+        return;
+    }
+    ASSERT(is_locked);
+    if (thread->processor_id >= 0) {
+        Unschedule(old_priority, static_cast<u32>(thread->processor_id), thread);
+    }
+
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+        if (core != static_cast<u32>(thread->processor_id) &&
+            ((thread->affinity_mask >> core) & 1) != 0) {
+            Unsuggest(old_priority, core, thread);
+        }
+    }
+
+    if (thread->processor_id >= 0) {
+        if (thread == kernel.CurrentScheduler().GetCurrentThread()) {
+            SchedulePrepend(thread->current_priority, static_cast<u32>(thread->processor_id),
+                            thread);
+        } else {
+            Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
+        }
+    }
+
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+        if (core != static_cast<u32>(thread->processor_id) &&
+            ((thread->affinity_mask >> core) & 1) != 0) {
+            Suggest(thread->current_priority, core, thread);
+        }
+    }
+    thread->IncrementYieldCount();
+    SetReselectionPending();
+}
+
+void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask,
+                                                 s32 old_core) {
+    if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable) ||
+        thread->current_priority >= THREADPRIO_COUNT) {
+        return;
+    }
+    ASSERT(is_locked);
+
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+        if (((old_affinity_mask >> core) & 1) != 0) {
+            if (core == static_cast<u32>(old_core)) {
+                Unschedule(thread->current_priority, core, thread);
+            } else {
+                Unsuggest(thread->current_priority, core, thread);
+            }
+        }
+    }
+
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+        if (((thread->affinity_mask >> core) & 1) != 0) {
+            if (core == static_cast<u32>(thread->processor_id)) {
+                Schedule(thread->current_priority, core, thread);
+            } else {
+                Suggest(thread->current_priority, core, thread);
+            }
+        }
+    }
+
+    thread->IncrementYieldCount();
+    SetReselectionPending();
+}
+
 void GlobalScheduler::Shutdown() {
     for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         scheduled_queue[core].clear();
@@ -359,10 +551,12 @@ void GlobalScheduler::Shutdown() {
 
 void GlobalScheduler::Lock() {
     Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID();
+    ASSERT(!current_thread.IsInvalid());
     if (current_thread == current_owner) {
         ++scope_lock;
     } else {
         inner_lock.lock();
+        is_locked = true;
         current_owner = current_thread;
         ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle());
         scope_lock = 1;
@@ -374,17 +568,18 @@ void GlobalScheduler::Unlock() {
         ASSERT(scope_lock > 0);
         return;
     }
-    for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
-        SelectThread(i);
-    }
+    u32 cores_pending_reschedule = SelectThreads();
+    Core::EmuThreadHandle leaving_thread = current_owner;
     current_owner = Core::EmuThreadHandle::InvalidHandle();
     scope_lock = 1;
+    is_locked = false;
     inner_lock.unlock();
-    // TODO(Blinkhawk): Setup the interrupts and change context on current core.
+    EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread);
 }
 
-Scheduler::Scheduler(Core::System& system, std::size_t core_id)
-    : system{system}, core_id{core_id} {}
+Scheduler::Scheduler(Core::System& system, std::size_t core_id) : system(system), core_id(core_id) {
+    switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this);
+}
 
 Scheduler::~Scheduler() = default;
 
@@ -393,56 +588,128 @@ bool Scheduler::HaveReadyThreads() const {
 }
 
 Thread* Scheduler::GetCurrentThread() const {
-    return current_thread.get();
+    if (current_thread) {
+        return current_thread.get();
+    }
+    return idle_thread.get();
 }
 
 Thread* Scheduler::GetSelectedThread() const {
     return selected_thread.get();
 }
 
-void Scheduler::SelectThreads() {
-    system.GlobalScheduler().SelectThread(core_id);
-}
-
 u64 Scheduler::GetLastContextSwitchTicks() const {
     return last_context_switch_time;
 }
 
 void Scheduler::TryDoContextSwitch() {
+    auto& phys_core = system.Kernel().CurrentPhysicalCore();
+    if (phys_core.IsInterrupted()) {
+        phys_core.ClearInterrupt();
+    }
+    guard.lock();
     if (is_context_switch_pending) {
         SwitchContext();
+    } else {
+        guard.unlock();
     }
 }
 
-void Scheduler::UnloadThread() {
-    Thread* const previous_thread = GetCurrentThread();
-    Process* const previous_process = system.Kernel().CurrentProcess();
+void Scheduler::OnThreadStart() {
+    SwitchContextStep2();
+}
 
-    UpdateLastContextSwitchTime(previous_thread, previous_process);
+void Scheduler::Unload() {
+    Thread* thread = current_thread.get();
+    if (thread) {
+        thread->SetContinuousOnSVC(false);
+        thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
+        thread->SetIsRunning(false);
+        if (!thread->IsHLEThread() && !thread->HasExited()) {
+            Core::ARM_Interface& cpu_core = thread->ArmInterface();
+            cpu_core.SaveContext(thread->GetContext32());
+            cpu_core.SaveContext(thread->GetContext64());
+            // Save the TPIDR_EL0 system register in case it was modified.
+            thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+            cpu_core.ClearExclusiveState();
+        }
+        thread->context_guard.unlock();
+    }
+}
 
-    // Save context for previous thread
-    if (previous_thread) {
-        system.ArmInterface(core_id).SaveContext(previous_thread->GetContext32());
-        system.ArmInterface(core_id).SaveContext(previous_thread->GetContext64());
-        // Save the TPIDR_EL0 system register in case it was modified.
-        previous_thread->SetTPIDR_EL0(system.ArmInterface(core_id).GetTPIDR_EL0());
+void Scheduler::Reload() {
+    Thread* thread = current_thread.get();
+    if (thread) {
+        ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
+                   "Thread must be runnable.");
 
-        if (previous_thread->GetStatus() == ThreadStatus::Running) {
-            // This is only the case when a reschedule is triggered without the current thread
-            // yielding execution (i.e. an event triggered, system core time-sliced, etc)
-            previous_thread->SetStatus(ThreadStatus::Ready);
+        // Cancel any outstanding wakeup events for this thread
+        thread->SetIsRunning(true);
+        thread->SetWasRunning(false);
+        thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
+
+        auto* const thread_owner_process = thread->GetOwnerProcess();
+        if (thread_owner_process != nullptr) {
+            system.Kernel().MakeCurrentProcess(thread_owner_process);
+        }
+        if (!thread->IsHLEThread()) {
+            Core::ARM_Interface& cpu_core = thread->ArmInterface();
+            cpu_core.LoadContext(thread->GetContext32());
+            cpu_core.LoadContext(thread->GetContext64());
+            cpu_core.SetTlsAddress(thread->GetTLSAddress());
+            cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+            cpu_core.ChangeProcessorID(this->core_id);
+            cpu_core.ClearExclusiveState();
         }
-        previous_thread->SetIsRunning(false);
     }
-    current_thread = nullptr;
+}
+
+void Scheduler::SwitchContextStep2() {
+    Thread* previous_thread = current_thread_prev.get();
+    Thread* new_thread = selected_thread.get();
+
+    // Load context of new thread
+    Process* const previous_process =
+        previous_thread != nullptr ? previous_thread->GetOwnerProcess() : nullptr;
+
+    if (new_thread) {
+        ASSERT_MSG(new_thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
+                   "Thread must be runnable.");
+
+        // Cancel any outstanding wakeup events for this thread
+        new_thread->SetIsRunning(true);
+        new_thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
+        new_thread->SetWasRunning(false);
+
+        auto* const thread_owner_process = current_thread->GetOwnerProcess();
+        if (thread_owner_process != nullptr) {
+            system.Kernel().MakeCurrentProcess(thread_owner_process);
+        }
+        if (!new_thread->IsHLEThread()) {
+            Core::ARM_Interface& cpu_core = new_thread->ArmInterface();
+            cpu_core.LoadContext(new_thread->GetContext32());
+            cpu_core.LoadContext(new_thread->GetContext64());
+            cpu_core.SetTlsAddress(new_thread->GetTLSAddress());
+            cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
+            cpu_core.ChangeProcessorID(this->core_id);
+            cpu_core.ClearExclusiveState();
+        }
+    }
+
+    TryDoContextSwitch();
 }
 
 void Scheduler::SwitchContext() {
-    Thread* const previous_thread = GetCurrentThread();
-    Thread* const new_thread = GetSelectedThread();
+    current_thread_prev = current_thread;
+    selected_thread = selected_thread_set;
+    Thread* previous_thread = current_thread_prev.get();
+    Thread* new_thread = selected_thread.get();
+    current_thread = selected_thread;
 
     is_context_switch_pending = false;
+
     if (new_thread == previous_thread) {
+        guard.unlock();
         return;
     }
 
@@ -452,51 +719,75 @@ void Scheduler::SwitchContext() {
 
     // Save context for previous thread
     if (previous_thread) {
-        system.ArmInterface(core_id).SaveContext(previous_thread->GetContext32());
-        system.ArmInterface(core_id).SaveContext(previous_thread->GetContext64());
-        // Save the TPIDR_EL0 system register in case it was modified.
-        previous_thread->SetTPIDR_EL0(system.ArmInterface(core_id).GetTPIDR_EL0());
-
-        if (previous_thread->GetStatus() == ThreadStatus::Running) {
-            // This is only the case when a reschedule is triggered without the current thread
-            // yielding execution (i.e. an event triggered, system core time-sliced, etc)
-            previous_thread->SetStatus(ThreadStatus::Ready);
+        if (new_thread != nullptr && new_thread->IsSuspendThread()) {
+            previous_thread->SetWasRunning(true);
         }
+        previous_thread->SetContinuousOnSVC(false);
+        previous_thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
         previous_thread->SetIsRunning(false);
-    }
-
-    // Load context of new thread
-    if (new_thread) {
-        ASSERT_MSG(new_thread->GetProcessorID() == s32(this->core_id),
-                   "Thread must be assigned to this core.");
-        ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready,
-                   "Thread must be ready to become running.");
-
-        // Cancel any outstanding wakeup events for this thread
-        new_thread->CancelWakeupTimer();
-        current_thread = SharedFrom(new_thread);
-        new_thread->SetStatus(ThreadStatus::Running);
-        new_thread->SetIsRunning(true);
-
-        auto* const thread_owner_process = current_thread->GetOwnerProcess();
-        if (previous_process != thread_owner_process) {
-            system.Kernel().MakeCurrentProcess(thread_owner_process);
+        if (!previous_thread->IsHLEThread() && !previous_thread->HasExited()) {
+            Core::ARM_Interface& cpu_core = previous_thread->ArmInterface();
+            cpu_core.SaveContext(previous_thread->GetContext32());
+            cpu_core.SaveContext(previous_thread->GetContext64());
+            // Save the TPIDR_EL0 system register in case it was modified.
+            previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+            cpu_core.ClearExclusiveState();
         }
+        previous_thread->context_guard.unlock();
+    }
 
-        system.ArmInterface(core_id).LoadContext(new_thread->GetContext32());
-        system.ArmInterface(core_id).LoadContext(new_thread->GetContext64());
-        system.ArmInterface(core_id).SetTlsAddress(new_thread->GetTLSAddress());
-        system.ArmInterface(core_id).SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
+    std::shared_ptr<Common::Fiber>* old_context;
+    if (previous_thread != nullptr) {
+        old_context = &previous_thread->GetHostContext();
     } else {
-        current_thread = nullptr;
-        // Note: We do not reset the current process and current page table when idling because
-        // technically we haven't changed processes, our threads are just paused.
+        old_context = &idle_thread->GetHostContext();
+    }
+    guard.unlock();
+
+    Common::Fiber::YieldTo(*old_context, switch_fiber);
+    /// When a thread wakes up, the scheduler may have changed to other in another core.
+    auto& next_scheduler = system.Kernel().CurrentScheduler();
+    next_scheduler.SwitchContextStep2();
+}
+
+void Scheduler::OnSwitch(void* this_scheduler) {
+    Scheduler* sched = static_cast<Scheduler*>(this_scheduler);
+    sched->SwitchToCurrent();
+}
+
+void Scheduler::SwitchToCurrent() {
+    while (true) {
+        guard.lock();
+        selected_thread = selected_thread_set;
+        current_thread = selected_thread;
+        is_context_switch_pending = false;
+        guard.unlock();
+        while (!is_context_switch_pending) {
+            if (current_thread != nullptr && !current_thread->IsHLEThread()) {
+                current_thread->context_guard.lock();
+                if (!current_thread->IsRunnable()) {
+                    current_thread->context_guard.unlock();
+                    break;
+                }
+                if (current_thread->GetProcessorID() != core_id) {
+                    current_thread->context_guard.unlock();
+                    break;
+                }
+            }
+            std::shared_ptr<Common::Fiber>* next_context;
+            if (current_thread != nullptr) {
+                next_context = &current_thread->GetHostContext();
+            } else {
+                next_context = &idle_thread->GetHostContext();
+            }
+            Common::Fiber::YieldTo(switch_fiber, *next_context);
+        }
     }
 }
 
 void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
     const u64 prev_switch_ticks = last_context_switch_time;
-    const u64 most_recent_switch_ticks = system.CoreTiming().GetTicks();
+    const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
     const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
 
     if (thread != nullptr) {
@@ -510,6 +801,16 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
     last_context_switch_time = most_recent_switch_ticks;
 }
 
+void Scheduler::Initialize() {
+    std::string name = "Idle Thread Id:" + std::to_string(core_id);
+    std::function<void(void*)> init_func = system.GetCpuManager().GetIdleThreadStartFunc();
+    void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
+    ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
+    auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
+                                     nullptr, std::move(init_func), init_func_parameter);
+    idle_thread = std::move(thread_res).Unwrap();
+}
+
 void Scheduler::Shutdown() {
     current_thread = nullptr;
     selected_thread = nullptr;
@@ -538,4 +839,13 @@ SchedulerLockAndSleep::~SchedulerLockAndSleep() {
     time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
 }
 
+void SchedulerLockAndSleep::Release() {
+    if (sleep_cancelled) {
+        return;
+    }
+    auto& time_manager = kernel.TimeManager();
+    time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
+    sleep_cancelled = true;
+}
+
 } // namespace Kernel
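
GlobalScheduler::Lock() and Unlock() above form a re-entrant lock that defers all scheduling work to the outermost Unlock(): SelectThreads() picks new threads while inner_lock is still held, and EnableInterruptAndSchedule() interrupts the affected cores only after the lock is released. A standalone sketch of that defer-to-last-unlock shape (hypothetical types and names, not yuzu code):

#include <atomic>
#include <cstdio>
#include <mutex>
#include <thread>

class DeferredRescheduleLock {
public:
    void Lock() {
        const auto me = std::this_thread::get_id();
        if (owner.load() == me) {
            ++scope_lock; // re-entrant acquire by the current owner
            return;
        }
        inner_lock.lock();
        owner.store(me);
        scope_lock = 1;
    }

    void Unlock() {
        // Assumes the caller currently owns the lock.
        if (--scope_lock != 0) {
            return; // still nested; keep deferring the work
        }
        const unsigned cores = SelectThreads(); // decide while still locked
        owner.store({});
        inner_lock.unlock();
        EnableInterruptAndSchedule(cores); // kick cores only after unlocking
    }

private:
    // Stand-ins for the real selection and interrupt logic.
    unsigned SelectThreads() { return 0b0101; }
    void EnableInterruptAndSchedule(unsigned mask) {
        std::printf("interrupting cores with mask 0x%x\n", mask);
    }

    std::mutex inner_lock;
    std::atomic<std::thread::id> owner{};
    int scope_lock = 0;
};

Interrupting the cores only after inner_lock is dropped mirrors the diff's ordering: a core woken by the interrupt can immediately take the scheduler lock itself without deadlocking against the unlocking thread.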