Diffstat (limited to 'src')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp | 449
-rw-r--r--  src/core/hle/kernel/scheduler.h   | 220
2 files changed, 411 insertions, 258 deletions
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index e8447b69a..878aeed6d 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -3,6 +3,8 @@
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <algorithm> 5#include <algorithm>
6#include <set>
7#include <unordered_set>
6#include <utility> 8#include <utility>
7 9
8#include "common/assert.h" 10#include "common/assert.h"
@@ -17,57 +19,314 @@
17 19
18namespace Kernel { 20namespace Kernel {
19 21
20std::mutex Scheduler::scheduler_mutex; 22void GlobalScheduler::AddThread(SharedPtr<Thread> thread) {
23 thread_list.push_back(std::move(thread));
24}
25
26void GlobalScheduler::RemoveThread(Thread* thread) {
27 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
28 thread_list.end());
29}
30
31/*
32 * SelectThreads, Yield functions originally by TuxSH.
33 * licensed under GPLv2 or later under exception provided by the author.
34 */
35
36void GlobalScheduler::UnloadThread(s32 core) {
37 Scheduler& sched = Core::System::GetInstance().Scheduler(core);
38 sched.UnloadThread();
39}
40
41void GlobalScheduler::SelectThread(u32 core) {
42 auto update_thread = [](Thread* thread, Scheduler& sched) {
43 if (thread != sched.selected_thread) {
44 if (thread == nullptr) {
45 ++sched.idle_selection_count;
46 }
47 sched.selected_thread = thread;
48 }
49 sched.context_switch_pending = sched.selected_thread != sched.current_thread;
50 std::atomic_thread_fence(std::memory_order_seq_cst);
51 };
52 Scheduler& sched = Core::System::GetInstance().Scheduler(core);
53 Thread* current_thread = nullptr;
54 current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
55 if (!current_thread) {
56 Thread* winner = nullptr;
57 std::set<s32> sug_cores;
58 for (auto thread : suggested_queue[core]) {
59 s32 this_core = thread->GetProcessorID();
60 Thread* thread_on_core = nullptr;
61 if (this_core >= 0) {
62 thread_on_core = scheduled_queue[this_core].front();
63 }
64 if (this_core < 0 || thread != thread_on_core) {
65 winner = thread;
66 break;
67 }
68 sug_cores.insert(this_core);
69 }
70 if (winner && winner->GetPriority() > 2) {
71 if (winner->IsRunning()) {
72 UnloadThread(winner->GetProcessorID());
73 }
74 TransferToCore(winner->GetPriority(), core, winner);
75 current_thread = winner;
76 } else {
77 for (auto& src_core : sug_cores) {
78 auto it = scheduled_queue[src_core].begin();
79 it++;
80 if (it != scheduled_queue[src_core].end()) {
81 Thread* thread_on_core = scheduled_queue[src_core].front();
82 Thread* to_change = *it;
83 if (thread_on_core->IsRunning() || to_change->IsRunning()) {
84 UnloadThread(src_core);
85 }
86 TransferToCore(thread_on_core->GetPriority(), core, thread_on_core);
87 current_thread = thread_on_core;
88 }
89 }
90 }
91 }
92 update_thread(current_thread, sched);
93}
21 94
22Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core) 95void GlobalScheduler::SelectThreads() {
23 : cpu_core{cpu_core}, system{system} {} 96 auto update_thread = [](Thread* thread, Scheduler& sched) {
97 if (thread != sched.selected_thread) {
98 if (thread == nullptr) {
99 ++sched.idle_selection_count;
100 }
101 sched.selected_thread = thread;
102 }
103 sched.context_switch_pending = sched.selected_thread != sched.current_thread;
104 std::atomic_thread_fence(std::memory_order_seq_cst);
105 };
106
107 auto& system = Core::System::GetInstance();
108
109 std::unordered_set<Thread*> picked_threads;
 110 // This maintains the "current thread is on front of queue" invariant
111 std::array<Thread*, NUM_CPU_CORES> current_threads;
112 for (u32 i = 0; i < NUM_CPU_CORES; i++) {
113 Scheduler& sched = system.Scheduler(i);
114 current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
115 if (current_threads[i])
116 picked_threads.insert(current_threads[i]);
117 update_thread(current_threads[i], sched);
118 }
119
120 // Do some load-balancing. Allow second pass.
121 std::array<Thread*, NUM_CPU_CORES> current_threads_2 = current_threads;
122 for (u32 i = 0; i < NUM_CPU_CORES; i++) {
123 if (!scheduled_queue[i].empty()) {
124 continue;
125 }
126 Thread* winner = nullptr;
127 for (auto thread : suggested_queue[i]) {
128 if (thread->GetProcessorID() < 0 || thread != current_threads[i]) {
129 if (picked_threads.count(thread) == 0 && !thread->IsRunning()) {
130 winner = thread;
131 break;
132 }
133 }
134 }
135 if (winner) {
136 TransferToCore(winner->GetPriority(), i, winner);
137 current_threads_2[i] = winner;
138 picked_threads.insert(winner);
139 }
140 }
24 141
25Scheduler::~Scheduler() { 142 // See which to-be-current threads have changed & update accordingly
26 for (auto& thread : thread_list) { 143 for (u32 i = 0; i < NUM_CPU_CORES; i++) {
27 thread->Stop(); 144 Scheduler& sched = system.Scheduler(i);
145 if (current_threads_2[i] != current_threads[i]) {
146 update_thread(current_threads_2[i], sched);
147 }
28 } 148 }
149
150 reselection_pending.store(false, std::memory_order_release);
29} 151}
30 152
153void GlobalScheduler::YieldThread(Thread* yielding_thread) {
154 // Note: caller should use critical section, etc.
155 u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
156 u32 priority = yielding_thread->GetPriority();
157
158 // Yield the thread
159 ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority),
160 "Thread yielding without being in front");
161 scheduled_queue[core_id].yield(priority);
162
163 Thread* winner = scheduled_queue[core_id].front(priority);
164 AskForReselectionOrMarkRedundant(yielding_thread, winner);
165}
166
167void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
168 // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
169 // etc.
170 u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
171 u32 priority = yielding_thread->GetPriority();
172
173 // Yield the thread
174 ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority),
175 "Thread yielding without being in front");
176 scheduled_queue[core_id].yield(priority);
177
178 std::array<Thread*, NUM_CPU_CORES> current_threads;
179 for (u32 i = 0; i < NUM_CPU_CORES; i++) {
180 current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
181 }
182
183 Thread* next_thread = scheduled_queue[core_id].front(priority);
184 Thread* winner = nullptr;
185 for (auto& thread : suggested_queue[core_id]) {
186 s32 source_core = thread->GetProcessorID();
187 if (source_core >= 0) {
188 if (current_threads[source_core] != nullptr) {
189 if (thread == current_threads[source_core] ||
190 current_threads[source_core]->GetPriority() < min_regular_priority)
191 continue;
192 }
193 if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
194 next_thread->GetPriority() < thread->GetPriority()) {
195 if (thread->GetPriority() <= priority) {
196 winner = thread;
197 break;
198 }
199 }
200 }
201 }
202
203 if (winner != nullptr) {
204 if (winner != yielding_thread) {
205 if (winner->IsRunning())
206 UnloadThread(winner->GetProcessorID());
207 TransferToCore(winner->GetPriority(), core_id, winner);
208 }
209 } else {
210 winner = next_thread;
211 }
212
213 AskForReselectionOrMarkRedundant(yielding_thread, winner);
214}
215
216void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
217 // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
218 // etc.
219 Thread* winner = nullptr;
220 u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
221
222 // Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one instead
223 TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread);
224
225 // If the core is idle, perform load balancing, excluding the threads that have just used this
226 // function...
227 if (scheduled_queue[core_id].empty()) {
228 // Here, "current_threads" is calculated after the ""yield"", unlike yield -1
229 std::array<Thread*, NUM_CPU_CORES> current_threads;
230 for (u32 i = 0; i < NUM_CPU_CORES; i++) {
231 current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
232 }
233 for (auto& thread : suggested_queue[core_id]) {
234 s32 source_core = thread->GetProcessorID();
235 if (source_core < 0 || thread == current_threads[source_core])
236 continue;
237 if (current_threads[source_core] == nullptr ||
238 current_threads[source_core]->GetPriority() >= min_regular_priority) {
239 winner = thread;
240 }
241 break;
242 }
243 if (winner != nullptr) {
244 if (winner != yielding_thread) {
245 if (winner->IsRunning())
246 UnloadThread(winner->GetProcessorID());
247 TransferToCore(winner->GetPriority(), core_id, winner);
248 }
249 } else {
250 winner = yielding_thread;
251 }
252 }
253
254 AskForReselectionOrMarkRedundant(yielding_thread, winner);
255}
256
257void GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
258 if (current_thread == winner) {
259 // Nintendo (not us) has a nullderef bug on current_thread->owner, but which is never
260 // triggered.
261 // current_thread->SetRedundantSchedulerOperation();
262 } else {
263 reselection_pending.store(true, std::memory_order_release);
264 }
265}
266
267GlobalScheduler::~GlobalScheduler() = default;
268
269Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 id)
270 : system(system), cpu_core(cpu_core), id(id) {}
271
272Scheduler::~Scheduler() {}
273
31bool Scheduler::HaveReadyThreads() const { 274bool Scheduler::HaveReadyThreads() const {
32 std::lock_guard lock{scheduler_mutex}; 275 return system.GlobalScheduler().HaveReadyThreads(id);
33 return !ready_queue.empty();
34} 276}
35 277
36Thread* Scheduler::GetCurrentThread() const { 278Thread* Scheduler::GetCurrentThread() const {
37 return current_thread.get(); 279 return current_thread.get();
38} 280}
39 281
282Thread* Scheduler::GetSelectedThread() const {
283 return selected_thread.get();
284}
285
286void Scheduler::SelectThreads() {
287 system.GlobalScheduler().SelectThread(id);
288}
289
40u64 Scheduler::GetLastContextSwitchTicks() const { 290u64 Scheduler::GetLastContextSwitchTicks() const {
41 return last_context_switch_time; 291 return last_context_switch_time;
42} 292}
43 293
44Thread* Scheduler::PopNextReadyThread() { 294void Scheduler::TryDoContextSwitch() {
45 Thread* next = nullptr; 295 if (context_switch_pending)
46 Thread* thread = GetCurrentThread(); 296 SwitchContext();
297}
298
299void Scheduler::UnloadThread() {
300 Thread* const previous_thread = GetCurrentThread();
301 Process* const previous_process = Core::CurrentProcess();
47 302
48 if (thread && thread->GetStatus() == ThreadStatus::Running) { 303 UpdateLastContextSwitchTime(previous_thread, previous_process);
49 if (ready_queue.empty()) { 304
50 return thread; 305 // Save context for previous thread
51 } 306 if (previous_thread) {
52 // We have to do better than the current thread. 307 cpu_core.SaveContext(previous_thread->GetContext());
53 // This call returns null when that's not possible. 308 // Save the TPIDR_EL0 system register in case it was modified.
54 next = ready_queue.front(); 309 previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
55 if (next == nullptr || next->GetPriority() >= thread->GetPriority()) { 310
56 next = thread; 311 if (previous_thread->GetStatus() == ThreadStatus::Running) {
57 } 312 // This is only the case when a reschedule is triggered without the current thread
58 } else { 313 // yielding execution (i.e. an event triggered, system core time-sliced, etc)
59 if (ready_queue.empty()) { 314 previous_thread->SetStatus(ThreadStatus::Ready);
60 return nullptr;
61 } 315 }
62 next = ready_queue.front(); 316 previous_thread->SetIsRunning(false);
63 } 317 }
64 318 current_thread = nullptr;
65 return next;
66} 319}
67 320
68void Scheduler::SwitchContext(Thread* new_thread) { 321void Scheduler::SwitchContext() {
69 Thread* previous_thread = GetCurrentThread(); 322 Thread* const previous_thread = GetCurrentThread();
70 Process* const previous_process = system.Kernel().CurrentProcess(); 323 Thread* const new_thread = GetSelectedThread();
324
325 context_switch_pending = false;
326 if (new_thread == previous_thread)
327 return;
328
329 Process* const previous_process = Core::CurrentProcess();
71 330
72 UpdateLastContextSwitchTime(previous_thread, previous_process); 331 UpdateLastContextSwitchTime(previous_thread, previous_process);
73 332
@@ -80,23 +339,23 @@ void Scheduler::SwitchContext(Thread* new_thread) {
80 if (previous_thread->GetStatus() == ThreadStatus::Running) { 339 if (previous_thread->GetStatus() == ThreadStatus::Running) {
81 // This is only the case when a reschedule is triggered without the current thread 340 // This is only the case when a reschedule is triggered without the current thread
82 // yielding execution (i.e. an event triggered, system core time-sliced, etc) 341 // yielding execution (i.e. an event triggered, system core time-sliced, etc)
83 ready_queue.add(previous_thread, previous_thread->GetPriority(), false);
84 previous_thread->SetStatus(ThreadStatus::Ready); 342 previous_thread->SetStatus(ThreadStatus::Ready);
85 } 343 }
344 previous_thread->SetIsRunning(false);
86 } 345 }
87 346
88 // Load context of new thread 347 // Load context of new thread
89 if (new_thread) { 348 if (new_thread) {
349 ASSERT_MSG(new_thread->GetProcessorID() == this->id,
350 "Thread must be assigned to this core.");
90 ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready, 351 ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready,
91 "Thread must be ready to become running."); 352 "Thread must be ready to become running.");
92 353
93 // Cancel any outstanding wakeup events for this thread 354 // Cancel any outstanding wakeup events for this thread
94 new_thread->CancelWakeupTimer(); 355 new_thread->CancelWakeupTimer();
95
96 current_thread = new_thread; 356 current_thread = new_thread;
97
98 ready_queue.remove(new_thread, new_thread->GetPriority());
99 new_thread->SetStatus(ThreadStatus::Running); 357 new_thread->SetStatus(ThreadStatus::Running);
358 new_thread->SetIsRunning(true);
100 359
101 auto* const thread_owner_process = current_thread->GetOwnerProcess(); 360 auto* const thread_owner_process = current_thread->GetOwnerProcess();
102 if (previous_process != thread_owner_process) { 361 if (previous_process != thread_owner_process) {
@@ -116,7 +375,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
116 375
117void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { 376void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
118 const u64 prev_switch_ticks = last_context_switch_time; 377 const u64 prev_switch_ticks = last_context_switch_time;
119 const u64 most_recent_switch_ticks = system.CoreTiming().GetTicks(); 378 const u64 most_recent_switch_ticks = Core::System::GetInstance().CoreTiming().GetTicks();
120 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; 379 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
121 380
122 if (thread != nullptr) { 381 if (thread != nullptr) {
@@ -130,124 +389,4 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
130 last_context_switch_time = most_recent_switch_ticks; 389 last_context_switch_time = most_recent_switch_ticks;
131} 390}
132 391
133void Scheduler::Reschedule() {
134 std::lock_guard lock{scheduler_mutex};
135
136 Thread* cur = GetCurrentThread();
137 Thread* next = PopNextReadyThread();
138
139 if (cur && next) {
140 LOG_TRACE(Kernel, "context switch {} -> {}", cur->GetObjectId(), next->GetObjectId());
141 } else if (cur) {
142 LOG_TRACE(Kernel, "context switch {} -> idle", cur->GetObjectId());
143 } else if (next) {
144 LOG_TRACE(Kernel, "context switch idle -> {}", next->GetObjectId());
145 }
146
147 SwitchContext(next);
148}
149
150void Scheduler::AddThread(SharedPtr<Thread> thread) {
151 std::lock_guard lock{scheduler_mutex};
152
153 thread_list.push_back(std::move(thread));
154}
155
156void Scheduler::RemoveThread(Thread* thread) {
157 std::lock_guard lock{scheduler_mutex};
158
159 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
160 thread_list.end());
161}
162
163void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
164 std::lock_guard lock{scheduler_mutex};
165
166 ASSERT(thread->GetStatus() == ThreadStatus::Ready);
167 ready_queue.add(thread, priority);
168}
169
170void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
171 std::lock_guard lock{scheduler_mutex};
172
173 ASSERT(thread->GetStatus() == ThreadStatus::Ready);
174 ready_queue.remove(thread, priority);
175}
176
177void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
178 std::lock_guard lock{scheduler_mutex};
179 if (thread->GetPriority() == priority) {
180 return;
181 }
182
183 // If thread was ready, adjust queues
184 if (thread->GetStatus() == ThreadStatus::Ready)
185 ready_queue.adjust(thread, thread->GetPriority(), priority);
186}
187
188Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
189 std::lock_guard lock{scheduler_mutex};
190
191 const u32 mask = 1U << core;
192 for (auto* thread : ready_queue) {
193 if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) {
194 return thread;
195 }
196 }
197 return nullptr;
198}
199
200void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
201 ASSERT(thread != nullptr);
202 // Avoid yielding if the thread isn't even running.
203 ASSERT(thread->GetStatus() == ThreadStatus::Running);
204
205 // Sanity check that the priority is valid
206 ASSERT(thread->GetPriority() < THREADPRIO_COUNT);
207
208 // Yield this thread -- sleep for zero time and force reschedule to different thread
209 GetCurrentThread()->Sleep(0);
210}
211
212void Scheduler::YieldWithLoadBalancing(Thread* thread) {
213 ASSERT(thread != nullptr);
214 const auto priority = thread->GetPriority();
215 const auto core = static_cast<u32>(thread->GetProcessorID());
216
217 // Avoid yielding if the thread isn't even running.
218 ASSERT(thread->GetStatus() == ThreadStatus::Running);
219
220 // Sanity check that the priority is valid
221 ASSERT(priority < THREADPRIO_COUNT);
222
223 // Sleep for zero time to be able to force reschedule to different thread
224 GetCurrentThread()->Sleep(0);
225
226 Thread* suggested_thread = nullptr;
227
228 // Search through all of the cpu cores (except this one) for a suggested thread.
229 // Take the first non-nullptr one
230 for (unsigned cur_core = 0; cur_core < Core::NUM_CPU_CORES; ++cur_core) {
231 const auto res =
232 system.CpuCore(cur_core).Scheduler().GetNextSuggestedThread(core, priority);
233
234 // If scheduler provides a suggested thread
235 if (res != nullptr) {
236 // And its better than the current suggested thread (or is the first valid one)
237 if (suggested_thread == nullptr ||
238 suggested_thread->GetPriority() > res->GetPriority()) {
239 suggested_thread = res;
240 }
241 }
242 }
243
244 // If a suggested thread was found, queue that for this core
245 if (suggested_thread != nullptr)
246 suggested_thread->ChangeCore(core, suggested_thread->GetAffinityMask());
247}
248
249void Scheduler::YieldAndWaitForLoadBalancing(Thread* thread) {
250 UNIMPLEMENTED_MSG("Wait for load balancing thread yield type is not implemented!");
251}
252
253} // namespace Kernel 392} // namespace Kernel
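
The per-core half of the rewrite is now a simple hand-off: the global scheduler publishes selected_thread and sets context_switch_pending, and the core's TryDoContextSwitch swaps it in. The following is a hedged, self-contained sketch with placeholder types (ToyThread, ToyCore; the SaveContext/LoadContext calls reduced to comments); it mirrors the flow of the new SwitchContext but is not the actual class.

// Minimal model of the selected_thread -> current_thread hand-off performed by
// Scheduler::TryDoContextSwitch / SwitchContext (placeholder types, no kernel deps).
#include <iostream>
#include <string>

struct ToyThread {
    std::string name;
    bool running = false;
};

struct ToyCore {
    ToyThread* current_thread = nullptr;   // what the CPU is executing now
    ToyThread* selected_thread = nullptr;  // what the global scheduler chose
    bool context_switch_pending = false;

    void SwitchContext() {
        context_switch_pending = false;
        if (selected_thread == current_thread) {
            return;  // the right thread is already loaded
        }
        if (current_thread) {
            // SaveContext(current_thread) would go here.
            current_thread->running = false;
        }
        current_thread = selected_thread;
        if (current_thread) {
            // LoadContext(current_thread) would go here.
            current_thread->running = true;
        }
    }

    void TryDoContextSwitch() {
        if (context_switch_pending) {
            SwitchContext();
        }
    }
};

int main() {
    ToyThread a{"A"}, b{"B"};
    ToyCore core;
    core.current_thread = &a;
    a.running = true;

    // The global scheduler decided this core should now run B.
    core.selected_thread = &b;
    core.context_switch_pending = true;
    core.TryDoContextSwitch();

    std::cout << "running: " << core.current_thread->name << '\n';  // prints B
}

Keeping selection (GlobalScheduler) separate from the switch itself (per-core Scheduler) is the main structural change over the old single ready_queue.
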
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index b29bf7be8..50fa7376b 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -20,124 +20,141 @@ namespace Kernel {
20 20
21class Process; 21class Process;
22 22
23class Scheduler final { 23class GlobalScheduler final {
24public: 24public:
25 explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core); 25 static constexpr u32 NUM_CPU_CORES = 4;
26 ~Scheduler();
27
28 /// Returns whether there are any threads that are ready to run.
29 bool HaveReadyThreads() const;
30
31 /// Reschedules to the next available thread (call after current thread is suspended)
32 void Reschedule();
33
34 /// Gets the current running thread
35 Thread* GetCurrentThread() const;
36
37 /// Gets the timestamp for the last context switch in ticks.
38 u64 GetLastContextSwitchTicks() const;
39 26
27 GlobalScheduler() {
28 reselection_pending = false;
29 }
30 ~GlobalScheduler();
40 /// Adds a new thread to the scheduler 31 /// Adds a new thread to the scheduler
41 void AddThread(SharedPtr<Thread> thread); 32 void AddThread(SharedPtr<Thread> thread);
42 33
43 /// Removes a thread from the scheduler 34 /// Removes a thread from the scheduler
44 void RemoveThread(Thread* thread); 35 void RemoveThread(Thread* thread);
45 36
46 /// Schedules a thread that has become "ready" 37 /// Returns a list of all threads managed by the scheduler
47 void ScheduleThread(Thread* thread, u32 priority); 38 const std::vector<SharedPtr<Thread>>& GetThreadList() const {
39 return thread_list;
40 }
48 41
49 /// Unschedules a thread that was already scheduled 42 void Suggest(u32 priority, u32 core, Thread* thread) {
50 void UnscheduleThread(Thread* thread, u32 priority); 43 suggested_queue[core].add(thread, priority);
44 }
51 45
52 /// Sets the priority of a thread in the scheduler 46 void Unsuggest(u32 priority, u32 core, Thread* thread) {
53 void SetThreadPriority(Thread* thread, u32 priority); 47 suggested_queue[core].remove(thread, priority);
48 }
54 49
55 /// Gets the next suggested thread for load balancing 50 void Schedule(u32 priority, u32 core, Thread* thread) {
56 Thread* GetNextSuggestedThread(u32 core, u32 minimum_priority) const; 51 ASSERT_MSG(thread->GetProcessorID() == core,
52 "Thread must be assigned to this core.");
53 scheduled_queue[core].add(thread, priority);
54 }
57 55
58 /** 56 void SchedulePrepend(u32 priority, u32 core, Thread* thread) {
59 * YieldWithoutLoadBalancing -- analogous to normal yield on a system 57 ASSERT_MSG(thread->GetProcessorID() == core,
60 * Moves the thread to the end of the ready queue for its priority, and then reschedules the 58 "Thread must be assigned to this core.");
61 * system to the new head of the queue. 59 scheduled_queue[core].add(thread, priority, false);
62 * 60 }
63 * Example (Single Core -- but can be extrapolated to multi):
64 * ready_queue[prio=0]: ThreadA, ThreadB, ThreadC (->exec order->)
65 * Currently Running: ThreadR
66 *
67 * ThreadR calls YieldWithoutLoadBalancing
68 *
69 * ThreadR is moved to the end of ready_queue[prio=0]:
70 * ready_queue[prio=0]: ThreadA, ThreadB, ThreadC, ThreadR (->exec order->)
71 * Currently Running: Nothing
72 *
73 * System is rescheduled (ThreadA is popped off of queue):
74 * ready_queue[prio=0]: ThreadB, ThreadC, ThreadR (->exec order->)
75 * Currently Running: ThreadA
76 *
77 * If the queue is empty at time of call, no yielding occurs. This does not cross between cores
78 * or priorities at all.
79 */
80 void YieldWithoutLoadBalancing(Thread* thread);
81 61
82 /** 62 void Reschedule(u32 priority, u32 core, Thread* thread) {
83 * YieldWithLoadBalancing -- yield but with better selection of the new running thread 63 scheduled_queue[core].remove(thread, priority);
84 * Moves the current thread to the end of the ready queue for its priority, then selects a 64 scheduled_queue[core].add(thread, priority);
85 * 'suggested thread' (a thread on a different core that could run on this core) from the 65 }
86 * scheduler, changes its core, and reschedules the current core to that thread.
87 *
88 * Example (Dual Core -- can be extrapolated to Quad Core, this is just normal yield if it were
89 * single core):
90 * ready_queue[core=0][prio=0]: ThreadA, ThreadB (affinities not pictured as irrelevant
91 * ready_queue[core=1][prio=0]: ThreadC[affinity=both], ThreadD[affinity=core1only]
92 * Currently Running: ThreadQ on Core 0 || ThreadP on Core 1
93 *
94 * ThreadQ calls YieldWithLoadBalancing
95 *
96 * ThreadQ is moved to the end of ready_queue[core=0][prio=0]:
97 * ready_queue[core=0][prio=0]: ThreadA, ThreadB
98 * ready_queue[core=1][prio=0]: ThreadC[affinity=both], ThreadD[affinity=core1only]
99 * Currently Running: ThreadQ on Core 0 || ThreadP on Core 1
100 *
101 * A list of suggested threads for each core is compiled
102 * Suggested Threads: {ThreadC on Core 1}
103 * If this were quad core (as the switch is), there could be between 0 and 3 threads in this
104 * list. If there are more than one, the thread is selected by highest prio.
105 *
106 * ThreadC is core changed to Core 0:
107 * ready_queue[core=0][prio=0]: ThreadC, ThreadA, ThreadB, ThreadQ
108 * ready_queue[core=1][prio=0]: ThreadD
109 * Currently Running: None on Core 0 || ThreadP on Core 1
110 *
111 * System is rescheduled (ThreadC is popped off of queue):
112 * ready_queue[core=0][prio=0]: ThreadA, ThreadB, ThreadQ
113 * ready_queue[core=1][prio=0]: ThreadD
114 * Currently Running: ThreadC on Core 0 || ThreadP on Core 1
115 *
116 * If no suggested threads can be found this will behave just as normal yield. If there are
117 * multiple candidates for the suggested thread on a core, the highest prio is taken.
118 */
119 void YieldWithLoadBalancing(Thread* thread);
120 66
121 /// Currently unknown -- asserts as unimplemented on call 67 void Unschedule(u32 priority, u32 core, Thread* thread) {
122 void YieldAndWaitForLoadBalancing(Thread* thread); 68 scheduled_queue[core].remove(thread, priority);
69 }
123 70
124 /// Returns a list of all threads managed by the scheduler 71 void TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
125 const std::vector<SharedPtr<Thread>>& GetThreadList() const { 72 bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
126 return thread_list; 73 s32 source_core = thread->GetProcessorID();
74 if (source_core == destination_core || !schedulable)
75 return;
76 thread->SetProcessorID(destination_core);
77 if (source_core >= 0)
78 Unschedule(priority, source_core, thread);
79 if (destination_core >= 0) {
80 Unsuggest(priority, destination_core, thread);
81 Schedule(priority, destination_core, thread);
82 }
83 if (source_core >= 0)
84 Suggest(priority, source_core, thread);
85 }
86
87 void UnloadThread(s32 core);
88
89 void SelectThreads();
90 void SelectThread(u32 core);
91
92 bool HaveReadyThreads(u32 core_id) {
93 return !scheduled_queue[core_id].empty();
94 }
95
96 void YieldThread(Thread* thread);
97 void YieldThreadAndBalanceLoad(Thread* thread);
98 void YieldThreadAndWaitForLoadBalancing(Thread* thread);
99
100 u32 CpuCoresCount() const {
101 return NUM_CPU_CORES;
102 }
103
104 void SetReselectionPending() {
105 reselection_pending.store(true, std::memory_order_release);
106 }
107
108 bool IsReselectionPending() {
109 return reselection_pending.load(std::memory_order_acquire);
127 } 110 }
128 111
129private: 112private:
130 /** 113 void AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner);
131 * Pops and returns the next thread from the thread queue 114
132 * @return A pointer to the next ready thread 115 static constexpr u32 min_regular_priority = 2;
133 */ 116 std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> scheduled_queue;
134 Thread* PopNextReadyThread(); 117 std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> suggested_queue;
118 std::atomic<bool> reselection_pending;
119
120 /// Lists all thread ids that aren't deleted/etc.
121 std::vector<SharedPtr<Thread>> thread_list;
122};
123
124class Scheduler final {
125public:
126 explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, const u32 id);
127 ~Scheduler();
128
129 /// Returns whether there are any threads that are ready to run.
130 bool HaveReadyThreads() const;
131
132 /// Reschedules to the next available thread (call after current thread is suspended)
133 void TryDoContextSwitch();
134
135 void UnloadThread();
136
137 void SelectThreads();
138
139 /// Gets the current running thread
140 Thread* GetCurrentThread() const;
141
142 Thread* GetSelectedThread() const;
143
144 /// Gets the timestamp for the last context switch in ticks.
145 u64 GetLastContextSwitchTicks() const;
146
147 bool ContextSwitchPending() const {
148 return context_switch_pending;
149 }
135 150
151private:
152 friend class GlobalScheduler;
136 /** 153 /**
137 * Switches the CPU's active thread context to that of the specified thread 154 * Switches the CPU's active thread context to that of the specified thread
138 * @param new_thread The thread to switch to 155 * @param new_thread The thread to switch to
139 */ 156 */
140 void SwitchContext(Thread* new_thread); 157 void SwitchContext();
141 158
142 /** 159 /**
143 * Called on every context switch to update the internal timestamp 160 * Called on every context switch to update the internal timestamp
@@ -152,19 +169,16 @@ private:
152 */ 169 */
153 void UpdateLastContextSwitchTime(Thread* thread, Process* process); 170 void UpdateLastContextSwitchTime(Thread* thread, Process* process);
154 171
155 /// Lists all thread ids that aren't deleted/etc.
156 std::vector<SharedPtr<Thread>> thread_list;
157
158 /// Lists only ready thread ids.
159 Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
160
161 SharedPtr<Thread> current_thread = nullptr; 172 SharedPtr<Thread> current_thread = nullptr;
173 SharedPtr<Thread> selected_thread = nullptr;
162 174
175 Core::System& system;
163 Core::ARM_Interface& cpu_core; 176 Core::ARM_Interface& cpu_core;
164 u64 last_context_switch_time = 0; 177 u64 last_context_switch_time = 0;
178 u64 idle_selection_count = 0;
179 const u32 id;
165 180
166 Core::System& system; 181 bool context_switch_pending = false;
167 static std::mutex scheduler_mutex;
168}; 182};
169 183
170} // namespace Kernel 184} // namespace Kernel
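
As a closing usage note: the three yield entry points added here (YieldThread, YieldThreadAndBalanceLoad, YieldThreadAndWaitForLoadBalancing) correspond to the yield variants a guest requests through svcSleepThread, which the "unlike yield -1" comment in the .cpp hunk alludes to. The dispatch below is a hedged sketch of that mapping, not code from this commit; the stub functions only print which path would be taken.

// Hypothetical dispatch from svcSleepThread's special durations to the three
// new GlobalScheduler entry points (sketch only; stubs stand in for the kernel calls).
#include <cstdint>
#include <iostream>

void YieldThread()                        { std::cout << "yield, same core\n"; }
void YieldThreadAndBalanceLoad()          { std::cout << "yield, allow migration\n"; }
void YieldThreadAndWaitForLoadBalancing() { std::cout << "yield to any thread\n"; }

// svcSleepThread treats these non-positive durations as yield requests.
void SleepThread(std::int64_t nanoseconds) {
    switch (nanoseconds) {
    case 0:  YieldThread(); break;
    case -1: YieldThreadAndBalanceLoad(); break;
    case -2: YieldThreadAndWaitForLoadBalancing(); break;
    default: /* schedule a real wakeup after `nanoseconds` */ break;
    }
}

int main() {
    SleepThread(-1);  // prints "yield, allow migration"
}
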