| author | 2019-03-29 17:01:17 -0400 | |
|---|---|---|
| committer | 2019-10-15 11:55:04 -0400 | |
| commit | b164d8ee536dba526f9da2083433d529daf7b37b (patch) | |
| tree | fd0ad5dad048adccd1ed4f2564eff0cfa08dc470 /src/core/hle/kernel/scheduler.cpp | |
| parent | Merge pull request #2965 from FernandoS27/fair-core-timing (diff) | |
Implement a new Core Scheduler
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
| -rw-r--r-- | src/core/hle/kernel/scheduler.cpp | 449 |
1 file changed, 294 insertions, 155 deletions
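At a high level, this change splits scheduling into two cooperating pieces: a single GlobalScheduler that owns the thread list, the per-core scheduled and suggested queues, and all of the selection, yield, and load-balancing logic, plus one lightweight Scheduler per CPU core that only tracks its current and selected thread and performs the actual context switch. The declaration-only sketch below is inferred from the calls visible in this diff; the real declarations live in scheduler.h, which is not part of this diff, so treat every signature here as an approximation.

```cpp
#include <cstdint>

using u32 = std::uint32_t;

class Thread;

// One instance for the whole kernel: owns the thread list, the per-core
// "scheduled" and "suggested" queues, and the selection/yield/balancing logic.
class GlobalScheduler {
public:
    void AddThread(Thread* thread);     // the real code takes a SharedPtr<Thread>
    void SelectThread(u32 core);        // choose the next thread for one core
    void SelectThreads();               // choose for every core, then load-balance
    void YieldThread(Thread* yielding_thread);
};

// One instance per CPU core: remembers which thread is current/selected and
// switches contexts only when the global scheduler changed the selection.
class Scheduler {
public:
    explicit Scheduler(u32 id) : id{id} {}
    void SelectThreads();               // delegates to GlobalScheduler::SelectThread(id)
    void TryDoContextSwitch();          // switches only if context_switch_pending

private:
    u32 id;                             // index of the core this scheduler serves
    bool context_switch_pending = false;
};
```

The per-core Scheduler keeps its old public face (GetCurrentThread, context switching), while the thread bookkeeping and yield logic that previously sat behind the static scheduler_mutex moves into GlobalScheduler.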
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index e8447b69a..878aeed6d 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp | |||
| @@ -3,6 +3,8 @@ | |||
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include <algorithm> | 5 | #include <algorithm> |
| 6 | #include <set> | ||
| 7 | #include <unordered_set> | ||
| 6 | #include <utility> | 8 | #include <utility> |
| 7 | 9 | ||
| 8 | #include "common/assert.h" | 10 | #include "common/assert.h" |
| @@ -17,57 +19,314 @@ | |||
| 17 | 19 | ||
| 18 | namespace Kernel { | 20 | namespace Kernel { |
| 19 | 21 | ||
| 20 | std::mutex Scheduler::scheduler_mutex; | 22 | void GlobalScheduler::AddThread(SharedPtr<Thread> thread) { |
| 23 | thread_list.push_back(std::move(thread)); | ||
| 24 | } | ||
| 25 | |||
| 26 | void GlobalScheduler::RemoveThread(Thread* thread) { | ||
| 27 | thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), | ||
| 28 | thread_list.end()); | ||
| 29 | } | ||
| 30 | |||
| 31 | /* | ||
| 32 | * SelectThreads, Yield functions originally by TuxSH. | ||
| 33 | * licensed under GPLv2 or later, under an exception provided by the author. | ||
| 34 | */ | ||
| 35 | |||
| 36 | void GlobalScheduler::UnloadThread(s32 core) { | ||
| 37 | Scheduler& sched = Core::System::GetInstance().Scheduler(core); | ||
| 38 | sched.UnloadThread(); | ||
| 39 | } | ||
| 40 | |||
| 41 | void GlobalScheduler::SelectThread(u32 core) { | ||
| 42 | auto update_thread = [](Thread* thread, Scheduler& sched) { | ||
| 43 | if (thread != sched.selected_thread) { | ||
| 44 | if (thread == nullptr) { | ||
| 45 | ++sched.idle_selection_count; | ||
| 46 | } | ||
| 47 | sched.selected_thread = thread; | ||
| 48 | } | ||
| 49 | sched.context_switch_pending = sched.selected_thread != sched.current_thread; | ||
| 50 | std::atomic_thread_fence(std::memory_order_seq_cst); | ||
| 51 | }; | ||
| 52 | Scheduler& sched = Core::System::GetInstance().Scheduler(core); | ||
| 53 | Thread* current_thread = nullptr; | ||
| 54 | current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front(); | ||
| 55 | if (!current_thread) { | ||
| 56 | Thread* winner = nullptr; | ||
| 57 | std::set<s32> sug_cores; | ||
| 58 | for (auto thread : suggested_queue[core]) { | ||
| 59 | s32 this_core = thread->GetProcessorID(); | ||
| 60 | Thread* thread_on_core = nullptr; | ||
| 61 | if (this_core >= 0) { | ||
| 62 | thread_on_core = scheduled_queue[this_core].front(); | ||
| 63 | } | ||
| 64 | if (this_core < 0 || thread != thread_on_core) { | ||
| 65 | winner = thread; | ||
| 66 | break; | ||
| 67 | } | ||
| 68 | sug_cores.insert(this_core); | ||
| 69 | } | ||
| 70 | if (winner && winner->GetPriority() > 2) { | ||
| 71 | if (winner->IsRunning()) { | ||
| 72 | UnloadThread(winner->GetProcessorID()); | ||
| 73 | } | ||
| 74 | TransferToCore(winner->GetPriority(), core, winner); | ||
| 75 | current_thread = winner; | ||
| 76 | } else { | ||
| 77 | for (auto& src_core : sug_cores) { | ||
| 78 | auto it = scheduled_queue[src_core].begin(); | ||
| 79 | it++; | ||
| 80 | if (it != scheduled_queue[src_core].end()) { | ||
| 81 | Thread* thread_on_core = scheduled_queue[src_core].front(); | ||
| 82 | Thread* to_change = *it; | ||
| 83 | if (thread_on_core->IsRunning() || to_change->IsRunning()) { | ||
| 84 | UnloadThread(src_core); | ||
| 85 | } | ||
| 86 | TransferToCore(thread_on_core->GetPriority(), core, thread_on_core); | ||
| 87 | current_thread = thread_on_core; | ||
| 88 | } | ||
| 89 | } | ||
| 90 | } | ||
| 91 | } | ||
| 92 | update_thread(current_thread, sched); | ||
| 93 | } | ||
| 21 | 94 | ||
| 22 | Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core) | 95 | void GlobalScheduler::SelectThreads() { |
| 23 | : cpu_core{cpu_core}, system{system} {} | 96 | auto update_thread = [](Thread* thread, Scheduler& sched) { |
| 97 | if (thread != sched.selected_thread) { | ||
| 98 | if (thread == nullptr) { | ||
| 99 | ++sched.idle_selection_count; | ||
| 100 | } | ||
| 101 | sched.selected_thread = thread; | ||
| 102 | } | ||
| 103 | sched.context_switch_pending = sched.selected_thread != sched.current_thread; | ||
| 104 | std::atomic_thread_fence(std::memory_order_seq_cst); | ||
| 105 | }; | ||
| 106 | |||
| 107 | auto& system = Core::System::GetInstance(); | ||
| 108 | |||
| 109 | std::unordered_set<Thread*> picked_threads; | ||
| 110 | // This maintains the "current thread is on front of queue" invariant | ||
| 111 | std::array<Thread*, NUM_CPU_CORES> current_threads; | ||
| 112 | for (u32 i = 0; i < NUM_CPU_CORES; i++) { | ||
| 113 | Scheduler& sched = system.Scheduler(i); | ||
| 114 | current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front(); | ||
| 115 | if (current_threads[i]) | ||
| 116 | picked_threads.insert(current_threads[i]); | ||
| 117 | update_thread(current_threads[i], sched); | ||
| 118 | } | ||
| 119 | |||
| 120 | // Do some load-balancing. Allow second pass. | ||
| 121 | std::array<Thread*, NUM_CPU_CORES> current_threads_2 = current_threads; | ||
| 122 | for (u32 i = 0; i < NUM_CPU_CORES; i++) { | ||
| 123 | if (!scheduled_queue[i].empty()) { | ||
| 124 | continue; | ||
| 125 | } | ||
| 126 | Thread* winner = nullptr; | ||
| 127 | for (auto thread : suggested_queue[i]) { | ||
| 128 | if (thread->GetProcessorID() < 0 || thread != current_threads[i]) { | ||
| 129 | if (picked_threads.count(thread) == 0 && !thread->IsRunning()) { | ||
| 130 | winner = thread; | ||
| 131 | break; | ||
| 132 | } | ||
| 133 | } | ||
| 134 | } | ||
| 135 | if (winner) { | ||
| 136 | TransferToCore(winner->GetPriority(), i, winner); | ||
| 137 | current_threads_2[i] = winner; | ||
| 138 | picked_threads.insert(winner); | ||
| 139 | } | ||
| 140 | } | ||
| 24 | 141 | ||
| 25 | Scheduler::~Scheduler() { | 142 | // See which to-be-current threads have changed & update accordingly |
| 26 | for (auto& thread : thread_list) { | 143 | for (u32 i = 0; i < NUM_CPU_CORES; i++) { |
| 27 | thread->Stop(); | 144 | Scheduler& sched = system.Scheduler(i); |
| 145 | if (current_threads_2[i] != current_threads[i]) { | ||
| 146 | update_thread(current_threads_2[i], sched); | ||
| 147 | } | ||
| 28 | } | 148 | } |
| 149 | |||
| 150 | reselection_pending.store(false, std::memory_order_release); | ||
| 29 | } | 151 | } |
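Both selection paths above follow the same rule: a core first runs whatever is at the front of its own scheduled queue, and only an idle core consults its suggested queue and steals a thread that no other core is about to run. The model below is a self-contained restatement of that decision with stand-in types; the real members (scheduled_queue, suggested_queue) are multilevel priority queues declared in scheduler.h, so everything here is an illustrative assumption rather than the literal implementation.

```cpp
#include <array>
#include <cstdint>
#include <deque>

using u32 = std::uint32_t;
using s32 = std::int32_t;
constexpr std::size_t NUM_CPU_CORES = 4;

// Minimal stand-in for Kernel::Thread: only what the decision needs.
struct Thread {
    s32 processor_id = -1;                         // home core, or -1 if unassigned
    s32 GetProcessorID() const { return processor_id; }
};

// Stand-in for the per-core queues (the real type is a multilevel priority queue).
using Queue = std::deque<Thread*>;
std::array<Queue, NUM_CPU_CORES> scheduled_queue;  // runnable on that core
std::array<Queue, NUM_CPU_CORES> suggested_queue;  // stealable by that core

// Model of SelectThread's ordering: own queue first, then the first suggested
// thread whose home core is not about to run it anyway.
Thread* ModelSelect(u32 core) {
    if (!scheduled_queue[core].empty()) {
        return scheduled_queue[core].front();
    }
    for (Thread* thread : suggested_queue[core]) {
        const s32 home = thread->GetProcessorID();
        if (home < 0 || scheduled_queue[home].empty() ||
            thread != scheduled_queue[home].front()) {
            return thread;  // SelectThread would migrate it with TransferToCore(...)
        }
    }
    return nullptr;  // the core stays idle
}
```

SelectThreads applies the same idea across every core in one pass, with the picked_threads set ensuring that a thread is never handed to two cores in the same round.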
| 30 | 152 | ||
| 153 | void GlobalScheduler::YieldThread(Thread* yielding_thread) { | ||
| 154 | // Note: caller should use critical section, etc. | ||
| 155 | u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); | ||
| 156 | u32 priority = yielding_thread->GetPriority(); | ||
| 157 | |||
| 158 | // Yield the thread | ||
| 159 | ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority), | ||
| 160 | "Thread yielding without being in front"); | ||
| 161 | scheduled_queue[core_id].yield(priority); | ||
| 162 | |||
| 163 | Thread* winner = scheduled_queue[core_id].front(priority); | ||
| 164 | AskForReselectionOrMarkRedundant(yielding_thread, winner); | ||
| 165 | } | ||
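YieldThread is the plain yield: the thread must be at the front of its priority bucket in the core's scheduled queue, and yield(priority) rotates it to the back of that same bucket so an equal-priority sibling (if any) takes the front. The MultiLevelQueue type is not shown in this diff; the stand-in below only models the rotate-within-one-priority behaviour that YieldThread relies on, so treat it as an assumption about that container.

```cpp
#include <array>
#include <cstddef>
#include <deque>

// Stand-in for one core's scheduled queue: one FIFO bucket per priority level.
template <typename T, std::size_t NumPriorities>
struct PriorityBuckets {
    std::array<std::deque<T>, NumPriorities> buckets;

    // Front entry of a specific priority, as scheduled_queue[core].front(prio)
    // is used above.
    T front(std::size_t priority) const {
        return buckets[priority].front();
    }

    // Rotate the front entry of this priority to the back of the same bucket;
    // this is the effect scheduled_queue[core_id].yield(priority) has above.
    void yield(std::size_t priority) {
        auto& bucket = buckets[priority];
        bucket.push_back(bucket.front());
        bucket.pop_front();
    }
};
```

After the rotation, AskForReselectionOrMarkRedundant only requests a reselection if the rotation actually produced a different front thread.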
| 166 | |||
| 167 | void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) { | ||
| 168 | // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, | ||
| 169 | // etc. | ||
| 170 | u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); | ||
| 171 | u32 priority = yielding_thread->GetPriority(); | ||
| 172 | |||
| 173 | // Yield the thread | ||
| 174 | ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority), | ||
| 175 | "Thread yielding without being in front"); | ||
| 176 | scheduled_queue[core_id].yield(priority); | ||
| 177 | |||
| 178 | std::array<Thread*, NUM_CPU_CORES> current_threads; | ||
| 179 | for (u32 i = 0; i < NUM_CPU_CORES; i++) { | ||
| 180 | current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front(); | ||
| 181 | } | ||
| 182 | |||
| 183 | Thread* next_thread = scheduled_queue[core_id].front(priority); | ||
| 184 | Thread* winner = nullptr; | ||
| 185 | for (auto& thread : suggested_queue[core_id]) { | ||
| 186 | s32 source_core = thread->GetProcessorID(); | ||
| 187 | if (source_core >= 0) { | ||
| 188 | if (current_threads[source_core] != nullptr) { | ||
| 189 | if (thread == current_threads[source_core] || | ||
| 190 | current_threads[source_core]->GetPriority() < min_regular_priority) | ||
| 191 | continue; | ||
| 192 | } | ||
| 193 | if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() || | ||
| 194 | next_thread->GetPriority() < thread->GetPriority()) { | ||
| 195 | if (thread->GetPriority() <= priority) { | ||
| 196 | winner = thread; | ||
| 197 | break; | ||
| 198 | } | ||
| 199 | } | ||
| 200 | } | ||
| 201 | } | ||
| 202 | |||
| 203 | if (winner != nullptr) { | ||
| 204 | if (winner != yielding_thread) { | ||
| 205 | if (winner->IsRunning()) | ||
| 206 | UnloadThread(winner->GetProcessorID()); | ||
| 207 | TransferToCore(winner->GetPriority(), core_id, winner); | ||
| 208 | } | ||
| 209 | } else { | ||
| 210 | winner = next_thread; | ||
| 211 | } | ||
| 212 | |||
| 213 | AskForReselectionOrMarkRedundant(yielding_thread, winner); | ||
| 214 | } | ||
| 215 | |||
| 216 | void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) { | ||
| 217 | // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, | ||
| 218 | // etc. | ||
| 219 | Thread* winner = nullptr; | ||
| 220 | u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); | ||
| 221 | |||
| 222 | // Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one instead | ||
| 223 | TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread); | ||
| 224 | |||
| 225 | // If the core is idle, perform load balancing, excluding the threads that have just used this | ||
| 226 | // function... | ||
| 227 | if (scheduled_queue[core_id].empty()) { | ||
| 228 | // Here, "current_threads" is calculated after the ""yield"", unlike yield -1 | ||
| 229 | std::array<Thread*, NUM_CPU_CORES> current_threads; | ||
| 230 | for (u32 i = 0; i < NUM_CPU_CORES; i++) { | ||
| 231 | current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front(); | ||
| 232 | } | ||
| 233 | for (auto& thread : suggested_queue[core_id]) { | ||
| 234 | s32 source_core = thread->GetProcessorID(); | ||
| 235 | if (source_core < 0 || thread == current_threads[source_core]) | ||
| 236 | continue; | ||
| 237 | if (current_threads[source_core] == nullptr || | ||
| 238 | current_threads[source_core]->GetPriority() >= min_regular_priority) { | ||
| 239 | winner = thread; | ||
| 240 | } | ||
| 241 | break; | ||
| 242 | } | ||
| 243 | if (winner != nullptr) { | ||
| 244 | if (winner != yielding_thread) { | ||
| 245 | if (winner->IsRunning()) | ||
| 246 | UnloadThread(winner->GetProcessorID()); | ||
| 247 | TransferToCore(winner->GetPriority(), core_id, winner); | ||
| 248 | } | ||
| 249 | } else { | ||
| 250 | winner = yielding_thread; | ||
| 251 | } | ||
| 252 | } | ||
| 253 | |||
| 254 | AskForReselectionOrMarkRedundant(yielding_thread, winner); | ||
| 255 | } | ||
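The three yield entry points differ only in how aggressively they rebalance: YieldThread rotates within the same core, YieldThreadAndBalanceLoad additionally tries to pull a better-suited thread in from another core, and YieldThreadAndWaitForLoadBalancing vacates the core entirely by parking the thread on the suggested queue (TransferToCore with core -1). On the real console these correspond to svcSleepThread being invoked with timeouts of 0, -1, and -2 nanoseconds; that SVC-side mapping is not part of this diff, so the dispatcher below, including its name and caller, is an assumption used purely for illustration (it presumes the kernel's scheduler and thread headers are included).

```cpp
// Hypothetical SVC-side dispatcher; only the three GlobalScheduler entry
// points are taken from this diff, everything else is assumed.
void DispatchYield(Kernel::GlobalScheduler& global_scheduler, Kernel::Thread* current,
                   s64 nanoseconds) {
    switch (nanoseconds) {
    case 0:
        global_scheduler.YieldThread(current);                         // stay on this core
        break;
    case -1:
        global_scheduler.YieldThreadAndBalanceLoad(current);           // allow migration
        break;
    case -2:
        global_scheduler.YieldThreadAndWaitForLoadBalancing(current);  // vacate the core
        break;
    default:
        break;  // an actual timed sleep would be armed here
    }
}
```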
| 256 | |||
| 257 | void GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) { | ||
| 258 | if (current_thread == winner) { | ||
| 259 | // Nintendo (not us) has a nullderef bug on current_thread->owner, which is never | ||
| 260 | // triggered. | ||
| 261 | // current_thread->SetRedundantSchedulerOperation(); | ||
| 262 | } else { | ||
| 263 | reselection_pending.store(true, std::memory_order_release); | ||
| 264 | } | ||
| 265 | } | ||
| 266 | |||
| 267 | GlobalScheduler::~GlobalScheduler() = default; | ||
| 268 | |||
| 269 | Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 id) | ||
| 270 | : system(system), cpu_core(cpu_core), id(id) {} | ||
| 271 | |||
| 272 | Scheduler::~Scheduler() {} | ||
| 273 | |||
| 31 | bool Scheduler::HaveReadyThreads() const { | 274 | bool Scheduler::HaveReadyThreads() const { |
| 32 | std::lock_guard lock{scheduler_mutex}; | 275 | return system.GlobalScheduler().HaveReadyThreads(id); |
| 33 | return !ready_queue.empty(); | ||
| 34 | } | 276 | } |
| 35 | 277 | ||
| 36 | Thread* Scheduler::GetCurrentThread() const { | 278 | Thread* Scheduler::GetCurrentThread() const { |
| 37 | return current_thread.get(); | 279 | return current_thread.get(); |
| 38 | } | 280 | } |
| 39 | 281 | ||
| 282 | Thread* Scheduler::GetSelectedThread() const { | ||
| 283 | return selected_thread.get(); | ||
| 284 | } | ||
| 285 | |||
| 286 | void Scheduler::SelectThreads() { | ||
| 287 | system.GlobalScheduler().SelectThread(id); | ||
| 288 | } | ||
| 289 | |||
| 40 | u64 Scheduler::GetLastContextSwitchTicks() const { | 290 | u64 Scheduler::GetLastContextSwitchTicks() const { |
| 41 | return last_context_switch_time; | 291 | return last_context_switch_time; |
| 42 | } | 292 | } |
| 43 | 293 | ||
| 44 | Thread* Scheduler::PopNextReadyThread() { | 294 | void Scheduler::TryDoContextSwitch() { |
| 45 | Thread* next = nullptr; | 295 | if (context_switch_pending) |
| 46 | Thread* thread = GetCurrentThread(); | 296 | SwitchContext(); |
| 297 | } | ||
| 298 | |||
| 299 | void Scheduler::UnloadThread() { | ||
| 300 | Thread* const previous_thread = GetCurrentThread(); | ||
| 301 | Process* const previous_process = Core::CurrentProcess(); | ||
| 47 | 302 | ||
| 48 | if (thread && thread->GetStatus() == ThreadStatus::Running) { | 303 | UpdateLastContextSwitchTime(previous_thread, previous_process); |
| 49 | if (ready_queue.empty()) { | 304 | |
| 50 | return thread; | 305 | // Save context for previous thread |
| 51 | } | 306 | if (previous_thread) { |
| 52 | // We have to do better than the current thread. | 307 | cpu_core.SaveContext(previous_thread->GetContext()); |
| 53 | // This call returns null when that's not possible. | 308 | // Save the TPIDR_EL0 system register in case it was modified. |
| 54 | next = ready_queue.front(); | 309 | previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); |
| 55 | if (next == nullptr || next->GetPriority() >= thread->GetPriority()) { | 310 | |
| 56 | next = thread; | 311 | if (previous_thread->GetStatus() == ThreadStatus::Running) { |
| 57 | } | 312 | // This is only the case when a reschedule is triggered without the current thread |
| 58 | } else { | 313 | // yielding execution (i.e. an event triggered, system core time-sliced, etc) |
| 59 | if (ready_queue.empty()) { | 314 | previous_thread->SetStatus(ThreadStatus::Ready); |
| 60 | return nullptr; | ||
| 61 | } | 315 | } |
| 62 | next = ready_queue.front(); | 316 | previous_thread->SetIsRunning(false); |
| 63 | } | 317 | } |
| 64 | 318 | current_thread = nullptr; | |
| 65 | return next; | ||
| 66 | } | 319 | } |
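Scheduler::UnloadThread is the save-only half of a context switch: it writes the CPU registers and TPIDR_EL0 back into the thread, drops it back to Ready, and leaves the core with no current thread. The global scheduler reaches it through GlobalScheduler::UnloadThread(core) whenever it wants to migrate a thread that is still running on another core. The helper below is hypothetical (it assumes such a member were declared in scheduler.h) and merely groups two existing operations from this diff to show the order the steal paths use.

```cpp
// Hypothetical convenience wrapper; UnloadThread and TransferToCore are the
// real GlobalScheduler operations used by the steal paths in this file.
void GlobalScheduler::MigrateRunningThread(Thread* winner, s32 destination_core) {
    if (winner->IsRunning()) {
        // Save the thread's context on its old core and clear that core's current_thread.
        UnloadThread(winner->GetProcessorID());
    }
    // Move it out of its old queues and onto destination_core's scheduled queue.
    TransferToCore(winner->GetPriority(), destination_core, winner);
}
```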
| 67 | 320 | ||
| 68 | void Scheduler::SwitchContext(Thread* new_thread) { | 321 | void Scheduler::SwitchContext() { |
| 69 | Thread* previous_thread = GetCurrentThread(); | 322 | Thread* const previous_thread = GetCurrentThread(); |
| 70 | Process* const previous_process = system.Kernel().CurrentProcess(); | 323 | Thread* const new_thread = GetSelectedThread(); |
| 324 | |||
| 325 | context_switch_pending = false; | ||
| 326 | if (new_thread == previous_thread) | ||
| 327 | return; | ||
| 328 | |||
| 329 | Process* const previous_process = Core::CurrentProcess(); | ||
| 71 | 330 | ||
| 72 | UpdateLastContextSwitchTime(previous_thread, previous_process); | 331 | UpdateLastContextSwitchTime(previous_thread, previous_process); |
| 73 | 332 | ||
| @@ -80,23 +339,23 @@ void Scheduler::SwitchContext(Thread* new_thread) { | |||
| 80 | if (previous_thread->GetStatus() == ThreadStatus::Running) { | 339 | if (previous_thread->GetStatus() == ThreadStatus::Running) { |
| 81 | // This is only the case when a reschedule is triggered without the current thread | 340 | // This is only the case when a reschedule is triggered without the current thread |
| 82 | // yielding execution (i.e. an event triggered, system core time-sliced, etc) | 341 | // yielding execution (i.e. an event triggered, system core time-sliced, etc) |
| 83 | ready_queue.add(previous_thread, previous_thread->GetPriority(), false); | ||
| 84 | previous_thread->SetStatus(ThreadStatus::Ready); | 342 | previous_thread->SetStatus(ThreadStatus::Ready); |
| 85 | } | 343 | } |
| 344 | previous_thread->SetIsRunning(false); | ||
| 86 | } | 345 | } |
| 87 | 346 | ||
| 88 | // Load context of new thread | 347 | // Load context of new thread |
| 89 | if (new_thread) { | 348 | if (new_thread) { |
| 349 | ASSERT_MSG(new_thread->GetProcessorID() == this->id, | ||
| 350 | "Thread must be assigned to this core."); | ||
| 90 | ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready, | 351 | ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready, |
| 91 | "Thread must be ready to become running."); | 352 | "Thread must be ready to become running."); |
| 92 | 353 | ||
| 93 | // Cancel any outstanding wakeup events for this thread | 354 | // Cancel any outstanding wakeup events for this thread |
| 94 | new_thread->CancelWakeupTimer(); | 355 | new_thread->CancelWakeupTimer(); |
| 95 | |||
| 96 | current_thread = new_thread; | 356 | current_thread = new_thread; |
| 97 | |||
| 98 | ready_queue.remove(new_thread, new_thread->GetPriority()); | ||
| 99 | new_thread->SetStatus(ThreadStatus::Running); | 357 | new_thread->SetStatus(ThreadStatus::Running); |
| 358 | new_thread->SetIsRunning(true); | ||
| 100 | 359 | ||
| 101 | auto* const thread_owner_process = current_thread->GetOwnerProcess(); | 360 | auto* const thread_owner_process = current_thread->GetOwnerProcess(); |
| 102 | if (previous_process != thread_owner_process) { | 361 | if (previous_process != thread_owner_process) { |
| @@ -116,7 +375,7 @@ void Scheduler::SwitchContext(Thread* new_thread) { | |||
| 116 | 375 | ||
| 117 | void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { | 376 | void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { |
| 118 | const u64 prev_switch_ticks = last_context_switch_time; | 377 | const u64 prev_switch_ticks = last_context_switch_time; |
| 119 | const u64 most_recent_switch_ticks = system.CoreTiming().GetTicks(); | 378 | const u64 most_recent_switch_ticks = Core::System::GetInstance().CoreTiming().GetTicks(); |
| 120 | const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; | 379 | const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; |
| 121 | 380 | ||
| 122 | if (thread != nullptr) { | 381 | if (thread != nullptr) { |
| @@ -130,124 +389,4 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { | |||
| 130 | last_context_switch_time = most_recent_switch_ticks; | 389 | last_context_switch_time = most_recent_switch_ticks; |
| 131 | } | 390 | } |
| 132 | 391 | ||
| 133 | void Scheduler::Reschedule() { | ||
| 134 | std::lock_guard lock{scheduler_mutex}; | ||
| 135 | |||
| 136 | Thread* cur = GetCurrentThread(); | ||
| 137 | Thread* next = PopNextReadyThread(); | ||
| 138 | |||
| 139 | if (cur && next) { | ||
| 140 | LOG_TRACE(Kernel, "context switch {} -> {}", cur->GetObjectId(), next->GetObjectId()); | ||
| 141 | } else if (cur) { | ||
| 142 | LOG_TRACE(Kernel, "context switch {} -> idle", cur->GetObjectId()); | ||
| 143 | } else if (next) { | ||
| 144 | LOG_TRACE(Kernel, "context switch idle -> {}", next->GetObjectId()); | ||
| 145 | } | ||
| 146 | |||
| 147 | SwitchContext(next); | ||
| 148 | } | ||
| 149 | |||
| 150 | void Scheduler::AddThread(SharedPtr<Thread> thread) { | ||
| 151 | std::lock_guard lock{scheduler_mutex}; | ||
| 152 | |||
| 153 | thread_list.push_back(std::move(thread)); | ||
| 154 | } | ||
| 155 | |||
| 156 | void Scheduler::RemoveThread(Thread* thread) { | ||
| 157 | std::lock_guard lock{scheduler_mutex}; | ||
| 158 | |||
| 159 | thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), | ||
| 160 | thread_list.end()); | ||
| 161 | } | ||
| 162 | |||
| 163 | void Scheduler::ScheduleThread(Thread* thread, u32 priority) { | ||
| 164 | std::lock_guard lock{scheduler_mutex}; | ||
| 165 | |||
| 166 | ASSERT(thread->GetStatus() == ThreadStatus::Ready); | ||
| 167 | ready_queue.add(thread, priority); | ||
| 168 | } | ||
| 169 | |||
| 170 | void Scheduler::UnscheduleThread(Thread* thread, u32 priority) { | ||
| 171 | std::lock_guard lock{scheduler_mutex}; | ||
| 172 | |||
| 173 | ASSERT(thread->GetStatus() == ThreadStatus::Ready); | ||
| 174 | ready_queue.remove(thread, priority); | ||
| 175 | } | ||
| 176 | |||
| 177 | void Scheduler::SetThreadPriority(Thread* thread, u32 priority) { | ||
| 178 | std::lock_guard lock{scheduler_mutex}; | ||
| 179 | if (thread->GetPriority() == priority) { | ||
| 180 | return; | ||
| 181 | } | ||
| 182 | |||
| 183 | // If thread was ready, adjust queues | ||
| 184 | if (thread->GetStatus() == ThreadStatus::Ready) | ||
| 185 | ready_queue.adjust(thread, thread->GetPriority(), priority); | ||
| 186 | } | ||
| 187 | |||
| 188 | Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const { | ||
| 189 | std::lock_guard lock{scheduler_mutex}; | ||
| 190 | |||
| 191 | const u32 mask = 1U << core; | ||
| 192 | for (auto* thread : ready_queue) { | ||
| 193 | if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) { | ||
| 194 | return thread; | ||
| 195 | } | ||
| 196 | } | ||
| 197 | return nullptr; | ||
| 198 | } | ||
| 199 | |||
| 200 | void Scheduler::YieldWithoutLoadBalancing(Thread* thread) { | ||
| 201 | ASSERT(thread != nullptr); | ||
| 202 | // Avoid yielding if the thread isn't even running. | ||
| 203 | ASSERT(thread->GetStatus() == ThreadStatus::Running); | ||
| 204 | |||
| 205 | // Sanity check that the priority is valid | ||
| 206 | ASSERT(thread->GetPriority() < THREADPRIO_COUNT); | ||
| 207 | |||
| 208 | // Yield this thread -- sleep for zero time and force reschedule to different thread | ||
| 209 | GetCurrentThread()->Sleep(0); | ||
| 210 | } | ||
| 211 | |||
| 212 | void Scheduler::YieldWithLoadBalancing(Thread* thread) { | ||
| 213 | ASSERT(thread != nullptr); | ||
| 214 | const auto priority = thread->GetPriority(); | ||
| 215 | const auto core = static_cast<u32>(thread->GetProcessorID()); | ||
| 216 | |||
| 217 | // Avoid yielding if the thread isn't even running. | ||
| 218 | ASSERT(thread->GetStatus() == ThreadStatus::Running); | ||
| 219 | |||
| 220 | // Sanity check that the priority is valid | ||
| 221 | ASSERT(priority < THREADPRIO_COUNT); | ||
| 222 | |||
| 223 | // Sleep for zero time to be able to force reschedule to different thread | ||
| 224 | GetCurrentThread()->Sleep(0); | ||
| 225 | |||
| 226 | Thread* suggested_thread = nullptr; | ||
| 227 | |||
| 228 | // Search through all of the cpu cores (except this one) for a suggested thread. | ||
| 229 | // Take the first non-nullptr one | ||
| 230 | for (unsigned cur_core = 0; cur_core < Core::NUM_CPU_CORES; ++cur_core) { | ||
| 231 | const auto res = | ||
| 232 | system.CpuCore(cur_core).Scheduler().GetNextSuggestedThread(core, priority); | ||
| 233 | |||
| 234 | // If scheduler provides a suggested thread | ||
| 235 | if (res != nullptr) { | ||
| 236 | // And its better than the current suggested thread (or is the first valid one) | ||
| 237 | if (suggested_thread == nullptr || | ||
| 238 | suggested_thread->GetPriority() > res->GetPriority()) { | ||
| 239 | suggested_thread = res; | ||
| 240 | } | ||
| 241 | } | ||
| 242 | } | ||
| 243 | |||
| 244 | // If a suggested thread was found, queue that for this core | ||
| 245 | if (suggested_thread != nullptr) | ||
| 246 | suggested_thread->ChangeCore(core, suggested_thread->GetAffinityMask()); | ||
| 247 | } | ||
| 248 | |||
| 249 | void Scheduler::YieldAndWaitForLoadBalancing(Thread* thread) { | ||
| 250 | UNIMPLEMENTED_MSG("Wait for load balancing thread yield type is not implemented!"); | ||
| 251 | } | ||
| 252 | |||
| 253 | } // namespace Kernel | 392 | } // namespace Kernel |
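Putting the per-core pieces together, one scheduling slice on a core now amounts to asking the global scheduler to reselect for that core and then letting the core's own scheduler switch if the selection changed. The actual call sites live outside this file (SVC handlers and the CPU core manager), so the driver below is only an illustration of the intended flow; its name is made up, but every call it makes appears in this diff.

```cpp
// Illustrative per-core driver; RunSchedulingSlice itself is hypothetical.
void RunSchedulingSlice(Core::System& system, u32 core) {
    // Decide which thread this core should run next (possibly stealing one
    // that another core suggested).
    system.GlobalScheduler().SelectThread(core);

    // Switch only if the selected thread differs from the currently loaded
    // one (i.e. context_switch_pending was set by the selection above).
    system.Scheduler(core).TryDoContextSwitch();
}
```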