author     2019-10-28 10:53:27 +1100
committer  2019-10-28 10:53:27 +1100
commit     4c5731c34f0915457a31c60c9f70a2f169ea575d
tree       7f03a7f892370b59e56ae06c6c74514f1cc44998 /src/core/hle/kernel/scheduler.cpp
parent     Merge pull request #3034 from ReinUsesLisp/w4244-maxwell3d
parent     Kernel Thread: Cleanup THREADPROCESSORID_DONT_UPDATE.
Merge pull request #2971 from FernandoS27/new-scheduler-v2
Kernel: Implement a New Thread Scheduler V2
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  570
1 file changed, 419 insertions(+), 151 deletions(-)
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index e8447b69a..e6dcb9639 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -1,8 +1,13 @@
 // Copyright 2018 yuzu emulator team
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
+//
+// SelectThreads, Yield functions originally by TuxSH.
+// licensed under GPLv2 or later under exception provided by the author.
 
 #include <algorithm>
+#include <set>
+#include <unordered_set>
 #include <utility>
 
 #include "common/assert.h"
@@ -17,56 +22,434 @@
 
 namespace Kernel {
 
-std::mutex Scheduler::scheduler_mutex;
+GlobalScheduler::GlobalScheduler(Core::System& system) : system{system} {
+    is_reselection_pending = false;
+}
+
+void GlobalScheduler::AddThread(SharedPtr<Thread> thread) {
+    thread_list.push_back(std::move(thread));
+}
+
+void GlobalScheduler::RemoveThread(const Thread* thread) {
+    thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
+                      thread_list.end());
+}
+
+/*
+ * UnloadThread selects a core and forces it to unload its current thread's context.
+ */
+void GlobalScheduler::UnloadThread(s32 core) {
+    Scheduler& sched = system.Scheduler(core);
+    sched.UnloadThread();
+}
+
+/*
+ * SelectThread takes care of selecting the new scheduled thread.
+ * It does so in 3 steps:
+ * - First, a thread is selected from the top of the priority queue. If no thread
+ *   is obtained, we move to step two; else we are done.
+ * - Second, we try to get a suggested thread that's not assigned to any core or
+ *   that is not the top thread in its core.
+ * - Third, if no suggested thread is found, we do a second pass and pick a running
+ *   thread in another core and swap it with its current thread.
+ */
+void GlobalScheduler::SelectThread(u32 core) {
+    const auto update_thread = [](Thread* thread, Scheduler& sched) {
+        if (thread != sched.selected_thread) {
+            if (thread == nullptr) {
+                ++sched.idle_selection_count;
+            }
+            sched.selected_thread = thread;
+        }
+        sched.is_context_switch_pending = sched.selected_thread != sched.current_thread;
+        std::atomic_thread_fence(std::memory_order_seq_cst);
+    };
+    Scheduler& sched = system.Scheduler(core);
+    Thread* current_thread = nullptr;
+    // Step 1: Get top thread in schedule queue.
+    current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
+    if (current_thread) {
+        update_thread(current_thread, sched);
+        return;
+    }
+    // Step 2: Try selecting a suggested thread.
+    Thread* winner = nullptr;
+    std::set<s32> sug_cores;
+    for (auto thread : suggested_queue[core]) {
+        s32 this_core = thread->GetProcessorID();
+        Thread* thread_on_core = nullptr;
+        if (this_core >= 0) {
+            thread_on_core = scheduled_queue[this_core].front();
+        }
+        if (this_core < 0 || thread != thread_on_core) {
+            winner = thread;
+            break;
+        }
+        sug_cores.insert(this_core);
+    }
+    // If we got a suggested thread, select it; else do a second pass.
+    if (winner && winner->GetPriority() > 2) {
+        if (winner->IsRunning()) {
+            UnloadThread(winner->GetProcessorID());
+        }
+        TransferToCore(winner->GetPriority(), core, winner);
+        update_thread(winner, sched);
+        return;
+    }
+    // Step 3: Select a suggested thread from another core.
+    for (auto& src_core : sug_cores) {
+        auto it = scheduled_queue[src_core].begin();
+        it++;
+        if (it != scheduled_queue[src_core].end()) {
+            Thread* thread_on_core = scheduled_queue[src_core].front();
+            Thread* to_change = *it;
+            if (thread_on_core->IsRunning() || to_change->IsRunning()) {
+                UnloadThread(src_core);
+            }
+            TransferToCore(thread_on_core->GetPriority(), core, thread_on_core);
+            current_thread = thread_on_core;
+            break;
+        }
+    }
+    update_thread(current_thread, sched);
+}
+
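For illustration, here is a rough standalone sketch of the same three-step policy over plain std::deque queues; ToyThread, SelectToyThread, and the globals below are illustrative stand-ins, not yuzu's real Thread/MultiLevelQueue types:

    #include <array>
    #include <cstddef>
    #include <deque>

    // Toy model: a "thread" is an id plus the core it is queued on (-1 = none),
    // mirroring GetProcessorID().
    struct ToyThread {
        int id;
        int core;
    };

    constexpr std::size_t kNumCores = 4;
    std::array<std::deque<ToyThread*>, kNumCores> scheduled; // front() = next to run
    std::array<std::deque<ToyThread*>, kNumCores> suggested; // migration candidates

    ToyThread* SelectToyThread(std::size_t core) {
        // Step 1: take the top thread of this core's own queue.
        if (!scheduled[core].empty()) {
            return scheduled[core].front();
        }
        // Step 2: take a suggested thread that is unassigned, or assigned to a
        // core where it is not the thread about to run.
        for (ToyThread* t : suggested[core]) {
            if (t->core < 0 || scheduled[t->core].empty() ||
                t != scheduled[t->core].front()) {
                return t; // the real code also migrates it via TransferToCore()
            }
        }
        // Step 3: steal the front thread of a core that has a second thread
        // queued, so the donor core is not left idle.
        for (std::size_t src = 0; src < kNumCores; ++src) {
            if (src != core && scheduled[src].size() > 1) {
                return scheduled[src].front(); // real code swaps it over, too
            }
        }
        return nullptr; // nothing runnable: the core goes idle
    }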
+/*
+ * YieldThread takes a thread and moves it to the back of its priority list.
+ * This operation can be redundant, in which case no scheduling is changed.
+ */
+bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
+    // Note: caller should use critical section, etc.
+    const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
+    const u32 priority = yielding_thread->GetPriority();
+
+    // Yield the thread
+    ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority),
+               "Thread yielding without being in front");
+    scheduled_queue[core_id].yield(priority);
+
+    Thread* winner = scheduled_queue[core_id].front(priority);
+    return AskForReselectionOrMarkRedundant(yielding_thread, winner);
+}
+
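The yield primitive used here is just a front-to-back rotation at one priority level; a minimal sketch with toy stand-ins (YThread and ToyYield are not yuzu names):

    #include <array>
    #include <cassert>
    #include <deque>

    struct YThread { int id; };
    constexpr std::size_t kPriorityLevels = 64;
    std::array<std::deque<YThread*>, kPriorityLevels> run_queue;

    // Rotate the yielder to the back of its priority level, then report
    // whether the front stayed the same (true = the yield was redundant,
    // mirroring AskForReselectionOrMarkRedundant's contract).
    bool ToyYield(YThread* yielder, std::size_t priority) {
        auto& level = run_queue[priority];
        assert(!level.empty() && level.front() == yielder);
        level.pop_front();
        level.push_back(yielder); // cf. scheduled_queue[core_id].yield(priority)
        return level.front() == yielder; // only true if it was alone at this level
    }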
+/*
+ * YieldThreadAndBalanceLoad takes a thread and moves it to the back of its priority list.
+ * Afterwards, it tries to pick a suggested thread from the suggested queue that has worse
+ * time or a better priority than the next thread in the core.
+ * This operation can be redundant, in which case no scheduling is changed.
+ */
+bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
+    // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical
+    // section, etc.
+    const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
+    const u32 priority = yielding_thread->GetPriority();
+
+    // Yield the thread
+    ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority),
+               "Thread yielding without being in front");
+    scheduled_queue[core_id].yield(priority);
+
+    std::array<Thread*, NUM_CPU_CORES> current_threads;
+    for (u32 i = 0; i < NUM_CPU_CORES; i++) {
+        current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
+    }
+
+    Thread* next_thread = scheduled_queue[core_id].front(priority);
+    Thread* winner = nullptr;
+    for (auto& thread : suggested_queue[core_id]) {
+        const s32 source_core = thread->GetProcessorID();
+        if (source_core >= 0) {
+            if (current_threads[source_core] != nullptr) {
+                if (thread == current_threads[source_core] ||
+                    current_threads[source_core]->GetPriority() < min_regular_priority) {
+                    continue;
+                }
+            }
+        }
+        if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
+            next_thread->GetPriority() < thread->GetPriority()) {
+            if (thread->GetPriority() <= priority) {
+                winner = thread;
+                break;
+            }
+        }
+    }
+
+    if (winner != nullptr) {
+        if (winner != yielding_thread) {
+            if (winner->IsRunning()) {
+                UnloadThread(winner->GetProcessorID());
+            }
+            TransferToCore(winner->GetPriority(), core_id, winner);
+        }
+    } else {
+        winner = next_thread;
+    }
+
+    return AskForReselectionOrMarkRedundant(yielding_thread, winner);
+}
+
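The candidate test inside the loop above can be restated as a small predicate; Snapshot and SuggestedBeatsNext are illustrative names, assuming lower priority values are more urgent:

    #include <cstdint>

    struct Snapshot {
        std::uint64_t last_running_ticks; // when the thread last ran
        std::uint32_t priority;           // lower value = more urgent
    };

    bool SuggestedBeatsNext(const Snapshot& next, const Snapshot& suggested,
                            std::uint32_t yielder_priority) {
        // A suggestion is rejected outright if it outranks the yielder itself.
        if (suggested.priority > yielder_priority) {
            return false;
        }
        // Otherwise it wins when the core's next thread ran at least as
        // recently (the suggestion has waited longer) or has a numerically
        // lower priority value, matching the two-clause test above.
        return next.last_running_ticks >= suggested.last_running_ticks ||
               next.priority < suggested.priority;
    }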
+/*
+ * YieldThreadAndWaitForLoadBalancing takes a thread and moves it out of the scheduling
+ * queue and into the suggested queue. If no thread can be scheduled afterwards in that
+ * core, a suggested thread is obtained instead.
+ * This operation can be redundant, in which case no scheduling is changed.
+ */
+bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
+    // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical
+    // section, etc.
+    Thread* winner = nullptr;
+    const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
+
+    // Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one
+    // instead.
+    TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread);
+
+    // If the core is idle, perform load balancing, excluding the threads that have just used
+    // this function...
+    if (scheduled_queue[core_id].empty()) {
+        // Here, "current_threads" is calculated after the "yield", unlike yield -1
+        std::array<Thread*, NUM_CPU_CORES> current_threads;
+        for (u32 i = 0; i < NUM_CPU_CORES; i++) {
+            current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
+        }
+        for (auto& thread : suggested_queue[core_id]) {
+            const s32 source_core = thread->GetProcessorID();
+            if (source_core < 0 || thread == current_threads[source_core]) {
+                continue;
+            }
+            if (current_threads[source_core] == nullptr ||
+                current_threads[source_core]->GetPriority() >= min_regular_priority) {
+                winner = thread;
+            }
+            break;
+        }
+        if (winner != nullptr) {
+            if (winner != yielding_thread) {
+                if (winner->IsRunning()) {
+                    UnloadThread(winner->GetProcessorID());
+                }
+                TransferToCore(winner->GetPriority(), core_id, winner);
+            }
+        } else {
+            winner = yielding_thread;
+        }
+    }
+
+    return AskForReselectionOrMarkRedundant(yielding_thread, winner);
+}
+
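The overall shape of this yield variant, on toy single-core queues (ToyYieldAndWait and the globals are illustrative, not yuzu types):

    #include <algorithm>
    #include <cassert>
    #include <deque>

    struct WThread { int id; };
    std::deque<WThread*> run_q;     // stand-in for scheduled_queue[core]
    std::deque<WThread*> suggest_q; // stand-in for suggested_queue[core]

    // Returns the replacement thread, or the yielder itself when the yield
    // turned out to be redundant for this core.
    WThread* ToyYieldAndWait(WThread* yielder) {
        // cf. TransferToCore(priority, -1, thread): stop being runnable here
        // and become a suggestion instead.
        auto it = std::find(run_q.begin(), run_q.end(), yielder);
        assert(it != run_q.end());
        run_q.erase(it);
        suggest_q.push_back(yielder);

        // Only an idle core pulls a suggestion back in as a replacement.
        if (run_q.empty()) {
            for (WThread* t : suggest_q) {
                if (t != yielder) {
                    suggest_q.erase(std::find(suggest_q.begin(), suggest_q.end(), t));
                    run_q.push_back(t); // real code migrates via TransferToCore()
                    return t;
                }
            }
        }
        return run_q.empty() ? yielder : run_q.front();
    }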
+void GlobalScheduler::PreemptThreads() {
+    for (std::size_t core_id = 0; core_id < NUM_CPU_CORES; core_id++) {
+        const u32 priority = preemption_priorities[core_id];
+
+        if (scheduled_queue[core_id].size(priority) > 0) {
+            scheduled_queue[core_id].front(priority)->IncrementYieldCount();
+            scheduled_queue[core_id].yield(priority);
+            if (scheduled_queue[core_id].size(priority) > 1) {
+                scheduled_queue[core_id].front(priority)->IncrementYieldCount();
+            }
+        }
+
+        Thread* current_thread =
+            scheduled_queue[core_id].empty() ? nullptr : scheduled_queue[core_id].front();
+        Thread* winner = nullptr;
+        for (auto& thread : suggested_queue[core_id]) {
+            const s32 source_core = thread->GetProcessorID();
+            if (thread->GetPriority() != priority) {
+                continue;
+            }
+            if (source_core >= 0) {
+                Thread* next_thread = scheduled_queue[source_core].empty()
+                                          ? nullptr
+                                          : scheduled_queue[source_core].front();
+                if (next_thread != nullptr && next_thread->GetPriority() < 2) {
+                    break;
+                }
+                if (next_thread == thread) {
+                    continue;
+                }
+            }
+            if (current_thread != nullptr &&
+                current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
+                winner = thread;
+                break;
+            }
+        }
+
+        if (winner != nullptr) {
+            if (winner->IsRunning()) {
+                UnloadThread(winner->GetProcessorID());
+            }
+            TransferToCore(winner->GetPriority(), core_id, winner);
+            current_thread =
+                winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
+        }
+
+        if (current_thread != nullptr && current_thread->GetPriority() > priority) {
+            for (auto& thread : suggested_queue[core_id]) {
+                const s32 source_core = thread->GetProcessorID();
+                if (thread->GetPriority() < priority) {
+                    continue;
+                }
+                if (source_core >= 0) {
+                    Thread* next_thread = scheduled_queue[source_core].empty()
+                                              ? nullptr
+                                              : scheduled_queue[source_core].front();
+                    if (next_thread != nullptr && next_thread->GetPriority() < 2) {
+                        break;
+                    }
+                    if (next_thread == thread) {
+                        continue;
+                    }
+                }
+                if (current_thread != nullptr &&
+                    current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
+                    winner = thread;
+                    break;
+                }
+            }
+
+            if (winner != nullptr) {
+                if (winner->IsRunning()) {
+                    UnloadThread(winner->GetProcessorID());
+                }
+                TransferToCore(winner->GetPriority(), core_id, winner);
+                current_thread = winner;
+            }
+        }
+
+        is_reselection_pending.store(true, std::memory_order_release);
+    }
+}
+
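At its core, the preemption pass is a forced rotation at each core's designated preemption priority, so equal-priority threads round-robin even if they never yield voluntarily. A toy sketch (the queue layout and the priority values are illustrative):

    #include <array>
    #include <cstddef>
    #include <deque>

    constexpr std::size_t kCores = 4;
    constexpr std::size_t kLevels = 64;
    // cf. preemption_priorities; the concrete values here are illustrative.
    constexpr std::array<std::size_t, kCores> kPreemptPriorities{59, 59, 59, 62};

    std::array<std::array<std::deque<int>, kLevels>, kCores> queues;

    void ToyPreempt() {
        for (std::size_t core = 0; core < kCores; ++core) {
            auto& level = queues[core][kPreemptPriorities[core]];
            if (level.size() > 1) {
                level.push_back(level.front()); // yield(): front goes to the back
                level.pop_front();
            }
            // The real pass then also tries to pull in a same-priority
            // suggested thread from another core before requesting
            // reselection via is_reselection_pending.
        }
    }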
+void GlobalScheduler::Suggest(u32 priority, u32 core, Thread* thread) {
+    suggested_queue[core].add(thread, priority);
+}
+
+void GlobalScheduler::Unsuggest(u32 priority, u32 core, Thread* thread) {
+    suggested_queue[core].remove(thread, priority);
+}
+
+void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) {
+    ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core.");
+    scheduled_queue[core].add(thread, priority);
+}
+
+void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) {
+    ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core.");
+    scheduled_queue[core].add(thread, priority, false);
+}
+
+void GlobalScheduler::Reschedule(u32 priority, u32 core, Thread* thread) {
+    scheduled_queue[core].remove(thread, priority);
+    scheduled_queue[core].add(thread, priority);
+}
+
+void GlobalScheduler::Unschedule(u32 priority, u32 core, Thread* thread) {
+    scheduled_queue[core].remove(thread, priority);
+}
+
+void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
+    const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
+    const s32 source_core = thread->GetProcessorID();
+    if (source_core == destination_core || !schedulable) {
+        return;
+    }
+    thread->SetProcessorID(destination_core);
+    if (source_core >= 0) {
+        Unschedule(priority, source_core, thread);
+    }
+    if (destination_core >= 0) {
+        Unsuggest(priority, destination_core, thread);
+        Schedule(priority, destination_core, thread);
+    }
+    if (source_core >= 0) {
+        Suggest(priority, source_core, thread);
+    }
+}
 
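Taken together, these helpers keep one invariant that TransferToCore relies on: a thread sits in the scheduled queue of its assigned core and in the suggested queues of cores it could migrate to. A compact restatement with toy sets (ToyQueues/ToyTransferToCore are illustrative):

    #include <array>
    #include <cstddef>
    #include <set>

    constexpr std::size_t kCoreCount = 4;

    struct ToyQueues {
        std::array<std::set<int>, kCoreCount> scheduled;
        std::array<std::set<int>, kCoreCount> suggested;
    };

    void ToyTransferToCore(ToyQueues& q, int thread, int src, int dst) {
        if (src == dst) { // the real code also bails out for unschedulable priorities
            return;
        }
        if (src >= 0) {
            q.scheduled[src].erase(thread);  // Unschedule on the old core...
        }
        if (dst >= 0) {
            q.suggested[dst].erase(thread);  // Unsuggest on the new core,
            q.scheduled[dst].insert(thread); // then Schedule it there.
        }
        if (src >= 0) {
            q.suggested[src].insert(thread); // ...and Suggest it back on the old one.
        }
    }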
-Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core)
-    : cpu_core{cpu_core}, system{system} {}
+bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
+    if (current_thread == winner) {
+        current_thread->IncrementYieldCount();
+        return true;
+    } else {
+        is_reselection_pending.store(true, std::memory_order_release);
+        return false;
+    }
+}
 
-Scheduler::~Scheduler() {
-    for (auto& thread : thread_list) {
-        thread->Stop();
+void GlobalScheduler::Shutdown() {
+    for (std::size_t core = 0; core < NUM_CPU_CORES; core++) {
+        scheduled_queue[core].clear();
+        suggested_queue[core].clear();
     }
+    thread_list.clear();
 }
 
+GlobalScheduler::~GlobalScheduler() = default;
+
+Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 core_id)
+    : system(system), cpu_core(cpu_core), core_id(core_id) {}
+
+Scheduler::~Scheduler() = default;
+
 bool Scheduler::HaveReadyThreads() const {
-    std::lock_guard lock{scheduler_mutex};
-    return !ready_queue.empty();
+    return system.GlobalScheduler().HaveReadyThreads(core_id);
 }
 
 Thread* Scheduler::GetCurrentThread() const {
     return current_thread.get();
 }
 
+Thread* Scheduler::GetSelectedThread() const {
+    return selected_thread.get();
+}
+
+void Scheduler::SelectThreads() {
+    system.GlobalScheduler().SelectThread(core_id);
+}
+
 u64 Scheduler::GetLastContextSwitchTicks() const {
     return last_context_switch_time;
 }
 
-Thread* Scheduler::PopNextReadyThread() {
-    Thread* next = nullptr;
-    Thread* thread = GetCurrentThread();
-
-    if (thread && thread->GetStatus() == ThreadStatus::Running) {
-        if (ready_queue.empty()) {
-            return thread;
-        }
-        // We have to do better than the current thread.
-        // This call returns null when that's not possible.
-        next = ready_queue.front();
-        if (next == nullptr || next->GetPriority() >= thread->GetPriority()) {
-            next = thread;
-        }
-    } else {
-        if (ready_queue.empty()) {
-            return nullptr;
-        }
-        next = ready_queue.front();
-    }
-
-    return next;
+void Scheduler::TryDoContextSwitch() {
+    if (is_context_switch_pending) {
+        SwitchContext();
+    }
+}
+
+void Scheduler::UnloadThread() {
+    Thread* const previous_thread = GetCurrentThread();
+    Process* const previous_process = system.Kernel().CurrentProcess();
+
+    UpdateLastContextSwitchTime(previous_thread, previous_process);
+
+    // Save context for previous thread
+    if (previous_thread) {
+        cpu_core.SaveContext(previous_thread->GetContext());
+        // Save the TPIDR_EL0 system register in case it was modified.
+        previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+
+        if (previous_thread->GetStatus() == ThreadStatus::Running) {
+            // This is only the case when a reschedule is triggered without the current thread
+            // yielding execution (i.e. an event triggered, system core time-sliced, etc)
+            previous_thread->SetStatus(ThreadStatus::Ready);
+        }
+        previous_thread->SetIsRunning(false);
+    }
+    current_thread = nullptr;
 }
 
-void Scheduler::SwitchContext(Thread* new_thread) {
-    Thread* previous_thread = GetCurrentThread();
+void Scheduler::SwitchContext() {
+    Thread* const previous_thread = GetCurrentThread();
+    Thread* const new_thread = GetSelectedThread();
+
+    is_context_switch_pending = false;
+    if (new_thread == previous_thread) {
+        return;
+    }
+
     Process* const previous_process = system.Kernel().CurrentProcess();
 
     UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -80,23 +463,23 @@ void Scheduler::SwitchContext(Thread* new_thread) {
         if (previous_thread->GetStatus() == ThreadStatus::Running) {
             // This is only the case when a reschedule is triggered without the current thread
             // yielding execution (i.e. an event triggered, system core time-sliced, etc)
-            ready_queue.add(previous_thread, previous_thread->GetPriority(), false);
             previous_thread->SetStatus(ThreadStatus::Ready);
         }
+        previous_thread->SetIsRunning(false);
     }
 
     // Load context of new thread
     if (new_thread) {
+        ASSERT_MSG(new_thread->GetProcessorID() == this->core_id,
+                   "Thread must be assigned to this core.");
         ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready,
                    "Thread must be ready to become running.");
 
         // Cancel any outstanding wakeup events for this thread
         new_thread->CancelWakeupTimer();
-
         current_thread = new_thread;
-
-        ready_queue.remove(new_thread, new_thread->GetPriority());
         new_thread->SetStatus(ThreadStatus::Running);
+        new_thread->SetIsRunning(true);
 
         auto* const thread_owner_process = current_thread->GetOwnerProcess();
         if (previous_process != thread_owner_process) {
@@ -130,124 +513,9 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
     last_context_switch_time = most_recent_switch_ticks;
 }
 
-void Scheduler::Reschedule() {
-    std::lock_guard lock{scheduler_mutex};
-
-    Thread* cur = GetCurrentThread();
-    Thread* next = PopNextReadyThread();
-
-    if (cur && next) {
-        LOG_TRACE(Kernel, "context switch {} -> {}", cur->GetObjectId(), next->GetObjectId());
-    } else if (cur) {
-        LOG_TRACE(Kernel, "context switch {} -> idle", cur->GetObjectId());
-    } else if (next) {
-        LOG_TRACE(Kernel, "context switch idle -> {}", next->GetObjectId());
-    }
-
-    SwitchContext(next);
-}
-
-void Scheduler::AddThread(SharedPtr<Thread> thread) {
-    std::lock_guard lock{scheduler_mutex};
-
-    thread_list.push_back(std::move(thread));
-}
-
-void Scheduler::RemoveThread(Thread* thread) {
-    std::lock_guard lock{scheduler_mutex};
-
-    thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
-                      thread_list.end());
-}
-
-void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
-    std::lock_guard lock{scheduler_mutex};
-
-    ASSERT(thread->GetStatus() == ThreadStatus::Ready);
-    ready_queue.add(thread, priority);
-}
-
-void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
-    std::lock_guard lock{scheduler_mutex};
-
-    ASSERT(thread->GetStatus() == ThreadStatus::Ready);
-    ready_queue.remove(thread, priority);
-}
-
-void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
-    std::lock_guard lock{scheduler_mutex};
-    if (thread->GetPriority() == priority) {
-        return;
-    }
-
-    // If thread was ready, adjust queues
-    if (thread->GetStatus() == ThreadStatus::Ready)
-        ready_queue.adjust(thread, thread->GetPriority(), priority);
-}
-
-Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
-    std::lock_guard lock{scheduler_mutex};
-
-    const u32 mask = 1U << core;
-    for (auto* thread : ready_queue) {
-        if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) {
-            return thread;
-        }
-    }
-    return nullptr;
-}
-
-void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
-    ASSERT(thread != nullptr);
-    // Avoid yielding if the thread isn't even running.
-    ASSERT(thread->GetStatus() == ThreadStatus::Running);
-
-    // Sanity check that the priority is valid
-    ASSERT(thread->GetPriority() < THREADPRIO_COUNT);
-
-    // Yield this thread -- sleep for zero time and force reschedule to different thread
-    GetCurrentThread()->Sleep(0);
-}
-
-void Scheduler::YieldWithLoadBalancing(Thread* thread) {
-    ASSERT(thread != nullptr);
-    const auto priority = thread->GetPriority();
-    const auto core = static_cast<u32>(thread->GetProcessorID());
-
-    // Avoid yielding if the thread isn't even running.
-    ASSERT(thread->GetStatus() == ThreadStatus::Running);
-
-    // Sanity check that the priority is valid
-    ASSERT(priority < THREADPRIO_COUNT);
-
-    // Sleep for zero time to be able to force reschedule to different thread
-    GetCurrentThread()->Sleep(0);
-
-    Thread* suggested_thread = nullptr;
-
-    // Search through all of the cpu cores (except this one) for a suggested thread.
-    // Take the first non-nullptr one
-    for (unsigned cur_core = 0; cur_core < Core::NUM_CPU_CORES; ++cur_core) {
-        const auto res =
-            system.CpuCore(cur_core).Scheduler().GetNextSuggestedThread(core, priority);
-
-        // If scheduler provides a suggested thread
-        if (res != nullptr) {
-            // And its better than the current suggested thread (or is the first valid one)
-            if (suggested_thread == nullptr ||
-                suggested_thread->GetPriority() > res->GetPriority()) {
-                suggested_thread = res;
-            }
-        }
-    }
-
-    // If a suggested thread was found, queue that for this core
-    if (suggested_thread != nullptr)
-        suggested_thread->ChangeCore(core, suggested_thread->GetAffinityMask());
-}
-
-void Scheduler::YieldAndWaitForLoadBalancing(Thread* thread) {
-    UNIMPLEMENTED_MSG("Wait for load balancing thread yield type is not implemented!");
+void Scheduler::Shutdown() {
+    current_thread = nullptr;
+    selected_thread = nullptr;
 }
 
 } // namespace Kernel
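The split this diff introduces between selecting a thread (GlobalScheduler::SelectThread recording selected_thread) and acting on the selection (Scheduler::TryDoContextSwitch/SwitchContext) amounts to a two-phase protocol per core; a toy restatement (ToyCoreSched is illustrative, not a yuzu type):

    // Phase 1 records intent; phase 2 performs the switch only when the
    // selection actually changed, mirroring is_context_switch_pending.
    struct ToyCoreSched {
        int current = -1;   // id of the thread running now (-1 = idle)
        int selected = -1;  // id the global scheduler picked
        bool pending = false;

        void Select(int winner) {   // cf. GlobalScheduler::SelectThread
            selected = winner;
            pending = selected != current;
        }
        void TryDoContextSwitch() { // cf. Scheduler::TryDoContextSwitch
            if (!pending) {
                return;
            }
            pending = false;
            current = selected;     // the real code saves/loads CPU context here
        }
    };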