Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  819
1 file changed, 0 insertions(+), 819 deletions(-)
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
deleted file mode 100644
index 5c63b0b4a..000000000
--- a/src/core/hle/kernel/scheduler.cpp
+++ /dev/null
@@ -1,819 +0,0 @@
// Copyright 2018 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
//
// SelectThreads, Yield functions originally by TuxSH.
// Licensed under GPLv2 or later under an exception provided by the author.

#include <algorithm>
#include <mutex>
#include <set>
#include <unordered_set>
#include <utility>

#include "common/assert.h"
#include "common/bit_util.h"
#include "common/fiber.h"
#include "common/logging/log.h"
#include "core/arm/arm_interface.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/cpu_manager.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/physical_core.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/time_manager.h"

namespace Kernel {

GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {}

GlobalScheduler::~GlobalScheduler() = default;

void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) {
    std::scoped_lock lock{global_list_guard};
    thread_list.push_back(std::move(thread));
}

void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
    std::scoped_lock lock{global_list_guard};
    thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                      thread_list.end());
}

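// Picks the thread each core should run next and load-balances idle cores from the
// per-core "suggested" queues. Returns a bitmask with one bit set for every core
// whose scheduler needs a context switch.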
u32 GlobalScheduler::SelectThreads() {
    ASSERT(is_locked);
    const auto update_thread = [](Thread* thread, Scheduler& sched) {
        std::scoped_lock lock{sched.guard};
        if (thread != sched.selected_thread_set.get()) {
            if (thread == nullptr) {
                ++sched.idle_selection_count;
            }
            sched.selected_thread_set = SharedFrom(thread);
        }
        const bool reschedule_pending =
            sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread);
        sched.is_context_switch_pending = reschedule_pending;
        std::atomic_thread_fence(std::memory_order_seq_cst);
        return reschedule_pending;
    };
    if (!is_reselection_pending.load()) {
        return 0;
    }
    std::array<Thread*, Core::Hardware::NUM_CPU_CORES> top_threads{};

    u32 idle_cores{};

    // Step 1: Get top thread in schedule queue.
    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        Thread* top_thread =
            scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
        if (top_thread != nullptr) {
            // TODO(Blinkhawk): Implement Thread Pinning
        } else {
            idle_cores |= (1U << core);
        }
        top_threads[core] = top_thread;
    }

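    // Steps 2 and 3 run once per idle core: first try to migrate a thread suggested
    // for this core, then fall back to stealing the top thread of another core that
    // still has a runnable successor queued behind it.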
    while (idle_cores != 0) {
        u32 core_id = Common::CountTrailingZeroes32(idle_cores);

        if (!suggested_queue[core_id].empty()) {
            std::array<s32, Core::Hardware::NUM_CPU_CORES> migration_candidates{};
            std::size_t num_candidates = 0;
            auto iter = suggested_queue[core_id].begin();
            Thread* suggested = nullptr;
            // Step 2: Try selecting a suggested thread.
            while (iter != suggested_queue[core_id].end()) {
                suggested = *iter;
                iter++;
                s32 suggested_core_id = suggested->GetProcessorID();
                Thread* top_thread =
                    suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr;
                if (top_thread != suggested) {
                    if (top_thread != nullptr &&
                        top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
                        // A thread with too high a priority is in the way; cancel core migration.
                        suggested = nullptr;
                        break;
                    }
                    TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested);
                    break;
                }
                suggested = nullptr;
                migration_candidates[num_candidates++] = suggested_core_id;
            }
            // Step 3: Select a suggested thread from another core
            if (suggested == nullptr) {
                for (std::size_t i = 0; i < num_candidates; i++) {
                    s32 candidate_core = migration_candidates[i];
                    suggested = top_threads[candidate_core];
                    auto it = scheduled_queue[candidate_core].begin();
                    it++;
                    Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
                    if (next != nullptr) {
                        TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
                                       suggested);
                        top_threads[candidate_core] = next;
                        break;
                    } else {
                        suggested = nullptr;
                    }
                }
            }
            top_threads[core_id] = suggested;
        }

        idle_cores &= ~(1U << core_id);
    }
    u32 cores_needing_context_switch{};
    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        Scheduler& sched = kernel.Scheduler(core);
        ASSERT(top_threads[core] == nullptr ||
               static_cast<u32>(top_threads[core]->GetProcessorID()) == core);
        if (update_thread(top_threads[core], sched)) {
            cores_needing_context_switch |= (1U << core);
        }
    }
    return cores_needing_context_switch;
}

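// Yield without core migration: the thread is moved to the back of its priority queue
// on its own core. This appears to correspond to svcSleepThread with a timeout of 0.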
bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
    ASSERT(is_locked);
    // Note: caller should use critical section, etc.
    if (!yielding_thread->IsRunnable()) {
        // Normally this case shouldn't happen except for SetThreadActivity.
        is_reselection_pending.store(true, std::memory_order_release);
        return false;
    }
    const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
    const u32 priority = yielding_thread->GetPriority();

    // Yield the thread
    Reschedule(priority, core_id, yielding_thread);
    const Thread* const winner = scheduled_queue[core_id].front();
    if (kernel.GetCurrentHostThreadID() != core_id) {
        is_reselection_pending.store(true, std::memory_order_release);
    }

    return AskForReselectionOrMarkRedundant(yielding_thread, winner);
}

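// Yield with core migration: the thread is requeued on its own core, and a thread of
// equal or better priority may be migrated here from another core's suggested queue.
// This appears to correspond to svcSleepThread(-1).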
bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
    ASSERT(is_locked);
    // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
    // etc.
    if (!yielding_thread->IsRunnable()) {
        // Normally this case shouldn't happen except for SetThreadActivity.
        is_reselection_pending.store(true, std::memory_order_release);
        return false;
    }
    const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
    const u32 priority = yielding_thread->GetPriority();

    // Yield the thread
    Reschedule(priority, core_id, yielding_thread);

    std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
    for (std::size_t i = 0; i < current_threads.size(); i++) {
        current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
    }

    Thread* next_thread = scheduled_queue[core_id].front(priority);
    Thread* winner = nullptr;
    for (auto& thread : suggested_queue[core_id]) {
        const s32 source_core = thread->GetProcessorID();
        if (source_core >= 0) {
            if (current_threads[source_core] != nullptr) {
                if (thread == current_threads[source_core] ||
                    current_threads[source_core]->GetPriority() < min_regular_priority) {
                    continue;
                }
            }
        }
        if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
            next_thread->GetPriority() < thread->GetPriority()) {
            if (thread->GetPriority() <= priority) {
                winner = thread;
                break;
            }
        }
    }

    if (winner != nullptr) {
        if (winner != yielding_thread) {
            TransferToCore(winner->GetPriority(), s32(core_id), winner);
        }
    } else {
        winner = next_thread;
    }

    if (kernel.GetCurrentHostThreadID() != core_id) {
        is_reselection_pending.store(true, std::memory_order_release);
    }

    return AskForReselectionOrMarkRedundant(yielding_thread, winner);
}

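// Yield to any thread: the thread is removed from the scheduled queues entirely and
// parked on the suggested queue of its core, and the now-idle core is load-balanced.
// This appears to correspond to svcSleepThread(-2).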
bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
    ASSERT(is_locked);
    // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
    // etc.
    if (!yielding_thread->IsRunnable()) {
        // Normally this case shouldn't happen except for SetThreadActivity.
        is_reselection_pending.store(true, std::memory_order_release);
        return false;
    }
    Thread* winner = nullptr;
    const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());

    // Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one instead
    TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread);

    // If the core is idle, perform load balancing, excluding the threads that have just used this
    // function...
    if (scheduled_queue[core_id].empty()) {
        // Here, "current_threads" is calculated after the yield, unlike the -1 yield above.
        std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
        for (std::size_t i = 0; i < current_threads.size(); i++) {
            current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
        }
        for (auto& thread : suggested_queue[core_id]) {
            const s32 source_core = thread->GetProcessorID();
            if (source_core < 0 || thread == current_threads[source_core]) {
                continue;
            }
            if (current_threads[source_core] == nullptr ||
                current_threads[source_core]->GetPriority() >= min_regular_priority) {
                winner = thread;
            }
            break;
        }
        if (winner != nullptr) {
            if (winner != yielding_thread) {
                TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
            }
        } else {
            winner = yielding_thread;
        }
    } else {
        winner = scheduled_queue[core_id].front();
    }

    if (kernel.GetCurrentHostThreadID() != core_id) {
        is_reselection_pending.store(true, std::memory_order_release);
    }

    return AskForReselectionOrMarkRedundant(yielding_thread, winner);
}

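// Rotates each core's queue at its designated preemption priority so equal-priority
// threads take turns, then tries to pull in a suggested thread at that priority.
// Intended to be invoked periodically so lower-priority threads are not starved.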
void GlobalScheduler::PreemptThreads() {
    ASSERT(is_locked);
    for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
        const u32 priority = preemption_priorities[core_id];

        if (scheduled_queue[core_id].size(priority) > 0) {
            if (scheduled_queue[core_id].size(priority) > 1) {
                scheduled_queue[core_id].front(priority)->IncrementYieldCount();
            }
            scheduled_queue[core_id].yield(priority);
            if (scheduled_queue[core_id].size(priority) > 1) {
                scheduled_queue[core_id].front(priority)->IncrementYieldCount();
            }
        }

        Thread* current_thread =
            scheduled_queue[core_id].empty() ? nullptr : scheduled_queue[core_id].front();
        Thread* winner = nullptr;
        for (auto& thread : suggested_queue[core_id]) {
            const s32 source_core = thread->GetProcessorID();
            if (thread->GetPriority() != priority) {
                continue;
            }
            if (source_core >= 0) {
                Thread* next_thread = scheduled_queue[source_core].empty()
                                          ? nullptr
                                          : scheduled_queue[source_core].front();
                if (next_thread != nullptr && next_thread->GetPriority() < 2) {
                    break;
                }
                if (next_thread == thread) {
                    continue;
                }
            }
            if (current_thread != nullptr &&
                current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
                winner = thread;
                break;
            }
        }

        if (winner != nullptr) {
            TransferToCore(winner->GetPriority(), s32(core_id), winner);
            current_thread =
                winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
        }

        if (current_thread != nullptr && current_thread->GetPriority() > priority) {
            for (auto& thread : suggested_queue[core_id]) {
                const s32 source_core = thread->GetProcessorID();
                if (thread->GetPriority() < priority) {
                    continue;
                }
                if (source_core >= 0) {
                    Thread* next_thread = scheduled_queue[source_core].empty()
                                              ? nullptr
                                              : scheduled_queue[source_core].front();
                    if (next_thread != nullptr && next_thread->GetPriority() < 2) {
                        break;
                    }
                    if (next_thread == thread) {
                        continue;
                    }
                }
                if (current_thread != nullptr &&
                    current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
                    winner = thread;
                    break;
                }
            }

            if (winner != nullptr) {
                TransferToCore(winner->GetPriority(), s32(core_id), winner);
                current_thread = winner;
            }
        }

        is_reselection_pending.store(true, std::memory_order_release);
    }
}

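// Kicks every core in the bitmask: remote cores receive an interrupt, while a caller
// that is itself a guest thread on a core performs its own context switch inline once
// the scheduler lock has been released.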
void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
                                                 Core::EmuThreadHandle global_thread) {
    u32 current_core = global_thread.host_handle;
    bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
                               (current_core < Core::Hardware::NUM_CPU_CORES);
    while (cores_pending_reschedule != 0) {
        u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
        ASSERT(core < Core::Hardware::NUM_CPU_CORES);
        if (!must_context_switch || core != current_core) {
            auto& phys_core = kernel.PhysicalCore(core);
            phys_core.Interrupt();
        } else {
            must_context_switch = true;
        }
        cores_pending_reschedule &= ~(1U << core);
    }
    if (must_context_switch) {
        auto& core_scheduler = kernel.CurrentScheduler();
        kernel.ExitSVCProfile();
        core_scheduler.TryDoContextSwitch();
        kernel.EnterSVCProfile();
    }
}

void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) {
    ASSERT(is_locked);
    suggested_queue[core].add(thread, priority);
}

void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) {
    ASSERT(is_locked);
    suggested_queue[core].remove(thread, priority);
}

void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) {
    ASSERT(is_locked);
    ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
    scheduled_queue[core].add(thread, priority);
}

void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) {
    ASSERT(is_locked);
    ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
    scheduled_queue[core].add(thread, priority, false);
}

void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) {
    ASSERT(is_locked);
    scheduled_queue[core].remove(thread, priority);
    scheduled_queue[core].add(thread, priority);
}

void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) {
    ASSERT(is_locked);
    scheduled_queue[core].remove(thread, priority);
}

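// Migrates a thread to destination_core: it is unscheduled from its source core,
// scheduled on the destination, and re-suggested on the source. Passing -1 as the
// destination removes the thread from the scheduled queues entirely.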
void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
    ASSERT(is_locked);
    const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
    const s32 source_core = thread->GetProcessorID();
    if (source_core == destination_core || !schedulable) {
        return;
    }
    thread->SetProcessorID(destination_core);
    if (source_core >= 0) {
        Unschedule(priority, static_cast<u32>(source_core), thread);
    }
    if (destination_core >= 0) {
        Unsuggest(priority, static_cast<u32>(destination_core), thread);
        Schedule(priority, static_cast<u32>(destination_core), thread);
    }
    if (source_core >= 0) {
        Suggest(priority, static_cast<u32>(source_core), thread);
    }
}

bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread,
                                                       const Thread* winner) {
    if (current_thread == winner) {
        current_thread->IncrementYieldCount();
        return true;
    } else {
        is_reselection_pending.store(true, std::memory_order_release);
        return false;
    }
}

void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
    if (old_flags == thread->scheduling_state) {
        return;
    }
    ASSERT(is_locked);

    if (old_flags == static_cast<u32>(ThreadSchedStatus::Runnable)) {
        // In this case the thread was running, now it's pausing/exiting
        if (thread->processor_id >= 0) {
            Unschedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
        }

        for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
            if (core != static_cast<u32>(thread->processor_id) &&
                ((thread->affinity_mask >> core) & 1) != 0) {
                Unsuggest(thread->current_priority, core, thread);
            }
        }
    } else if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
        // The thread is now set to running from being stopped
        if (thread->processor_id >= 0) {
            Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
        }

        for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
            if (core != static_cast<u32>(thread->processor_id) &&
                ((thread->affinity_mask >> core) & 1) != 0) {
                Suggest(thread->current_priority, core, thread);
            }
        }
    }

    SetReselectionPending();
}

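// Re-inserts a runnable thread into the queues after a priority change: it is removed
// under the old priority and re-added under the new one. The currently running thread
// is prepended so it stays at the front of its new priority level.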
void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priority) {
    if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable)) {
        return;
    }
    ASSERT(is_locked);
    if (thread->processor_id >= 0) {
        Unschedule(old_priority, static_cast<u32>(thread->processor_id), thread);
    }

    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        if (core != static_cast<u32>(thread->processor_id) &&
            ((thread->affinity_mask >> core) & 1) != 0) {
            Unsuggest(old_priority, core, thread);
        }
    }

    if (thread->processor_id >= 0) {
        if (thread == kernel.CurrentScheduler().GetCurrentThread()) {
            SchedulePrepend(thread->current_priority, static_cast<u32>(thread->processor_id),
                            thread);
        } else {
            Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
        }
    }

    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        if (core != static_cast<u32>(thread->processor_id) &&
            ((thread->affinity_mask >> core) & 1) != 0) {
            Suggest(thread->current_priority, core, thread);
        }
    }
    thread->IncrementYieldCount();
    SetReselectionPending();
}

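// Analogous to the priority case above, but for affinity changes: the thread is pulled
// out of every queue implied by the old mask and core, then re-inserted according to
// the new ones.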
void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask,
                                                 s32 old_core) {
    if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable) ||
        thread->current_priority >= THREADPRIO_COUNT) {
        return;
    }
    ASSERT(is_locked);

    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        if (((old_affinity_mask >> core) & 1) != 0) {
            if (core == static_cast<u32>(old_core)) {
                Unschedule(thread->current_priority, core, thread);
            } else {
                Unsuggest(thread->current_priority, core, thread);
            }
        }
    }

    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        if (((thread->affinity_mask >> core) & 1) != 0) {
            if (core == static_cast<u32>(thread->processor_id)) {
                Schedule(thread->current_priority, core, thread);
            } else {
                Suggest(thread->current_priority, core, thread);
            }
        }
    }

    thread->IncrementYieldCount();
    SetReselectionPending();
}

void GlobalScheduler::Shutdown() {
    for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        scheduled_queue[core].clear();
        suggested_queue[core].clear();
    }
    thread_list.clear();
}

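// The global scheduler lock is re-entrant per emulated thread: nested Lock() calls by
// the owner only increment a counter, and the final Unlock() runs thread selection and
// kicks the cores that need to reschedule.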
void GlobalScheduler::Lock() {
    Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID();
    ASSERT(!current_thread.IsInvalid());
    if (current_thread == current_owner) {
        ++scope_lock;
    } else {
        inner_lock.lock();
        is_locked = true;
        current_owner = current_thread;
        ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle());
        scope_lock = 1;
    }
}

void GlobalScheduler::Unlock() {
    if (--scope_lock != 0) {
        ASSERT(scope_lock > 0);
        return;
    }
    u32 cores_pending_reschedule = SelectThreads();
    Core::EmuThreadHandle leaving_thread = current_owner;
    current_owner = Core::EmuThreadHandle::InvalidHandle();
    scope_lock = 1;
    is_locked = false;
    inner_lock.unlock();
    EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread);
}

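// Each per-core Scheduler owns a host fiber (switch_fiber) that serves as the pivot
// for context switches: outgoing guest threads yield to it, and it yields to the next
// selected thread. See SwitchContext() and SwitchToCurrent() below.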
Scheduler::Scheduler(Core::System& system, std::size_t core_id)
    : system(system), core_id(core_id) {
    switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this);
}

Scheduler::~Scheduler() = default;

bool Scheduler::HaveReadyThreads() const {
    return system.GlobalScheduler().HaveReadyThreads(core_id);
}

Thread* Scheduler::GetCurrentThread() const {
    if (current_thread) {
        return current_thread.get();
    }
    return idle_thread.get();
}

Thread* Scheduler::GetSelectedThread() const {
    return selected_thread.get();
}

u64 Scheduler::GetLastContextSwitchTicks() const {
    return last_context_switch_time;
}

void Scheduler::TryDoContextSwitch() {
    auto& phys_core = system.Kernel().CurrentPhysicalCore();
    if (phys_core.IsInterrupted()) {
        phys_core.ClearInterrupt();
    }
    guard.lock();
    if (is_context_switch_pending) {
        SwitchContext();
    } else {
        guard.unlock();
    }
}

void Scheduler::OnThreadStart() {
    SwitchContextStep2();
}

void Scheduler::Unload(Thread* thread) {
    if (thread) {
        thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
        thread->SetIsRunning(false);
        if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) {
            system.ArmInterface(core_id).ExceptionalExit();
            thread->SetContinuousOnSVC(false);
        }
        if (!thread->IsHLEThread() && !thread->HasExited()) {
            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
            cpu_core.SaveContext(thread->GetContext32());
            cpu_core.SaveContext(thread->GetContext64());
            // Save the TPIDR_EL0 system register in case it was modified.
            thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
            cpu_core.ClearExclusiveState();
        }
        thread->context_guard.unlock();
    }
}

void Scheduler::Unload() {
    Unload(current_thread.get());
}

void Scheduler::Reload(Thread* thread) {
    if (thread) {
        ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
                   "Thread must be runnable.");

        // Mark the thread as running again and clear its "was running" state
        thread->SetIsRunning(true);
        thread->SetWasRunning(false);
        thread->last_running_ticks = system.CoreTiming().GetCPUTicks();

        auto* const thread_owner_process = thread->GetOwnerProcess();
        if (thread_owner_process != nullptr) {
            system.Kernel().MakeCurrentProcess(thread_owner_process);
        }
        if (!thread->IsHLEThread()) {
            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
            cpu_core.LoadContext(thread->GetContext32());
            cpu_core.LoadContext(thread->GetContext64());
            cpu_core.SetTlsAddress(thread->GetTLSAddress());
            cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
            cpu_core.ClearExclusiveState();
        }
    }
}

void Scheduler::Reload() {
    Reload(current_thread.get());
}

void Scheduler::SwitchContextStep2() {
    // Load context of new thread
    Reload(selected_thread.get());

    TryDoContextSwitch();
}

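// Saves the outgoing thread's context and yields to switch_fiber, which picks up and
// resumes the next selected thread. Entered with 'guard' held; the lock is released
// before the fiber switch.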
void Scheduler::SwitchContext() {
    current_thread_prev = current_thread;
    selected_thread = selected_thread_set;
    Thread* previous_thread = current_thread_prev.get();
    Thread* new_thread = selected_thread.get();
    current_thread = selected_thread;

    is_context_switch_pending = false;

    if (new_thread == previous_thread) {
        guard.unlock();
        return;
    }

    Process* const previous_process = system.Kernel().CurrentProcess();

    UpdateLastContextSwitchTime(previous_thread, previous_process);

    // Save context for previous thread
    Unload(previous_thread);

    std::shared_ptr<Common::Fiber>* old_context;
    if (previous_thread != nullptr) {
        old_context = &previous_thread->GetHostContext();
    } else {
        old_context = &idle_thread->GetHostContext();
    }
    guard.unlock();

    Common::Fiber::YieldTo(*old_context, switch_fiber);
    // When execution resumes here, this thread may have been moved to a scheduler on
    // another core, so re-query the current scheduler before finishing the switch.
    auto& next_scheduler = system.Kernel().CurrentScheduler();
    next_scheduler.SwitchContextStep2();
}

void Scheduler::OnSwitch(void* this_scheduler) {
    Scheduler* sched = static_cast<Scheduler*>(this_scheduler);
    sched->SwitchToCurrent();
}

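// Body of switch_fiber: repeatedly loads the selected thread and yields to its host
// context (or the idle thread's when nothing is runnable), leaving the inner loop only
// when a new context switch is pending or the thread may no longer run on this core.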
void Scheduler::SwitchToCurrent() {
    while (true) {
        {
            std::scoped_lock lock{guard};
            selected_thread = selected_thread_set;
            current_thread = selected_thread;
            is_context_switch_pending = false;
        }
        const auto is_switch_pending = [this] {
            std::scoped_lock lock{guard};
            return is_context_switch_pending;
        };
        do {
            if (current_thread != nullptr && !current_thread->IsHLEThread()) {
                current_thread->context_guard.lock();
                if (!current_thread->IsRunnable()) {
                    current_thread->context_guard.unlock();
                    break;
                }
                if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
                    current_thread->context_guard.unlock();
                    break;
                }
            }
            std::shared_ptr<Common::Fiber>* next_context;
            if (current_thread != nullptr) {
                next_context = &current_thread->GetHostContext();
            } else {
                next_context = &idle_thread->GetHostContext();
            }
            Common::Fiber::YieldTo(switch_fiber, *next_context);
        } while (!is_switch_pending());
    }
}

void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
    const u64 prev_switch_ticks = last_context_switch_time;
    const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
    const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;

    if (thread != nullptr) {
        thread->UpdateCPUTimeTicks(update_ticks);
    }

    if (process != nullptr) {
        process->UpdateCPUTimeTicks(update_ticks);
    }

    last_context_switch_time = most_recent_switch_ticks;
}

void Scheduler::Initialize() {
    std::string name = "Idle Thread Id:" + std::to_string(core_id);
    std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
    void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
    ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
    auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
                                     nullptr, std::move(init_func), init_func_parameter);
    idle_thread = std::move(thread_res).Unwrap();
}

void Scheduler::Shutdown() {
    current_thread = nullptr;
    selected_thread = nullptr;
}

SchedulerLock::SchedulerLock(KernelCore& kernel) : kernel{kernel} {
    kernel.GlobalScheduler().Lock();
}

SchedulerLock::~SchedulerLock() {
    kernel.GlobalScheduler().Unlock();
}

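// Acquires the scheduler lock for its own lifetime and schedules a wakeup time event
// for the given thread, either explicitly via Release() or on destruction, unless the
// sleep was cancelled beforehand.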
SchedulerLockAndSleep::SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle,
                                             Thread* time_task, s64 nanoseconds)
    : SchedulerLock{kernel}, event_handle{event_handle}, time_task{time_task},
      nanoseconds{nanoseconds} {
    event_handle = InvalidHandle;
}

SchedulerLockAndSleep::~SchedulerLockAndSleep() {
    if (sleep_cancelled) {
        return;
    }
    auto& time_manager = kernel.TimeManager();
    time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
}

void SchedulerLockAndSleep::Release() {
    if (sleep_cancelled) {
        return;
    }
    auto& time_manager = kernel.TimeManager();
    time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
    sleep_cancelled = true;
}

} // namespace Kernel