| author    | 2020-02-24 22:04:12 -0400 |
|-----------|---------------------------|
| committer | 2020-06-27 11:35:06 -0400 |
| commit    | e31425df3877636c098ec7426ebd2067920715cb (patch) |
| tree      | 5c0fc518a4ebb8413c491b43a9fdd99450c7bd80 /src/core/hle/kernel/scheduler.cpp |
| parent    | Merge pull request #3396 from FernandoS27/prometheus-1 (diff) |
| download  | yuzu-e31425df3877636c098ec7426ebd2067920715cb.tar.gz / .tar.xz / .zip |
General: Recover Prometheus project from hard drive failure

This commit implements CPU interrupts, replaces cycle timing with host
timing, reworks the kernel's scheduler, introduces the Idle and Suspended
states, recreates the bootmanager, and initializes the multicore system.
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')

-rw-r--r--  src/core/hle/kernel/scheduler.cpp  415

1 file changed, 296 insertions, 119 deletions
```diff
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 1140c72a3..5166020a0 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -11,11 +11,15 @@
 #include <utility>
 
 #include "common/assert.h"
+#include "common/bit_util.h"
+#include "common/fiber.h"
 #include "common/logging/log.h"
 #include "core/arm/arm_interface.h"
 #include "core/core.h"
 #include "core/core_timing.h"
+#include "core/cpu_manager.h"
 #include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/physical_core.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/time_manager.h"
@@ -27,78 +31,108 @@ GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {}
 GlobalScheduler::~GlobalScheduler() = default;
 
 void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) {
+    global_list_guard.lock();
     thread_list.push_back(std::move(thread));
+    global_list_guard.unlock();
 }
 
 void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
+    global_list_guard.lock();
     thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                       thread_list.end());
+    global_list_guard.unlock();
 }
 
-void GlobalScheduler::UnloadThread(std::size_t core) {
-    Scheduler& sched = kernel.Scheduler(core);
-    sched.UnloadThread();
-}
-
-void GlobalScheduler::SelectThread(std::size_t core) {
+u32 GlobalScheduler::SelectThreads() {
     const auto update_thread = [](Thread* thread, Scheduler& sched) {
+        sched.guard.lock();
         if (thread != sched.selected_thread.get()) {
             if (thread == nullptr) {
                 ++sched.idle_selection_count;
             }
             sched.selected_thread = SharedFrom(thread);
         }
-        sched.is_context_switch_pending = sched.selected_thread != sched.current_thread;
+        const bool reschedule_pending = sched.selected_thread != sched.current_thread;
+        sched.is_context_switch_pending = reschedule_pending;
         std::atomic_thread_fence(std::memory_order_seq_cst);
+        sched.guard.unlock();
+        return reschedule_pending;
     };
-    Scheduler& sched = kernel.Scheduler(core);
-    Thread* current_thread = nullptr;
-    // Step 1: Get top thread in schedule queue.
-    current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
-    if (current_thread) {
-        update_thread(current_thread, sched);
-        return;
+    if (!is_reselection_pending.load()) {
+        return 0;
     }
-    // Step 2: Try selecting a suggested thread.
-    Thread* winner = nullptr;
-    std::set<s32> sug_cores;
-    for (auto thread : suggested_queue[core]) {
-        s32 this_core = thread->GetProcessorID();
-        Thread* thread_on_core = nullptr;
-        if (this_core >= 0) {
-            thread_on_core = scheduled_queue[this_core].front();
-        }
-        if (this_core < 0 || thread != thread_on_core) {
-            winner = thread;
-            break;
-        }
-        sug_cores.insert(this_core);
-    }
-    // if we got a suggested thread, select it, else do a second pass.
-    if (winner && winner->GetPriority() > 2) {
-        if (winner->IsRunning()) {
-            UnloadThread(static_cast<u32>(winner->GetProcessorID()));
+    std::array<Thread*, Core::Hardware::NUM_CPU_CORES> top_threads{};
+
+    u32 idle_cores{};
+
+    // Step 1: Get top thread in schedule queue.
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+        Thread* top_thread =
+            scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
+        if (top_thread != nullptr) {
+            // TODO(Blinkhawk): Implement Thread Pinning
+        } else {
+            idle_cores |= (1ul << core);
+        }
+        top_threads[core] = top_thread;
+    }
+
+    while (idle_cores != 0) {
+        u32 core_id = Common::CountTrailingZeroes32(idle_cores);
+
+        if (!suggested_queue[core_id].empty()) {
+            std::array<s32, Core::Hardware::NUM_CPU_CORES> migration_candidates{};
+            std::size_t num_candidates = 0;
+            auto iter = suggested_queue[core_id].begin();
+            Thread* suggested = nullptr;
+            // Step 2: Try selecting a suggested thread.
+            while (iter != suggested_queue[core_id].end()) {
+                suggested = *iter;
+                iter++;
+                s32 suggested_core_id = suggested->GetProcessorID();
+                Thread* top_thread =
+                    suggested_core_id > 0 ? top_threads[suggested_core_id] : nullptr;
+                if (top_thread != suggested) {
+                    if (top_thread != nullptr &&
+                        top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
+                        suggested = nullptr;
+                        break;
+                        // There's a too high thread to do core migration, cancel
+                    }
+                    TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested);
+                    break;
+                }
+                migration_candidates[num_candidates++] = suggested_core_id;
+            }
+            // Step 3: Select a suggested thread from another core
+            if (suggested == nullptr) {
+                for (std::size_t i = 0; i < num_candidates; i++) {
+                    s32 candidate_core = migration_candidates[i];
+                    suggested = top_threads[candidate_core];
+                    auto it = scheduled_queue[candidate_core].begin();
+                    it++;
+                    Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
+                    if (next != nullptr) {
+                        TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
+                                       suggested);
+                        top_threads[candidate_core] = next;
+                        break;
+                    }
+                }
+            }
+            top_threads[core_id] = suggested;
         }
-        TransferToCore(winner->GetPriority(), static_cast<s32>(core), winner);
-        update_thread(winner, sched);
-        return;
+
+        idle_cores &= ~(1ul << core_id);
     }
-    // Step 3: Select a suggested thread from another core
-    for (auto& src_core : sug_cores) {
-        auto it = scheduled_queue[src_core].begin();
-        it++;
-        if (it != scheduled_queue[src_core].end()) {
-            Thread* thread_on_core = scheduled_queue[src_core].front();
-            Thread* to_change = *it;
-            if (thread_on_core->IsRunning() || to_change->IsRunning()) {
-                UnloadThread(static_cast<u32>(src_core));
-            }
-            TransferToCore(thread_on_core->GetPriority(), static_cast<s32>(core), thread_on_core);
-            current_thread = thread_on_core;
-            break;
+    u32 cores_needing_context_switch{};
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+        Scheduler& sched = kernel.Scheduler(core);
+        if (update_thread(top_threads[core], sched)) {
+            cores_needing_context_switch |= (1ul << core);
         }
     }
-    update_thread(current_thread, sched);
+    return cores_needing_context_switch;
 }
 
 bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
```
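The rewritten `SelectThreads()` replaces the old per-core `SelectThread(core)` passes with a single routine that records cores with no runnable thread in a `u32` bitmask and visits them by peeling off one set bit per iteration with `Common::CountTrailingZeroes32`. A standalone sketch of that bit-walk idiom, using C++20's `std::countr_zero` in place of yuzu's helper (the core numbers are invented for the demo):

```cpp
#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
    // Pretend cores 1, 3 and 5 found no runnable thread during step 1.
    std::uint32_t idle_cores = (1u << 1) | (1u << 3) | (1u << 5);

    // Visit each idle core exactly once, lowest index first, by peeling
    // off the least-significant set bit on every iteration.
    while (idle_cores != 0) {
        const std::uint32_t core_id = std::countr_zero(idle_cores);
        std::printf("trying to migrate work to idle core %u\n", core_id);
        idle_cores &= ~(1u << core_id); // clear the bit we just handled
    }
    return 0;
}
```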
```diff
@@ -153,9 +187,6 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
 
     if (winner != nullptr) {
         if (winner != yielding_thread) {
-            if (winner->IsRunning()) {
-                UnloadThread(static_cast<u32>(winner->GetProcessorID()));
-            }
             TransferToCore(winner->GetPriority(), s32(core_id), winner);
         }
     } else {
@@ -195,9 +226,6 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
     }
     if (winner != nullptr) {
         if (winner != yielding_thread) {
-            if (winner->IsRunning()) {
-                UnloadThread(static_cast<u32>(winner->GetProcessorID()));
-            }
             TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
         }
     } else {
@@ -213,7 +241,9 @@ void GlobalScheduler::PreemptThreads() {
         const u32 priority = preemption_priorities[core_id];
 
         if (scheduled_queue[core_id].size(priority) > 0) {
-            scheduled_queue[core_id].front(priority)->IncrementYieldCount();
+            if (scheduled_queue[core_id].size(priority) > 1) {
+                scheduled_queue[core_id].front(priority)->IncrementYieldCount();
+            }
             scheduled_queue[core_id].yield(priority);
             if (scheduled_queue[core_id].size(priority) > 1) {
                 scheduled_queue[core_id].front(priority)->IncrementYieldCount();
```
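The `PreemptThreads` change above fixes the yield accounting: the thread at the front of a priority bucket only has its yield count incremented when another thread of the same priority actually exists to displace it. A toy model of that rule (the `Bucket` type and its members are made up for illustration; the real queue is yuzu's `MultiLevelQueue` with per-thread counters):

```cpp
#include <cstdio>
#include <deque>
#include <map>
#include <string>

// Toy model of one priority bucket: Yield() rotates the front thread to the
// back, but the thread's yield counter is only bumped when another thread of
// the same priority exists to take its place -- the guard this hunk adds.
struct Bucket {
    std::deque<std::string> threads;
    std::map<std::string, int> yield_counts;

    void Yield() {
        if (threads.size() > 1) {
            ++yield_counts[threads.front()]; // a real yield: someone else runs
            threads.push_back(threads.front());
            threads.pop_front();
        }
        // A bucket with zero or one threads cannot rotate, so nothing is counted.
    }
};

int main() {
    Bucket two{{"A", "B"}, {}};
    two.Yield(); // A yields to B
    two.Yield(); // B yields back to A
    Bucket one{{"C"}, {}};
    one.Yield(); // no-op: C has no competitor
    std::printf("A=%d B=%d C=%d\n", two.yield_counts["A"], two.yield_counts["B"],
                one.yield_counts["C"]); // prints A=1 B=1 C=0
}
```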
```diff
@@ -247,9 +277,6 @@ void GlobalScheduler::PreemptThreads() {
             }
 
             if (winner != nullptr) {
-                if (winner->IsRunning()) {
-                    UnloadThread(static_cast<u32>(winner->GetProcessorID()));
-                }
                 TransferToCore(winner->GetPriority(), s32(core_id), winner);
                 current_thread =
                     winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
@@ -280,9 +307,6 @@ void GlobalScheduler::PreemptThreads() {
             }
 
             if (winner != nullptr) {
-                if (winner->IsRunning()) {
-                    UnloadThread(static_cast<u32>(winner->GetProcessorID()));
-                }
                 TransferToCore(winner->GetPriority(), s32(core_id), winner);
                 current_thread = winner;
             }
@@ -292,6 +316,28 @@ void GlobalScheduler::PreemptThreads() {
     }
 }
 
+void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
+                                                 Core::EmuThreadHandle global_thread) {
+    u32 current_core = global_thread.host_handle;
+    bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
+                               (current_core < Core::Hardware::NUM_CPU_CORES);
+    while (cores_pending_reschedule != 0) {
+        u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
+        ASSERT(core < Core::Hardware::NUM_CPU_CORES);
+        if (!must_context_switch || core != current_core) {
+            auto& phys_core = kernel.PhysicalCore(core);
+            phys_core.Interrupt();
+        } else {
+            must_context_switch = true;
+        }
+        cores_pending_reschedule &= ~(1ul << core);
+    }
+    if (must_context_switch) {
+        auto& core_scheduler = kernel.CurrentScheduler();
+        core_scheduler.TryDoContextSwitch();
+    }
+}
+
 void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) {
     suggested_queue[core].add(thread, priority);
 }
```
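The new `EnableInterruptAndSchedule` walks the mask of cores whose selected thread changed, interrupts every remote core, and defers the calling core's own context switch until after all interrupts are sent. A simplified sketch of that dispatch order, with a hypothetical atomic flag standing in for `PhysicalCore::Interrupt()` (the real function also derives `must_context_switch` from the caller's guest handle, which this sketch omits):

```cpp
#include <array>
#include <atomic>
#include <bit>
#include <cstdint>
#include <cstdio>

constexpr std::uint32_t kNumCores = 4;

// Hypothetical stand-in for each core's interrupt line.
std::array<std::atomic<bool>, kNumCores> interrupt_flags{};

void DispatchReschedules(std::uint32_t pending_mask, std::uint32_t current_core) {
    bool must_context_switch = false;
    while (pending_mask != 0) {
        const std::uint32_t core = std::countr_zero(pending_mask);
        if (core == current_core) {
            // Never interrupt ourselves; remember to switch after signalling.
            must_context_switch = true;
        } else {
            interrupt_flags[core].store(true, std::memory_order_release);
            std::printf("interrupted core %u\n", core);
        }
        pending_mask &= ~(1u << core);
    }
    if (must_context_switch) {
        std::printf("core %u switches context locally, last\n", current_core);
    }
}

int main() {
    DispatchReschedules(0b1011, /*current_core=*/1);
}
```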
```diff
@@ -349,6 +395,108 @@ bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread,
     }
 }
 
+void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
+    if (old_flags == thread->scheduling_state) {
+        return;
+    }
+
+    if (static_cast<ThreadSchedStatus>(old_flags & static_cast<u32>(ThreadSchedMasks::LowMask)) ==
+        ThreadSchedStatus::Runnable) {
+        // In this case the thread was running, now it's pausing/exitting
+        if (thread->processor_id >= 0) {
+            Unschedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
+        }
+
+        for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+            if (core != static_cast<u32>(thread->processor_id) &&
+                ((thread->affinity_mask >> core) & 1) != 0) {
+                Unsuggest(thread->current_priority, core, thread);
+            }
+        }
+    } else if (thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable) {
+        // The thread is now set to running from being stopped
+        if (thread->processor_id >= 0) {
+            Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
+        }
+
+        for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+            if (core != static_cast<u32>(thread->processor_id) &&
+                ((thread->affinity_mask >> core) & 1) != 0) {
+                Suggest(thread->current_priority, core, thread);
+            }
+        }
+    }
+
+    SetReselectionPending();
+}
+
+void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priority) {
+    if (thread->GetSchedulingStatus() != ThreadSchedStatus::Runnable) {
+        return;
+    }
+    if (thread->processor_id >= 0) {
+        Unschedule(old_priority, static_cast<u32>(thread->processor_id), thread);
+    }
+
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+        if (core != static_cast<u32>(thread->processor_id) &&
+            ((thread->affinity_mask >> core) & 1) != 0) {
+            Unsuggest(old_priority, core, thread);
+        }
+    }
+
+    if (thread->processor_id >= 0) {
+        // TODO(Blinkhawk): compare it with current thread running on current core, instead of
+        // checking running
+        if (thread->IsRunning()) {
+            SchedulePrepend(thread->current_priority, static_cast<u32>(thread->processor_id),
+                            thread);
+        } else {
+            Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
+        }
+    }
+
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+        if (core != static_cast<u32>(thread->processor_id) &&
+            ((thread->affinity_mask >> core) & 1) != 0) {
+            Suggest(thread->current_priority, core, thread);
+        }
+    }
+    thread->IncrementYieldCount();
+    SetReselectionPending();
+}
+
+void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask,
+                                                 s32 old_core) {
+    if (thread->GetSchedulingStatus() != ThreadSchedStatus::Runnable ||
+        thread->current_priority >= THREADPRIO_COUNT) {
+        return;
+    }
+
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+        if (((old_affinity_mask >> core) & 1) != 0) {
+            if (core == static_cast<u32>(old_core)) {
+                Unschedule(thread->current_priority, core, thread);
+            } else {
+                Unsuggest(thread->current_priority, core, thread);
+            }
+        }
+    }
+
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
+        if (((thread->affinity_mask >> core) & 1) != 0) {
+            if (core == static_cast<u32>(thread->processor_id)) {
+                Schedule(thread->current_priority, core, thread);
+            } else {
+                Suggest(thread->current_priority, core, thread);
+            }
+        }
+    }
+
+    thread->IncrementYieldCount();
+    SetReselectionPending();
+}
+
 void GlobalScheduler::Shutdown() {
     for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         scheduled_queue[core].clear();
```
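All three `AdjustScheduling*` helpers apply the same placement rule over the thread's affinity mask: the assigned core gets the thread in its scheduled queue, and every other core the mask allows only receives it as a migration suggestion. A minimal sketch of that rule (names invented for the demo; the real code calls `Schedule`/`Suggest` with the thread's priority):

```cpp
#include <cstdint>
#include <cstdio>

constexpr std::uint32_t kNumCores = 4;

// Sketch of the placement rule: schedule on the thread's own core, and merely
// *suggest* (offer for migration) on every other core in its affinity mask.
void Place(std::uint64_t affinity_mask, std::int32_t processor_id) {
    for (std::uint32_t core = 0; core < kNumCores; core++) {
        if (((affinity_mask >> core) & 1) == 0) {
            continue; // the thread may not run on this core at all
        }
        if (core == static_cast<std::uint32_t>(processor_id)) {
            std::printf("core %u: schedule\n", core);
        } else {
            std::printf("core %u: suggest\n", core);
        }
    }
}

int main() {
    Place(/*affinity_mask=*/0b1101, /*processor_id=*/0);
}
```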
```diff
@@ -374,13 +522,12 @@ void GlobalScheduler::Unlock() {
         ASSERT(scope_lock > 0);
         return;
     }
-    for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
-        SelectThread(i);
-    }
+    u32 cores_pending_reschedule = SelectThreads();
+    Core::EmuThreadHandle leaving_thread = current_owner;
     current_owner = Core::EmuThreadHandle::InvalidHandle();
     scope_lock = 1;
     inner_lock.unlock();
-    // TODO(Blinkhawk): Setup the interrupts and change context on current core.
+    EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread);
 }
 
 Scheduler::Scheduler(Core::System& system, std::size_t core_id)
```
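`Unlock()` now computes the set of cores needing a reschedule while the scheduler lock is still held, and only delivers the interrupts after `inner_lock.unlock()`, so cores woken by the interrupt never contend with the selection itself. A sketch of that compute-under-lock, notify-after-unlock pattern (all names hypothetical):

```cpp
#include <cstdint>
#include <cstdio>
#include <mutex>

std::mutex inner_lock;

std::uint32_t SelectThreadsLocked() {
    return 0b0110; // pretend cores 1 and 2 changed their selected thread
}

void NotifyCores(std::uint32_t mask) {
    std::printf("interrupting cores with mask 0x%x\n", static_cast<unsigned>(mask));
}

void Unlock() {
    std::uint32_t pending;
    {
        std::lock_guard lk{inner_lock};
        pending = SelectThreadsLocked(); // state changes happen under the lock
    }
    NotifyCores(pending); // side effects happen after the lock is released
}

int main() {
    Unlock();
}
```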
```diff
@@ -393,56 +540,83 @@ bool Scheduler::HaveReadyThreads() const {
 }
 
 Thread* Scheduler::GetCurrentThread() const {
-    return current_thread.get();
+    if (current_thread) {
+        return current_thread.get();
+    }
+    return idle_thread.get();
 }
 
 Thread* Scheduler::GetSelectedThread() const {
     return selected_thread.get();
 }
 
-void Scheduler::SelectThreads() {
-    system.GlobalScheduler().SelectThread(core_id);
-}
-
 u64 Scheduler::GetLastContextSwitchTicks() const {
     return last_context_switch_time;
 }
 
 void Scheduler::TryDoContextSwitch() {
+    auto& phys_core = system.Kernel().CurrentPhysicalCore();
+    if (phys_core.IsInterrupted()) {
+        phys_core.ClearInterrupt();
+    }
+    guard.lock();
     if (is_context_switch_pending) {
         SwitchContext();
+    } else {
+        guard.unlock();
     }
 }
 
-void Scheduler::UnloadThread() {
-    Thread* const previous_thread = GetCurrentThread();
-    Process* const previous_process = system.Kernel().CurrentProcess();
+void Scheduler::OnThreadStart() {
+    SwitchContextStep2();
+}
 
-    UpdateLastContextSwitchTime(previous_thread, previous_process);
+void Scheduler::SwitchContextStep2() {
+    Thread* previous_thread = current_thread.get();
+    Thread* new_thread = selected_thread.get();
 
-    // Save context for previous thread
-    if (previous_thread) {
-        system.ArmInterface(core_id).SaveContext(previous_thread->GetContext32());
-        system.ArmInterface(core_id).SaveContext(previous_thread->GetContext64());
-        // Save the TPIDR_EL0 system register in case it was modified.
-        previous_thread->SetTPIDR_EL0(system.ArmInterface(core_id).GetTPIDR_EL0());
+    // Load context of new thread
+    Process* const previous_process =
+        previous_thread != nullptr ? previous_thread->GetOwnerProcess() : nullptr;
 
-        if (previous_thread->GetStatus() == ThreadStatus::Running) {
-            // This is only the case when a reschedule is triggered without the current thread
-            // yielding execution (i.e. an event triggered, system core time-sliced, etc)
-            previous_thread->SetStatus(ThreadStatus::Ready);
+    if (new_thread) {
+        new_thread->context_guard.lock();
+        ASSERT_MSG(new_thread->GetProcessorID() == s32(this->core_id),
+                   "Thread must be assigned to this core.");
+        ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready,
+                   "Thread must be ready to become running.");
+
+        // Cancel any outstanding wakeup events for this thread
+        current_thread = SharedFrom(new_thread);
+        new_thread->SetStatus(ThreadStatus::Running);
+        new_thread->SetIsRunning(true);
+
+        auto* const thread_owner_process = current_thread->GetOwnerProcess();
+        if (previous_process != thread_owner_process && thread_owner_process != nullptr) {
+            system.Kernel().MakeCurrentProcess(thread_owner_process);
         }
-        previous_thread->SetIsRunning(false);
+        if (!new_thread->IsHLEThread()) {
+            auto& cpu_core = system.ArmInterface(core_id);
+            cpu_core.LoadContext(new_thread->GetContext32());
+            cpu_core.LoadContext(new_thread->GetContext64());
+            cpu_core.SetTlsAddress(new_thread->GetTLSAddress());
+            cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
+        }
+    } else {
+        current_thread = nullptr;
+        // Note: We do not reset the current process and current page table when idling because
+        // technically we haven't changed processes, our threads are just paused.
     }
-    current_thread = nullptr;
+    guard.unlock();
 }
 
 void Scheduler::SwitchContext() {
-    Thread* const previous_thread = GetCurrentThread();
-    Thread* const new_thread = GetSelectedThread();
+    Thread* previous_thread = current_thread.get();
+    Thread* new_thread = selected_thread.get();
 
     is_context_switch_pending = false;
     if (new_thread == previous_thread) {
+        guard.unlock();
         return;
     }
 
@@ -452,51 +626,44 @@ void Scheduler::SwitchContext() {
 
     // Save context for previous thread
     if (previous_thread) {
-        system.ArmInterface(core_id).SaveContext(previous_thread->GetContext32());
-        system.ArmInterface(core_id).SaveContext(previous_thread->GetContext64());
-        // Save the TPIDR_EL0 system register in case it was modified.
-        previous_thread->SetTPIDR_EL0(system.ArmInterface(core_id).GetTPIDR_EL0());
+        if (!previous_thread->IsHLEThread()) {
+            auto& cpu_core = system.ArmInterface(core_id);
+            cpu_core.SaveContext(previous_thread->GetContext32());
+            cpu_core.SaveContext(previous_thread->GetContext64());
+            // Save the TPIDR_EL0 system register in case it was modified.
+            previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
 
+        }
         if (previous_thread->GetStatus() == ThreadStatus::Running) {
-            // This is only the case when a reschedule is triggered without the current thread
-            // yielding execution (i.e. an event triggered, system core time-sliced, etc)
             previous_thread->SetStatus(ThreadStatus::Ready);
         }
         previous_thread->SetIsRunning(false);
+        previous_thread->context_guard.unlock();
     }
 
-    // Load context of new thread
-    if (new_thread) {
-        ASSERT_MSG(new_thread->GetProcessorID() == s32(this->core_id),
-                   "Thread must be assigned to this core.");
-        ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready,
-                   "Thread must be ready to become running.");
-
-        // Cancel any outstanding wakeup events for this thread
-        new_thread->CancelWakeupTimer();
-        current_thread = SharedFrom(new_thread);
-        new_thread->SetStatus(ThreadStatus::Running);
-        new_thread->SetIsRunning(true);
-
-        auto* const thread_owner_process = current_thread->GetOwnerProcess();
-        if (previous_process != thread_owner_process) {
-            system.Kernel().MakeCurrentProcess(thread_owner_process);
-        }
+    std::shared_ptr<Common::Fiber> old_context;
+    if (previous_thread != nullptr) {
+        old_context = previous_thread->GetHostContext();
+    } else {
+        old_context = idle_thread->GetHostContext();
+    }
 
-        system.ArmInterface(core_id).LoadContext(new_thread->GetContext32());
-        system.ArmInterface(core_id).LoadContext(new_thread->GetContext64());
-        system.ArmInterface(core_id).SetTlsAddress(new_thread->GetTLSAddress());
-        system.ArmInterface(core_id).SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
+    std::shared_ptr<Common::Fiber> next_context;
+    if (new_thread != nullptr) {
+        next_context = new_thread->GetHostContext();
     } else {
-        current_thread = nullptr;
-        // Note: We do not reset the current process and current page table when idling because
-        // technically we haven't changed processes, our threads are just paused.
+        next_context = idle_thread->GetHostContext();
     }
+
+    Common::Fiber::YieldTo(old_context, next_context);
+    /// When a thread wakes up, the scheduler may have changed to other in another core.
+    auto& next_scheduler = system.Kernel().CurrentScheduler();
+    next_scheduler.SwitchContextStep2();
 }
 
 void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
     const u64 prev_switch_ticks = last_context_switch_time;
-    const u64 most_recent_switch_ticks = system.CoreTiming().GetTicks();
+    const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
     const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
 
     if (thread != nullptr) {
```
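This is the heart of the rework: `SwitchContext` no longer loads the guest register state in place. It parks the current host context with `Common::Fiber::YieldTo` and finishes the switch in `SwitchContextStep2` on whichever core the thread later wakes up on, which is also why the code re-fetches `system.Kernel().CurrentScheduler()` after `YieldTo` returns. The following is *not* the `Common::Fiber` implementation (which switches stacks within a single host thread); it is a deliberately crude model using parked `std::thread`s and condition variables, only to show the control-flow contract of `YieldTo`: the caller stops, the target resumes, and the caller continues only when some fiber later yields back to it:

```cpp
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

// Toy stand-in for a fiber: code that runs only while its `active` flag is set.
struct ToyFiber {
    std::mutex m;
    std::condition_variable cv;
    bool active = false;
};

// Wake `to`, then block until someone yields back to `from`.
void YieldTo(ToyFiber& from, ToyFiber& to) {
    {
        std::lock_guard lk{to.m};
        to.active = true;
    }
    to.cv.notify_one();
    std::unique_lock lk{from.m};
    from.active = false;
    from.cv.wait(lk, [&] { return from.active; });
}

int main() {
    ToyFiber a, b;
    a.active = true;

    std::thread worker([&] {
        std::unique_lock lk{b.m};
        b.cv.wait(lk, [&] { return b.active; });
        lk.unlock();
        std::printf("fiber B runs, yielding back\n");
        YieldTo(b, a); // parks B until it is resumed again
    });

    std::printf("fiber A yields to B\n");
    YieldTo(a, b);
    std::printf("fiber A resumed\n");

    // Unpark B one last time so its host thread can finish and be joined;
    // a real fiber system would destroy or reuse the fiber instead.
    {
        std::lock_guard lk{b.m};
        b.active = true;
    }
    b.cv.notify_one();
    worker.join();
}
```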
```diff
@@ -510,6 +677,16 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
     last_context_switch_time = most_recent_switch_ticks;
 }
 
+void Scheduler::Initialize() {
+    std::string name = "Idle Thread Id:" + std::to_string(core_id);
+    std::function<void(void*)> init_func = system.GetCpuManager().GetIdleThreadStartFunc();
+    void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
+    ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
+    auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
+                                     nullptr, std::move(init_func), init_func_parameter);
+    idle_thread = std::move(thread_res).Unwrap();
+}
+
 void Scheduler::Shutdown() {
     current_thread = nullptr;
     selected_thread = nullptr;
```