diff options
| author | 2019-10-12 10:13:25 -0400 | |
|---|---|---|
| committer | 2019-10-15 11:55:25 -0400 | |
| commit | 3073615dbc214a53badc88da68eecbaaa73898de (patch) | |
| tree | 78926945e9c645bbcdebfba7dc3d216678dae547 /src/core/hle/kernel/scheduler.cpp | |
| parent | Kernel Scheduler: Make sure the global scheduler shutdowns correctly. (diff) | |
| download | yuzu-3073615dbc214a53badc88da68eecbaaa73898de.tar.gz yuzu-3073615dbc214a53badc88da68eecbaaa73898de.tar.xz yuzu-3073615dbc214a53badc88da68eecbaaa73898de.zip | |
Kernel: Address Feedback.
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
| -rw-r--r-- | src/core/hle/kernel/scheduler.cpp | 53 |
1 files changed, 47 insertions, 6 deletions
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index 122106267..dabeb05d6 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp | |||
| @@ -23,7 +23,7 @@ | |||
| 23 | namespace Kernel { | 23 | namespace Kernel { |
| 24 | 24 | ||
| 25 | GlobalScheduler::GlobalScheduler(Core::System& system) : system{system} { | 25 | GlobalScheduler::GlobalScheduler(Core::System& system) : system{system} { |
| 26 | reselection_pending = false; | 26 | is_reselection_pending = false; |
| 27 | } | 27 | } |
| 28 | 28 | ||
| 29 | void GlobalScheduler::AddThread(SharedPtr<Thread> thread) { | 29 | void GlobalScheduler::AddThread(SharedPtr<Thread> thread) { |
| @@ -61,7 +61,7 @@ void GlobalScheduler::SelectThread(u32 core) { | |||
| 61 | } | 61 | } |
| 62 | sched.selected_thread = thread; | 62 | sched.selected_thread = thread; |
| 63 | } | 63 | } |
| 64 | sched.context_switch_pending = sched.selected_thread != sched.current_thread; | 64 | sched.is_context_switch_pending = sched.selected_thread != sched.current_thread; |
| 65 | std::atomic_thread_fence(std::memory_order_seq_cst); | 65 | std::atomic_thread_fence(std::memory_order_seq_cst); |
| 66 | }; | 66 | }; |
| 67 | Scheduler& sched = system.Scheduler(core); | 67 | Scheduler& sched = system.Scheduler(core); |
| @@ -318,10 +318,18 @@ void GlobalScheduler::PreemptThreads() { | |||
| 318 | } | 318 | } |
| 319 | } | 319 | } |
| 320 | 320 | ||
| 321 | reselection_pending.store(true, std::memory_order_release); | 321 | is_reselection_pending.store(true, std::memory_order_release); |
| 322 | } | 322 | } |
| 323 | } | 323 | } |
| 324 | 324 | ||
| 325 | void GlobalScheduler::Suggest(u32 priority, u32 core, Thread* thread) { | ||
| 326 | suggested_queue[core].add(thread, priority); | ||
| 327 | } | ||
| 328 | |||
| 329 | void GlobalScheduler::Unsuggest(u32 priority, u32 core, Thread* thread) { | ||
| 330 | suggested_queue[core].remove(thread, priority); | ||
| 331 | } | ||
| 332 | |||
| 325 | void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) { | 333 | void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) { |
| 326 | ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core."); | 334 | ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core."); |
| 327 | scheduled_queue[core].add(thread, priority); | 335 | scheduled_queue[core].add(thread, priority); |
| @@ -332,12 +340,40 @@ void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) { | |||
| 332 | scheduled_queue[core].add(thread, priority, false); | 340 | scheduled_queue[core].add(thread, priority, false); |
| 333 | } | 341 | } |
| 334 | 342 | ||
| 343 | void GlobalScheduler::Reschedule(u32 priority, u32 core, Thread* thread) { | ||
| 344 | scheduled_queue[core].remove(thread, priority); | ||
| 345 | scheduled_queue[core].add(thread, priority); | ||
| 346 | } | ||
| 347 | |||
| 348 | void GlobalScheduler::Unschedule(u32 priority, u32 core, Thread* thread) { | ||
| 349 | scheduled_queue[core].remove(thread, priority); | ||
| 350 | } | ||
| 351 | |||
| 352 | void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) { | ||
| 353 | const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT; | ||
| 354 | const s32 source_core = thread->GetProcessorID(); | ||
| 355 | if (source_core == destination_core || !schedulable) { | ||
| 356 | return; | ||
| 357 | } | ||
| 358 | thread->SetProcessorID(destination_core); | ||
| 359 | if (source_core >= 0) { | ||
| 360 | Unschedule(priority, source_core, thread); | ||
| 361 | } | ||
| 362 | if (destination_core >= 0) { | ||
| 363 | Unsuggest(priority, destination_core, thread); | ||
| 364 | Schedule(priority, destination_core, thread); | ||
| 365 | } | ||
| 366 | if (source_core >= 0) { | ||
| 367 | Suggest(priority, source_core, thread); | ||
| 368 | } | ||
| 369 | } | ||
| 370 | |||
| 335 | bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) { | 371 | bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) { |
| 336 | if (current_thread == winner) { | 372 | if (current_thread == winner) { |
| 337 | current_thread->IncrementYieldCount(); | 373 | current_thread->IncrementYieldCount(); |
| 338 | return true; | 374 | return true; |
| 339 | } else { | 375 | } else { |
| 340 | reselection_pending.store(true, std::memory_order_release); | 376 | is_reselection_pending.store(true, std::memory_order_release); |
| 341 | return false; | 377 | return false; |
| 342 | } | 378 | } |
| 343 | } | 379 | } |
| @@ -378,7 +414,7 @@ u64 Scheduler::GetLastContextSwitchTicks() const { | |||
| 378 | } | 414 | } |
| 379 | 415 | ||
| 380 | void Scheduler::TryDoContextSwitch() { | 416 | void Scheduler::TryDoContextSwitch() { |
| 381 | if (context_switch_pending) { | 417 | if (is_context_switch_pending) { |
| 382 | SwitchContext(); | 418 | SwitchContext(); |
| 383 | } | 419 | } |
| 384 | } | 420 | } |
| @@ -409,7 +445,7 @@ void Scheduler::SwitchContext() { | |||
| 409 | Thread* const previous_thread = GetCurrentThread(); | 445 | Thread* const previous_thread = GetCurrentThread(); |
| 410 | Thread* const new_thread = GetSelectedThread(); | 446 | Thread* const new_thread = GetSelectedThread(); |
| 411 | 447 | ||
| 412 | context_switch_pending = false; | 448 | is_context_switch_pending = false; |
| 413 | if (new_thread == previous_thread) { | 449 | if (new_thread == previous_thread) { |
| 414 | return; | 450 | return; |
| 415 | } | 451 | } |
| @@ -477,4 +513,9 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { | |||
| 477 | last_context_switch_time = most_recent_switch_ticks; | 513 | last_context_switch_time = most_recent_switch_ticks; |
| 478 | } | 514 | } |
| 479 | 515 | ||
| 516 | void Scheduler::Shutdown() { | ||
| 517 | current_thread = nullptr; | ||
| 518 | selected_thread = nullptr; | ||
| 519 | } | ||
| 520 | |||
| 480 | } // namespace Kernel | 521 | } // namespace Kernel |