diff options
| author | 2019-06-19 09:11:18 -0400 | |
|---|---|---|
| committer | 2019-10-15 11:55:12 -0400 | |
| commit | 82218c925af8bcbaa05ae9f39af2d2393de7681f (patch) | |
| tree | e38d90c4838679ae59d58f51fff2904b16b1a155 /src/core/hle/kernel/scheduler.cpp | |
| parent | Correct PrepareReschedule (diff) | |
| download | yuzu-82218c925af8bcbaa05ae9f39af2d2393de7681f.tar.gz yuzu-82218c925af8bcbaa05ae9f39af2d2393de7681f.tar.xz yuzu-82218c925af8bcbaa05ae9f39af2d2393de7681f.zip | |
Kernel: Style and Corrections
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
| -rw-r--r-- | src/core/hle/kernel/scheduler.cpp | 78 |
1 file changed, 48 insertions, 30 deletions
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index 537640152..df4e9b799 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp | |||
| @@ -1,6 +1,9 @@ | |||
| 1 | // Copyright 2018 yuzu emulator team | 1 | // Copyright 2018 yuzu emulator team |
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | // | ||
| 5 | // SelectThreads, Yield functions originally by TuxSH. | ||
| 6 | // licensed under GPLv2 or later under exception provided by the author. | ||
| 4 | 7 | ||
| 5 | #include <algorithm> | 8 | #include <algorithm> |
| 6 | #include <set> | 9 | #include <set> |
| @@ -19,16 +22,15 @@ | |||
| 19 | 22 | ||
| 20 | namespace Kernel { | 23 | namespace Kernel { |
| 21 | 24 | ||
| 22 | /* | 25 | GlobalScheduler::GlobalScheduler(Core::System& system) : system{system} { |
| 23 | * SelectThreads, Yield functions originally by TuxSH. | 26 | reselection_pending = false; |
| 24 | * licensed under GPLv2 or later under exception provided by the author. | 27 | } |
| 25 | */ | ||
| 26 | 28 | ||
| 27 | void GlobalScheduler::AddThread(SharedPtr<Thread> thread) { | 29 | void GlobalScheduler::AddThread(SharedPtr<Thread> thread) { |
| 28 | thread_list.push_back(std::move(thread)); | 30 | thread_list.push_back(std::move(thread)); |
| 29 | } | 31 | } |
| 30 | 32 | ||
| 31 | void GlobalScheduler::RemoveThread(Thread* thread) { | 33 | void GlobalScheduler::RemoveThread(const Thread* thread) { |
| 32 | thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), | 34 | thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), |
| 33 | thread_list.end()); | 35 | thread_list.end()); |
| 34 | } | 36 | } |
| @@ -37,7 +39,7 @@ void GlobalScheduler::RemoveThread(Thread* thread) { | |||
| 37 | * UnloadThread selects a core and forces it to unload its current thread's context | 39 | * UnloadThread selects a core and forces it to unload its current thread's context |
| 38 | */ | 40 | */ |
| 39 | void GlobalScheduler::UnloadThread(s32 core) { | 41 | void GlobalScheduler::UnloadThread(s32 core) { |
| 40 | Scheduler& sched = Core::System::GetInstance().Scheduler(core); | 42 | Scheduler& sched = system.Scheduler(core); |
| 41 | sched.UnloadThread(); | 43 | sched.UnloadThread(); |
| 42 | } | 44 | } |
| 43 | 45 | ||
| @@ -52,7 +54,7 @@ void GlobalScheduler::UnloadThread(s32 core) { | |||
| 52 | * thread in another core and swap it with its current thread. | 54 | * thread in another core and swap it with its current thread. |
| 53 | */ | 55 | */ |
| 54 | void GlobalScheduler::SelectThread(u32 core) { | 56 | void GlobalScheduler::SelectThread(u32 core) { |
| 55 | auto update_thread = [](Thread* thread, Scheduler& sched) { | 57 | const auto update_thread = [](Thread* thread, Scheduler& sched) { |
| 56 | if (thread != sched.selected_thread) { | 58 | if (thread != sched.selected_thread) { |
| 57 | if (thread == nullptr) { | 59 | if (thread == nullptr) { |
| 58 | ++sched.idle_selection_count; | 60 | ++sched.idle_selection_count; |
| @@ -62,7 +64,7 @@ void GlobalScheduler::SelectThread(u32 core) { | |||
| 62 | sched.context_switch_pending = sched.selected_thread != sched.current_thread; | 64 | sched.context_switch_pending = sched.selected_thread != sched.current_thread; |
| 63 | std::atomic_thread_fence(std::memory_order_seq_cst); | 65 | std::atomic_thread_fence(std::memory_order_seq_cst); |
| 64 | }; | 66 | }; |
| 65 | Scheduler& sched = Core::System::GetInstance().Scheduler(core); | 67 | Scheduler& sched = system.Scheduler(core); |
| 66 | Thread* current_thread = nullptr; | 68 | Thread* current_thread = nullptr; |
| 67 | // Step 1: Get top thread in schedule queue. | 69 | // Step 1: Get top thread in schedule queue. |
| 68 | current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front(); | 70 | current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front(); |
| @@ -118,8 +120,8 @@ void GlobalScheduler::SelectThread(u32 core) { | |||
| 118 | */ | 120 | */ |
| 119 | void GlobalScheduler::YieldThread(Thread* yielding_thread) { | 121 | void GlobalScheduler::YieldThread(Thread* yielding_thread) { |
| 120 | // Note: caller should use critical section, etc. | 122 | // Note: caller should use critical section, etc. |
| 121 | u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); | 123 | const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); |
| 122 | u32 priority = yielding_thread->GetPriority(); | 124 | const u32 priority = yielding_thread->GetPriority(); |
| 123 | 125 | ||
| 124 | // Yield the thread | 126 | // Yield the thread |
| 125 | ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority), | 127 | ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority), |
| @@ -139,8 +141,8 @@ void GlobalScheduler::YieldThread(Thread* yielding_thread) { | |||
| 139 | void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) { | 141 | void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) { |
| 140 | // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, | 142 | // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, |
| 141 | // etc. | 143 | // etc. |
| 142 | u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); | 144 | const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); |
| 143 | u32 priority = yielding_thread->GetPriority(); | 145 | const u32 priority = yielding_thread->GetPriority(); |
| 144 | 146 | ||
| 145 | // Yield the thread | 147 | // Yield the thread |
| 146 | ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority), | 148 | ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority), |
| @@ -155,12 +157,13 @@ void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) { | |||
| 155 | Thread* next_thread = scheduled_queue[core_id].front(priority); | 157 | Thread* next_thread = scheduled_queue[core_id].front(priority); |
| 156 | Thread* winner = nullptr; | 158 | Thread* winner = nullptr; |
| 157 | for (auto& thread : suggested_queue[core_id]) { | 159 | for (auto& thread : suggested_queue[core_id]) { |
| 158 | s32 source_core = thread->GetProcessorID(); | 160 | const s32 source_core = thread->GetProcessorID(); |
| 159 | if (source_core >= 0) { | 161 | if (source_core >= 0) { |
| 160 | if (current_threads[source_core] != nullptr) { | 162 | if (current_threads[source_core] != nullptr) { |
| 161 | if (thread == current_threads[source_core] || | 163 | if (thread == current_threads[source_core] || |
| 162 | current_threads[source_core]->GetPriority() < min_regular_priority) | 164 | current_threads[source_core]->GetPriority() < min_regular_priority) { |
| 163 | continue; | 165 | continue; |
| 166 | } | ||
| 164 | } | 167 | } |
| 165 | if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() || | 168 | if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() || |
| 166 | next_thread->GetPriority() < thread->GetPriority()) { | 169 | next_thread->GetPriority() < thread->GetPriority()) { |
| @@ -174,8 +177,9 @@ void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) { | |||
| 174 | 177 | ||
| 175 | if (winner != nullptr) { | 178 | if (winner != nullptr) { |
| 176 | if (winner != yielding_thread) { | 179 | if (winner != yielding_thread) { |
| 177 | if (winner->IsRunning()) | 180 | if (winner->IsRunning()) { |
| 178 | UnloadThread(winner->GetProcessorID()); | 181 | UnloadThread(winner->GetProcessorID()); |
| 182 | } | ||
| 179 | TransferToCore(winner->GetPriority(), core_id, winner); | 183 | TransferToCore(winner->GetPriority(), core_id, winner); |
| 180 | } | 184 | } |
| 181 | } else { | 185 | } else { |
| @@ -195,7 +199,7 @@ void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread | |||
| 195 | // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, | 199 | // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, |
| 196 | // etc. | 200 | // etc. |
| 197 | Thread* winner = nullptr; | 201 | Thread* winner = nullptr; |
| 198 | u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); | 202 | const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); |
| 199 | 203 | ||
| 200 | // Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one instead | 204 | // Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one instead |
| 201 | TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread); | 205 | TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread); |
| @@ -209,9 +213,10 @@ void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread | |||
| 209 | current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front(); | 213 | current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front(); |
| 210 | } | 214 | } |
| 211 | for (auto& thread : suggested_queue[core_id]) { | 215 | for (auto& thread : suggested_queue[core_id]) { |
| 212 | s32 source_core = thread->GetProcessorID(); | 216 | const s32 source_core = thread->GetProcessorID(); |
| 213 | if (source_core < 0 || thread == current_threads[source_core]) | 217 | if (source_core < 0 || thread == current_threads[source_core]) { |
| 214 | continue; | 218 | continue; |
| 219 | } | ||
| 215 | if (current_threads[source_core] == nullptr || | 220 | if (current_threads[source_core] == nullptr || |
| 216 | current_threads[source_core]->GetPriority() >= min_regular_priority) { | 221 | current_threads[source_core]->GetPriority() >= min_regular_priority) { |
| 217 | winner = thread; | 222 | winner = thread; |
| @@ -220,8 +225,9 @@ void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread | |||
| 220 | } | 225 | } |
| 221 | if (winner != nullptr) { | 226 | if (winner != nullptr) { |
| 222 | if (winner != yielding_thread) { | 227 | if (winner != yielding_thread) { |
| 223 | if (winner->IsRunning()) | 228 | if (winner->IsRunning()) { |
| 224 | UnloadThread(winner->GetProcessorID()); | 229 | UnloadThread(winner->GetProcessorID()); |
| 230 | } | ||
| 225 | TransferToCore(winner->GetPriority(), core_id, winner); | 231 | TransferToCore(winner->GetPriority(), core_id, winner); |
| 226 | } | 232 | } |
| 227 | } else { | 233 | } else { |
| @@ -232,6 +238,16 @@ void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread | |||
| 232 | AskForReselectionOrMarkRedundant(yielding_thread, winner); | 238 | AskForReselectionOrMarkRedundant(yielding_thread, winner); |
| 233 | } | 239 | } |
| 234 | 240 | ||
| 241 | void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) { | ||
| 242 | ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core."); | ||
| 243 | scheduled_queue[core].add(thread, priority); | ||
| 244 | } | ||
| 245 | |||
| 246 | void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) { | ||
| 247 | ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core."); | ||
| 248 | scheduled_queue[core].add(thread, priority, false); | ||
| 249 | } | ||
| 250 | |||
| 235 | void GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) { | 251 | void GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) { |
| 236 | if (current_thread == winner) { | 252 | if (current_thread == winner) { |
| 237 | // TODO(blinkhawk): manage redundant operations, this is not implemented. | 253 | // TODO(blinkhawk): manage redundant operations, this is not implemented. |
| @@ -244,13 +260,13 @@ void GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, T | |||
| 244 | 260 | ||
| 245 | GlobalScheduler::~GlobalScheduler() = default; | 261 | GlobalScheduler::~GlobalScheduler() = default; |
| 246 | 262 | ||
| 247 | Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 id) | 263 | Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 core_id) |
| 248 | : system(system), cpu_core(cpu_core), id(id) {} | 264 | : system(system), cpu_core(cpu_core), core_id(core_id) {} |
| 249 | 265 | ||
| 250 | Scheduler::~Scheduler() {} | 266 | Scheduler::~Scheduler() = default; |
| 251 | 267 | ||
| 252 | bool Scheduler::HaveReadyThreads() const { | 268 | bool Scheduler::HaveReadyThreads() const { |
| 253 | return system.GlobalScheduler().HaveReadyThreads(id); | 269 | return system.GlobalScheduler().HaveReadyThreads(core_id); |
| 254 | } | 270 | } |
| 255 | 271 | ||
| 256 | Thread* Scheduler::GetCurrentThread() const { | 272 | Thread* Scheduler::GetCurrentThread() const { |
| @@ -262,7 +278,7 @@ Thread* Scheduler::GetSelectedThread() const { | |||
| 262 | } | 278 | } |
| 263 | 279 | ||
| 264 | void Scheduler::SelectThreads() { | 280 | void Scheduler::SelectThreads() { |
| 265 | system.GlobalScheduler().SelectThread(id); | 281 | system.GlobalScheduler().SelectThread(core_id); |
| 266 | } | 282 | } |
| 267 | 283 | ||
| 268 | u64 Scheduler::GetLastContextSwitchTicks() const { | 284 | u64 Scheduler::GetLastContextSwitchTicks() const { |
| @@ -270,13 +286,14 @@ u64 Scheduler::GetLastContextSwitchTicks() const { | |||
| 270 | } | 286 | } |
| 271 | 287 | ||
| 272 | void Scheduler::TryDoContextSwitch() { | 288 | void Scheduler::TryDoContextSwitch() { |
| 273 | if (context_switch_pending) | 289 | if (context_switch_pending) { |
| 274 | SwitchContext(); | 290 | SwitchContext(); |
| 291 | } | ||
| 275 | } | 292 | } |
| 276 | 293 | ||
| 277 | void Scheduler::UnloadThread() { | 294 | void Scheduler::UnloadThread() { |
| 278 | Thread* const previous_thread = GetCurrentThread(); | 295 | Thread* const previous_thread = GetCurrentThread(); |
| 279 | Process* const previous_process = Core::CurrentProcess(); | 296 | Process* const previous_process = system.Kernel().CurrentProcess(); |
| 280 | 297 | ||
| 281 | UpdateLastContextSwitchTime(previous_thread, previous_process); | 298 | UpdateLastContextSwitchTime(previous_thread, previous_process); |
| 282 | 299 | ||
| @@ -301,10 +318,11 @@ void Scheduler::SwitchContext() { | |||
| 301 | Thread* const new_thread = GetSelectedThread(); | 318 | Thread* const new_thread = GetSelectedThread(); |
| 302 | 319 | ||
| 303 | context_switch_pending = false; | 320 | context_switch_pending = false; |
| 304 | if (new_thread == previous_thread) | 321 | if (new_thread == previous_thread) { |
| 305 | return; | 322 | return; |
| 323 | } | ||
| 306 | 324 | ||
| 307 | Process* const previous_process = Core::CurrentProcess(); | 325 | Process* const previous_process = system.Kernel().CurrentProcess(); |
| 308 | 326 | ||
| 309 | UpdateLastContextSwitchTime(previous_thread, previous_process); | 327 | UpdateLastContextSwitchTime(previous_thread, previous_process); |
| 310 | 328 | ||
| @@ -324,7 +342,7 @@ void Scheduler::SwitchContext() { | |||
| 324 | 342 | ||
| 325 | // Load context of new thread | 343 | // Load context of new thread |
| 326 | if (new_thread) { | 344 | if (new_thread) { |
| 327 | ASSERT_MSG(new_thread->GetProcessorID() == this->id, | 345 | ASSERT_MSG(new_thread->GetProcessorID() == this->core_id, |
| 328 | "Thread must be assigned to this core."); | 346 | "Thread must be assigned to this core."); |
| 329 | ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready, | 347 | ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready, |
| 330 | "Thread must be ready to become running."); | 348 | "Thread must be ready to become running."); |
| @@ -353,7 +371,7 @@ void Scheduler::SwitchContext() { | |||
| 353 | 371 | ||
| 354 | void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { | 372 | void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { |
| 355 | const u64 prev_switch_ticks = last_context_switch_time; | 373 | const u64 prev_switch_ticks = last_context_switch_time; |
| 356 | const u64 most_recent_switch_ticks = Core::System::GetInstance().CoreTiming().GetTicks(); | 374 | const u64 most_recent_switch_ticks = system.CoreTiming().GetTicks(); |
| 357 | const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; | 375 | const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; |
| 358 | 376 | ||
| 359 | if (thread != nullptr) { | 377 | if (thread != nullptr) { |