summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorGravatar Fernando Sahmkow2019-06-19 09:11:18 -0400
committerGravatar FernandoS272019-10-15 11:55:12 -0400
commit82218c925af8bcbaa05ae9f39af2d2393de7681f (patch)
treee38d90c4838679ae59d58f51fff2904b16b1a155 /src
parentCorrect PrepareReschedule (diff)
downloadyuzu-82218c925af8bcbaa05ae9f39af2d2393de7681f.tar.gz
yuzu-82218c925af8bcbaa05ae9f39af2d2393de7681f.tar.xz
yuzu-82218c925af8bcbaa05ae9f39af2d2393de7681f.zip
Kernel: Style and Corrections
Diffstat (limited to 'src')
-rw-r--r--src/core/core.cpp5
-rw-r--r--src/core/core.h4
-rw-r--r--src/core/core_cpu.cpp2
-rw-r--r--src/core/core_cpu.h2
-rw-r--r--src/core/hle/kernel/address_arbiter.cpp1
-rw-r--r--src/core/hle/kernel/kernel.cpp2
-rw-r--r--src/core/hle/kernel/mutex.cpp2
-rw-r--r--src/core/hle/kernel/scheduler.cpp78
-rw-r--r--src/core/hle/kernel/scheduler.h53
-rw-r--r--src/core/hle/kernel/svc.cpp15
-rw-r--r--src/core/hle/kernel/thread.cpp54
-rw-r--r--src/core/hle/kernel/thread.h15
12 files changed, 137 insertions, 96 deletions
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 4a95630bd..d79045eea 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -404,9 +404,10 @@ void System::PrepareReschedule() {
404 CurrentCpuCore().PrepareReschedule(); 404 CurrentCpuCore().PrepareReschedule();
405} 405}
406 406
407void System::PrepareReschedule(s32 core_index) { 407void System::PrepareReschedule(const u32 core_index) {
408 if (core_index >= 0) 408 if (core_index < GlobalScheduler().CpuCoresCount()) {
409 CpuCore(core_index).PrepareReschedule(); 409 CpuCore(core_index).PrepareReschedule();
410 }
410} 411}
411 412
412PerfStatsResults System::GetAndResetPerfStats() { 413PerfStatsResults System::GetAndResetPerfStats() {
diff --git a/src/core/core.h b/src/core/core.h
index 0d1008895..984074ce3 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -24,10 +24,10 @@ class VfsFilesystem;
24} // namespace FileSys 24} // namespace FileSys
25 25
26namespace Kernel { 26namespace Kernel {
27class GlobalScheduler;
27class KernelCore; 28class KernelCore;
28class Process; 29class Process;
29class Scheduler; 30class Scheduler;
30class GlobalScheduler;
31} // namespace Kernel 31} // namespace Kernel
32 32
33namespace Loader { 33namespace Loader {
@@ -186,7 +186,7 @@ public:
186 void PrepareReschedule(); 186 void PrepareReschedule();
187 187
188 /// Prepare the core emulation for a reschedule 188 /// Prepare the core emulation for a reschedule
189 void PrepareReschedule(s32 core_index); 189 void PrepareReschedule(u32 core_index);
190 190
191 /// Gets and resets core performance statistics 191 /// Gets and resets core performance statistics
192 PerfStatsResults GetAndResetPerfStats(); 192 PerfStatsResults GetAndResetPerfStats();
diff --git a/src/core/core_cpu.cpp b/src/core/core_cpu.cpp
index 2a7c3af24..a6f63e437 100644
--- a/src/core/core_cpu.cpp
+++ b/src/core/core_cpu.cpp
@@ -111,7 +111,7 @@ void Cpu::PrepareReschedule() {
111 111
112void Cpu::Reschedule() { 112void Cpu::Reschedule() {
113 // Lock the global kernel mutex when we manipulate the HLE state 113 // Lock the global kernel mutex when we manipulate the HLE state
114 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); 114 std::lock_guard lock(HLE::g_hle_lock);
115 115
116 global_scheduler.SelectThread(core_index); 116 global_scheduler.SelectThread(core_index);
117 scheduler->TryDoContextSwitch(); 117 scheduler->TryDoContextSwitch();
diff --git a/src/core/core_cpu.h b/src/core/core_cpu.h
index 0cde54787..80261daf7 100644
--- a/src/core/core_cpu.h
+++ b/src/core/core_cpu.h
@@ -12,8 +12,8 @@
12#include "common/common_types.h" 12#include "common/common_types.h"
13 13
14namespace Kernel { 14namespace Kernel {
15class Scheduler;
16class GlobalScheduler; 15class GlobalScheduler;
16class Scheduler;
17} // namespace Kernel 17} // namespace Kernel
18 18
19namespace Core { 19namespace Core {
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index c66cd16ef..4c1d3fd18 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -22,7 +22,6 @@ namespace Kernel {
22namespace { 22namespace {
23// Wake up num_to_wake (or all) threads in a vector. 23// Wake up num_to_wake (or all) threads in a vector.
24void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) { 24void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) {
25
26 auto& system = Core::System::GetInstance(); 25 auto& system = Core::System::GetInstance();
27 // Only process up to 'target' threads, unless 'target' is <= 0, in which case process 26 // Only process up to 'target' threads, unless 'target' is <= 0, in which case process
28 // them all. 27 // them all.
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index b4fd1d3f3..600d6ec74 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -89,7 +89,7 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
89} 89}
90 90
91struct KernelCore::Impl { 91struct KernelCore::Impl {
92 explicit Impl(Core::System& system) : system{system} {} 92 explicit Impl(Core::System& system) : system{system}, global_scheduler{system} {}
93 93
94 void Initialize(KernelCore& kernel) { 94 void Initialize(KernelCore& kernel) {
95 Shutdown(); 95 Shutdown();
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index 57f2d8bf3..eb919246c 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -140,7 +140,7 @@ ResultCode Mutex::Release(VAddr address) {
140 thread->SetMutexWaitAddress(0); 140 thread->SetMutexWaitAddress(0);
141 thread->SetWaitHandle(0); 141 thread->SetWaitHandle(0);
142 142
143 Core::System::GetInstance().PrepareReschedule(); 143 system.PrepareReschedule();
144 144
145 return RESULT_SUCCESS; 145 return RESULT_SUCCESS;
146} 146}
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 537640152..df4e9b799 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -1,6 +1,9 @@
1// Copyright 2018 yuzu emulator team 1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4//
5// SelectThreads, Yield functions originally by TuxSH.
6// licensed under GPLv2 or later under exception provided by the author.
4 7
5#include <algorithm> 8#include <algorithm>
6#include <set> 9#include <set>
@@ -19,16 +22,15 @@
19 22
20namespace Kernel { 23namespace Kernel {
21 24
22/* 25GlobalScheduler::GlobalScheduler(Core::System& system) : system{system} {
23 * SelectThreads, Yield functions originally by TuxSH. 26 reselection_pending = false;
24 * licensed under GPLv2 or later under exception provided by the author. 27}
25 */
26 28
27void GlobalScheduler::AddThread(SharedPtr<Thread> thread) { 29void GlobalScheduler::AddThread(SharedPtr<Thread> thread) {
28 thread_list.push_back(std::move(thread)); 30 thread_list.push_back(std::move(thread));
29} 31}
30 32
31void GlobalScheduler::RemoveThread(Thread* thread) { 33void GlobalScheduler::RemoveThread(const Thread* thread) {
32 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), 34 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
33 thread_list.end()); 35 thread_list.end());
34} 36}
@@ -37,7 +39,7 @@ void GlobalScheduler::RemoveThread(Thread* thread) {
37 * UnloadThread selects a core and forces it to unload its current thread's context 39 * UnloadThread selects a core and forces it to unload its current thread's context
38 */ 40 */
39void GlobalScheduler::UnloadThread(s32 core) { 41void GlobalScheduler::UnloadThread(s32 core) {
40 Scheduler& sched = Core::System::GetInstance().Scheduler(core); 42 Scheduler& sched = system.Scheduler(core);
41 sched.UnloadThread(); 43 sched.UnloadThread();
42} 44}
43 45
@@ -52,7 +54,7 @@ void GlobalScheduler::UnloadThread(s32 core) {
52 * thread in another core and swap it with its current thread. 54 * thread in another core and swap it with its current thread.
53 */ 55 */
54void GlobalScheduler::SelectThread(u32 core) { 56void GlobalScheduler::SelectThread(u32 core) {
55 auto update_thread = [](Thread* thread, Scheduler& sched) { 57 const auto update_thread = [](Thread* thread, Scheduler& sched) {
56 if (thread != sched.selected_thread) { 58 if (thread != sched.selected_thread) {
57 if (thread == nullptr) { 59 if (thread == nullptr) {
58 ++sched.idle_selection_count; 60 ++sched.idle_selection_count;
@@ -62,7 +64,7 @@ void GlobalScheduler::SelectThread(u32 core) {
62 sched.context_switch_pending = sched.selected_thread != sched.current_thread; 64 sched.context_switch_pending = sched.selected_thread != sched.current_thread;
63 std::atomic_thread_fence(std::memory_order_seq_cst); 65 std::atomic_thread_fence(std::memory_order_seq_cst);
64 }; 66 };
65 Scheduler& sched = Core::System::GetInstance().Scheduler(core); 67 Scheduler& sched = system.Scheduler(core);
66 Thread* current_thread = nullptr; 68 Thread* current_thread = nullptr;
67 // Step 1: Get top thread in schedule queue. 69 // Step 1: Get top thread in schedule queue.
68 current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front(); 70 current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
@@ -118,8 +120,8 @@ void GlobalScheduler::SelectThread(u32 core) {
118 */ 120 */
119void GlobalScheduler::YieldThread(Thread* yielding_thread) { 121void GlobalScheduler::YieldThread(Thread* yielding_thread) {
120 // Note: caller should use critical section, etc. 122 // Note: caller should use critical section, etc.
121 u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); 123 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
122 u32 priority = yielding_thread->GetPriority(); 124 const u32 priority = yielding_thread->GetPriority();
123 125
124 // Yield the thread 126 // Yield the thread
125 ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority), 127 ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority),
@@ -139,8 +141,8 @@ void GlobalScheduler::YieldThread(Thread* yielding_thread) {
139void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) { 141void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
140 // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, 142 // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
141 // etc. 143 // etc.
142 u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); 144 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
143 u32 priority = yielding_thread->GetPriority(); 145 const u32 priority = yielding_thread->GetPriority();
144 146
145 // Yield the thread 147 // Yield the thread
146 ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority), 148 ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority),
@@ -155,12 +157,13 @@ void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
155 Thread* next_thread = scheduled_queue[core_id].front(priority); 157 Thread* next_thread = scheduled_queue[core_id].front(priority);
156 Thread* winner = nullptr; 158 Thread* winner = nullptr;
157 for (auto& thread : suggested_queue[core_id]) { 159 for (auto& thread : suggested_queue[core_id]) {
158 s32 source_core = thread->GetProcessorID(); 160 const s32 source_core = thread->GetProcessorID();
159 if (source_core >= 0) { 161 if (source_core >= 0) {
160 if (current_threads[source_core] != nullptr) { 162 if (current_threads[source_core] != nullptr) {
161 if (thread == current_threads[source_core] || 163 if (thread == current_threads[source_core] ||
162 current_threads[source_core]->GetPriority() < min_regular_priority) 164 current_threads[source_core]->GetPriority() < min_regular_priority) {
163 continue; 165 continue;
166 }
164 } 167 }
165 if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() || 168 if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
166 next_thread->GetPriority() < thread->GetPriority()) { 169 next_thread->GetPriority() < thread->GetPriority()) {
@@ -174,8 +177,9 @@ void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
174 177
175 if (winner != nullptr) { 178 if (winner != nullptr) {
176 if (winner != yielding_thread) { 179 if (winner != yielding_thread) {
177 if (winner->IsRunning()) 180 if (winner->IsRunning()) {
178 UnloadThread(winner->GetProcessorID()); 181 UnloadThread(winner->GetProcessorID());
182 }
179 TransferToCore(winner->GetPriority(), core_id, winner); 183 TransferToCore(winner->GetPriority(), core_id, winner);
180 } 184 }
181 } else { 185 } else {
@@ -195,7 +199,7 @@ void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
195 // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, 199 // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
196 // etc. 200 // etc.
197 Thread* winner = nullptr; 201 Thread* winner = nullptr;
198 u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); 202 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
199 203
200 // Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one instead 204 // Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one instead
201 TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread); 205 TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread);
@@ -209,9 +213,10 @@ void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
209 current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front(); 213 current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
210 } 214 }
211 for (auto& thread : suggested_queue[core_id]) { 215 for (auto& thread : suggested_queue[core_id]) {
212 s32 source_core = thread->GetProcessorID(); 216 const s32 source_core = thread->GetProcessorID();
213 if (source_core < 0 || thread == current_threads[source_core]) 217 if (source_core < 0 || thread == current_threads[source_core]) {
214 continue; 218 continue;
219 }
215 if (current_threads[source_core] == nullptr || 220 if (current_threads[source_core] == nullptr ||
216 current_threads[source_core]->GetPriority() >= min_regular_priority) { 221 current_threads[source_core]->GetPriority() >= min_regular_priority) {
217 winner = thread; 222 winner = thread;
@@ -220,8 +225,9 @@ void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
220 } 225 }
221 if (winner != nullptr) { 226 if (winner != nullptr) {
222 if (winner != yielding_thread) { 227 if (winner != yielding_thread) {
223 if (winner->IsRunning()) 228 if (winner->IsRunning()) {
224 UnloadThread(winner->GetProcessorID()); 229 UnloadThread(winner->GetProcessorID());
230 }
225 TransferToCore(winner->GetPriority(), core_id, winner); 231 TransferToCore(winner->GetPriority(), core_id, winner);
226 } 232 }
227 } else { 233 } else {
@@ -232,6 +238,16 @@ void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
232 AskForReselectionOrMarkRedundant(yielding_thread, winner); 238 AskForReselectionOrMarkRedundant(yielding_thread, winner);
233} 239}
234 240
241void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) {
242 ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core.");
243 scheduled_queue[core].add(thread, priority);
244}
245
246void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) {
247 ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core.");
248 scheduled_queue[core].add(thread, priority, false);
249}
250
235void GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) { 251void GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
236 if (current_thread == winner) { 252 if (current_thread == winner) {
237 // TODO(blinkhawk): manage redundant operations, this is not implemented. 253 // TODO(blinkhawk): manage redundant operations, this is not implemented.
@@ -244,13 +260,13 @@ void GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, T
244 260
245GlobalScheduler::~GlobalScheduler() = default; 261GlobalScheduler::~GlobalScheduler() = default;
246 262
247Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 id) 263Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 core_id)
248 : system(system), cpu_core(cpu_core), id(id) {} 264 : system(system), cpu_core(cpu_core), core_id(core_id) {}
249 265
250Scheduler::~Scheduler() {} 266Scheduler::~Scheduler() = default;
251 267
252bool Scheduler::HaveReadyThreads() const { 268bool Scheduler::HaveReadyThreads() const {
253 return system.GlobalScheduler().HaveReadyThreads(id); 269 return system.GlobalScheduler().HaveReadyThreads(core_id);
254} 270}
255 271
256Thread* Scheduler::GetCurrentThread() const { 272Thread* Scheduler::GetCurrentThread() const {
@@ -262,7 +278,7 @@ Thread* Scheduler::GetSelectedThread() const {
262} 278}
263 279
264void Scheduler::SelectThreads() { 280void Scheduler::SelectThreads() {
265 system.GlobalScheduler().SelectThread(id); 281 system.GlobalScheduler().SelectThread(core_id);
266} 282}
267 283
268u64 Scheduler::GetLastContextSwitchTicks() const { 284u64 Scheduler::GetLastContextSwitchTicks() const {
@@ -270,13 +286,14 @@ u64 Scheduler::GetLastContextSwitchTicks() const {
270} 286}
271 287
272void Scheduler::TryDoContextSwitch() { 288void Scheduler::TryDoContextSwitch() {
273 if (context_switch_pending) 289 if (context_switch_pending) {
274 SwitchContext(); 290 SwitchContext();
291 }
275} 292}
276 293
277void Scheduler::UnloadThread() { 294void Scheduler::UnloadThread() {
278 Thread* const previous_thread = GetCurrentThread(); 295 Thread* const previous_thread = GetCurrentThread();
279 Process* const previous_process = Core::CurrentProcess(); 296 Process* const previous_process = system.Kernel().CurrentProcess();
280 297
281 UpdateLastContextSwitchTime(previous_thread, previous_process); 298 UpdateLastContextSwitchTime(previous_thread, previous_process);
282 299
@@ -301,10 +318,11 @@ void Scheduler::SwitchContext() {
301 Thread* const new_thread = GetSelectedThread(); 318 Thread* const new_thread = GetSelectedThread();
302 319
303 context_switch_pending = false; 320 context_switch_pending = false;
304 if (new_thread == previous_thread) 321 if (new_thread == previous_thread) {
305 return; 322 return;
323 }
306 324
307 Process* const previous_process = Core::CurrentProcess(); 325 Process* const previous_process = system.Kernel().CurrentProcess();
308 326
309 UpdateLastContextSwitchTime(previous_thread, previous_process); 327 UpdateLastContextSwitchTime(previous_thread, previous_process);
310 328
@@ -324,7 +342,7 @@ void Scheduler::SwitchContext() {
324 342
325 // Load context of new thread 343 // Load context of new thread
326 if (new_thread) { 344 if (new_thread) {
327 ASSERT_MSG(new_thread->GetProcessorID() == this->id, 345 ASSERT_MSG(new_thread->GetProcessorID() == this->core_id,
328 "Thread must be assigned to this core."); 346 "Thread must be assigned to this core.");
329 ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready, 347 ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready,
330 "Thread must be ready to become running."); 348 "Thread must be ready to become running.");
@@ -353,7 +371,7 @@ void Scheduler::SwitchContext() {
353 371
354void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { 372void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
355 const u64 prev_switch_ticks = last_context_switch_time; 373 const u64 prev_switch_ticks = last_context_switch_time;
356 const u64 most_recent_switch_ticks = Core::System::GetInstance().CoreTiming().GetTicks(); 374 const u64 most_recent_switch_ticks = system.CoreTiming().GetTicks();
357 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; 375 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
358 376
359 if (thread != nullptr) { 377 if (thread != nullptr) {
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 82ed64b55..1c9d8a30f 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -24,62 +24,70 @@ class GlobalScheduler final {
24public: 24public:
25 static constexpr u32 NUM_CPU_CORES = 4; 25 static constexpr u32 NUM_CPU_CORES = 4;
26 26
27 GlobalScheduler() { 27 explicit GlobalScheduler(Core::System& system);
28 reselection_pending = false;
29 }
30 ~GlobalScheduler(); 28 ~GlobalScheduler();
31 /// Adds a new thread to the scheduler 29 /// Adds a new thread to the scheduler
32 void AddThread(SharedPtr<Thread> thread); 30 void AddThread(SharedPtr<Thread> thread);
33 31
34 /// Removes a thread from the scheduler 32 /// Removes a thread from the scheduler
35 void RemoveThread(Thread* thread); 33 void RemoveThread(const Thread* thread);
36 34
37 /// Returns a list of all threads managed by the scheduler 35 /// Returns a list of all threads managed by the scheduler
38 const std::vector<SharedPtr<Thread>>& GetThreadList() const { 36 const std::vector<SharedPtr<Thread>>& GetThreadList() const {
39 return thread_list; 37 return thread_list;
40 } 38 }
41 39
40 // Add a thread to the suggested queue of a cpu core. Suggested threads may be
41 // picked if no thread is scheduled to run on the core.
42 void Suggest(u32 priority, u32 core, Thread* thread) { 42 void Suggest(u32 priority, u32 core, Thread* thread) {
43 suggested_queue[core].add(thread, priority); 43 suggested_queue[core].add(thread, priority);
44 } 44 }
45 45
 46 // Remove a thread from the suggested queue of a cpu core. Suggested threads may be
47 // picked if no thread is scheduled to run on the core.
46 void Unsuggest(u32 priority, u32 core, Thread* thread) { 48 void Unsuggest(u32 priority, u32 core, Thread* thread) {
47 suggested_queue[core].remove(thread, priority); 49 suggested_queue[core].remove(thread, priority);
48 } 50 }
49 51
50 void Schedule(u32 priority, u32 core, Thread* thread) { 52 // Add a thread to the scheduling queue of a cpu core. The thread is added at the
 51 ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core."); 53 // back of the queue in its priority level
52 scheduled_queue[core].add(thread, priority); 54 void Schedule(u32 priority, u32 core, Thread* thread);
53 }
54 55
55 void SchedulePrepend(u32 priority, u32 core, Thread* thread) { 56 // Add a thread to the scheduling queue of a cpu core. The thread is added at the
56 ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core."); 57 // front the queue in its priority level
57 scheduled_queue[core].add(thread, priority, false); 58 void SchedulePrepend(u32 priority, u32 core, Thread* thread);
58 }
59 59
60 // Reschedule an already scheduled thread based on a new priority
60 void Reschedule(u32 priority, u32 core, Thread* thread) { 61 void Reschedule(u32 priority, u32 core, Thread* thread) {
61 scheduled_queue[core].remove(thread, priority); 62 scheduled_queue[core].remove(thread, priority);
62 scheduled_queue[core].add(thread, priority); 63 scheduled_queue[core].add(thread, priority);
63 } 64 }
64 65
66 // Unschedule a thread.
65 void Unschedule(u32 priority, u32 core, Thread* thread) { 67 void Unschedule(u32 priority, u32 core, Thread* thread) {
66 scheduled_queue[core].remove(thread, priority); 68 scheduled_queue[core].remove(thread, priority);
67 } 69 }
68 70
 71 // Transfers a thread to a specific core. If the destination_core is -1
 72 // it will be unscheduled from its source core and added into its suggested
73 // queue.
69 void TransferToCore(u32 priority, s32 destination_core, Thread* thread) { 74 void TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
70 bool schedulable = thread->GetPriority() < THREADPRIO_COUNT; 75 const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
71 s32 source_core = thread->GetProcessorID(); 76 const s32 source_core = thread->GetProcessorID();
72 if (source_core == destination_core || !schedulable) 77 if (source_core == destination_core || !schedulable) {
73 return; 78 return;
79 }
74 thread->SetProcessorID(destination_core); 80 thread->SetProcessorID(destination_core);
75 if (source_core >= 0) 81 if (source_core >= 0) {
76 Unschedule(priority, source_core, thread); 82 Unschedule(priority, source_core, thread);
83 }
77 if (destination_core >= 0) { 84 if (destination_core >= 0) {
78 Unsuggest(priority, destination_core, thread); 85 Unsuggest(priority, destination_core, thread);
79 Schedule(priority, destination_core, thread); 86 Schedule(priority, destination_core, thread);
80 } 87 }
81 if (source_core >= 0) 88 if (source_core >= 0) {
82 Suggest(priority, source_core, thread); 89 Suggest(priority, source_core, thread);
90 }
83 } 91 }
84 92
85 /* 93 /*
@@ -99,7 +107,7 @@ public:
99 */ 107 */
100 void SelectThread(u32 core); 108 void SelectThread(u32 core);
101 109
102 bool HaveReadyThreads(u32 core_id) { 110 bool HaveReadyThreads(u32 core_id) const {
103 return !scheduled_queue[core_id].empty(); 111 return !scheduled_queue[core_id].empty();
104 } 112 }
105 113
@@ -133,8 +141,8 @@ public:
133 reselection_pending.store(true, std::memory_order_release); 141 reselection_pending.store(true, std::memory_order_release);
134 } 142 }
135 143
136 bool IsReselectionPending() { 144 bool IsReselectionPending() const {
137 return reselection_pending.load(std::memory_order_acquire); 145 return reselection_pending.load();
138 } 146 }
139 147
140private: 148private:
@@ -147,11 +155,12 @@ private:
147 155
148 /// Lists all thread ids that aren't deleted/etc. 156 /// Lists all thread ids that aren't deleted/etc.
149 std::vector<SharedPtr<Thread>> thread_list; 157 std::vector<SharedPtr<Thread>> thread_list;
158 Core::System& system;
150}; 159};
151 160
152class Scheduler final { 161class Scheduler final {
153public: 162public:
154 explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, const u32 id); 163 explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, const u32 core_id);
155 ~Scheduler(); 164 ~Scheduler();
156 165
157 /// Returns whether there are any threads that are ready to run. 166 /// Returns whether there are any threads that are ready to run.
@@ -204,7 +213,7 @@ private:
204 Core::ARM_Interface& cpu_core; 213 Core::ARM_Interface& cpu_core;
205 u64 last_context_switch_time = 0; 214 u64 last_context_switch_time = 0;
206 u64 idle_selection_count = 0; 215 u64 idle_selection_count = 0;
207 const u32 id; 216 const u32 core_id;
208 217
209 bool context_switch_pending = false; 218 bool context_switch_pending = false;
210}; 219};
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 560ac3945..d520ed033 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1560,13 +1560,13 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
1560 if (nanoseconds <= 0) { 1560 if (nanoseconds <= 0) {
1561 switch (static_cast<SleepType>(nanoseconds)) { 1561 switch (static_cast<SleepType>(nanoseconds)) {
1562 case SleepType::YieldWithoutLoadBalancing: 1562 case SleepType::YieldWithoutLoadBalancing:
1563 current_thread->YieldType0(); 1563 current_thread->YieldSimple();
1564 break; 1564 break;
1565 case SleepType::YieldWithLoadBalancing: 1565 case SleepType::YieldWithLoadBalancing:
1566 current_thread->YieldType1(); 1566 current_thread->YieldAndBalanceLoad();
1567 break; 1567 break;
1568 case SleepType::YieldAndWaitForLoadBalancing: 1568 case SleepType::YieldAndWaitForLoadBalancing:
1569 current_thread->YieldType2(); 1569 current_thread->YieldAndWaitForLoadBalancing();
1570 break; 1570 break;
1571 default: 1571 default:
1572 UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds); 1572 UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
@@ -1638,8 +1638,9 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var
1638 const auto& thread_list = scheduler.GetThreadList(); 1638 const auto& thread_list = scheduler.GetThreadList();
1639 1639
1640 for (const auto& thread : thread_list) { 1640 for (const auto& thread : thread_list) {
1641 if (thread->GetCondVarWaitAddress() == condition_variable_addr) 1641 if (thread->GetCondVarWaitAddress() == condition_variable_addr) {
1642 waiting_threads.push_back(thread); 1642 waiting_threads.push_back(thread);
1643 }
1643 } 1644 }
1644 1645
1645 // Sort them by priority, such that the highest priority ones come first. 1646 // Sort them by priority, such that the highest priority ones come first.
@@ -1747,9 +1748,11 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type,
1747 1748
1748 const auto arbitration_type = static_cast<AddressArbiter::ArbitrationType>(type); 1749 const auto arbitration_type = static_cast<AddressArbiter::ArbitrationType>(type);
1749 auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter(); 1750 auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
1750 ResultCode result = address_arbiter.WaitForAddress(address, arbitration_type, value, timeout); 1751 const ResultCode result =
1751 if (result == RESULT_SUCCESS) 1752 address_arbiter.WaitForAddress(address, arbitration_type, value, timeout);
1753 if (result == RESULT_SUCCESS) {
1752 system.PrepareReschedule(); 1754 system.PrepareReschedule();
1755 }
1753 return result; 1756 return result;
1754} 1757}
1755 1758
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index d0fa7b370..8cf0a7ec7 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -373,43 +373,44 @@ void Thread::Sleep(s64 nanoseconds) {
373 WakeAfterDelay(nanoseconds); 373 WakeAfterDelay(nanoseconds);
374} 374}
375 375
376void Thread::YieldType0() { 376void Thread::YieldSimple() {
377 auto& scheduler = kernel.GlobalScheduler(); 377 auto& scheduler = kernel.GlobalScheduler();
378 scheduler.YieldThread(this); 378 scheduler.YieldThread(this);
379} 379}
380 380
381void Thread::YieldType1() { 381void Thread::YieldAndBalanceLoad() {
382 auto& scheduler = kernel.GlobalScheduler(); 382 auto& scheduler = kernel.GlobalScheduler();
383 scheduler.YieldThreadAndBalanceLoad(this); 383 scheduler.YieldThreadAndBalanceLoad(this);
384} 384}
385 385
386void Thread::YieldType2() { 386void Thread::YieldAndWaitForLoadBalancing() {
387 auto& scheduler = kernel.GlobalScheduler(); 387 auto& scheduler = kernel.GlobalScheduler();
388 scheduler.YieldThreadAndWaitForLoadBalancing(this); 388 scheduler.YieldThreadAndWaitForLoadBalancing(this);
389} 389}
390 390
391void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) { 391void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
392 u32 old_flags = scheduling_state; 392 const u32 old_flags = scheduling_state;
393 scheduling_state = 393 scheduling_state =
394 (scheduling_state & ThreadSchedMasks::HighMask) | static_cast<u32>(new_status); 394 (scheduling_state & ThreadSchedMasks::HighMask) | static_cast<u32>(new_status);
395 AdjustSchedulingOnStatus(old_flags); 395 AdjustSchedulingOnStatus(old_flags);
396} 396}
397 397
398void Thread::SetCurrentPriority(u32 new_priority) { 398void Thread::SetCurrentPriority(u32 new_priority) {
399 u32 old_priority = current_priority; 399 u32 old_priority = std::exchange(current_priority, new_priority);
400 current_priority = new_priority;
401 AdjustSchedulingOnPriority(old_priority); 400 AdjustSchedulingOnPriority(old_priority);
402} 401}
403 402
404ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { 403ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
405 auto HighestSetCore = [](u64 mask, u32 max_cores) { 404 const auto HighestSetCore = [](u64 mask, u32 max_cores) {
406 for (s32 core = max_cores - 1; core >= 0; core--) { 405 for (s32 core = max_cores - 1; core >= 0; core--) {
407 if (((mask >> core) & 1) != 0) 406 if (((mask >> core) & 1) != 0) {
408 return core; 407 return core;
408 }
409 } 409 }
410 return -1; 410 return -1;
411 }; 411 };
412 bool use_override = affinity_override_count != 0; 412
413 const bool use_override = affinity_override_count != 0;
413 // The value -3 is "do not change the ideal core". 414 // The value -3 is "do not change the ideal core".
414 if (new_core == -3) { 415 if (new_core == -3) {
415 new_core = use_override ? ideal_core_override : ideal_core; 416 new_core = use_override ? ideal_core_override : ideal_core;
@@ -421,11 +422,10 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
421 ideal_core_override = new_core; 422 ideal_core_override = new_core;
422 affinity_mask_override = new_affinity_mask; 423 affinity_mask_override = new_affinity_mask;
423 } else { 424 } else {
424 u64 old_affinity_mask = affinity_mask; 425 const u64 old_affinity_mask = std::exchange(affinity_mask, new_affinity_mask);
425 ideal_core = new_core; 426 ideal_core = new_core;
426 affinity_mask = new_affinity_mask;
427 if (old_affinity_mask != new_affinity_mask) { 427 if (old_affinity_mask != new_affinity_mask) {
428 s32 old_core = processor_id; 428 const s32 old_core = processor_id;
429 if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) { 429 if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
430 if (ideal_core < 0) { 430 if (ideal_core < 0) {
431 processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES); 431 processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES);
@@ -440,28 +440,33 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
440} 440}
441 441
442void Thread::AdjustSchedulingOnStatus(u32 old_flags) { 442void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
443 if (old_flags == scheduling_state) 443 if (old_flags == scheduling_state) {
444 return; 444 return;
445 }
445 446
446 auto& scheduler = kernel.GlobalScheduler(); 447 auto& scheduler = kernel.GlobalScheduler();
447 if (static_cast<ThreadSchedStatus>(old_flags & ThreadSchedMasks::LowMask) == 448 if (static_cast<ThreadSchedStatus>(old_flags & ThreadSchedMasks::LowMask) ==
448 ThreadSchedStatus::Runnable) { 449 ThreadSchedStatus::Runnable) {
449 // In this case the thread was running, now it's pausing/exitting 450 // In this case the thread was running, now it's pausing/exitting
450 if (processor_id >= 0) 451 if (processor_id >= 0) {
451 scheduler.Unschedule(current_priority, processor_id, this); 452 scheduler.Unschedule(current_priority, processor_id, this);
453 }
452 454
453 for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { 455 for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
454 if (core != processor_id && ((affinity_mask >> core) & 1) != 0) 456 if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
455 scheduler.Unsuggest(current_priority, core, this); 457 scheduler.Unsuggest(current_priority, core, this);
458 }
456 } 459 }
457 } else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) { 460 } else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) {
458 // The thread is now set to running from being stopped 461 // The thread is now set to running from being stopped
459 if (processor_id >= 0) 462 if (processor_id >= 0) {
460 scheduler.Schedule(current_priority, processor_id, this); 463 scheduler.Schedule(current_priority, processor_id, this);
464 }
461 465
462 for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { 466 for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
463 if (core != processor_id && ((affinity_mask >> core) & 1) != 0) 467 if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
464 scheduler.Suggest(current_priority, core, this); 468 scheduler.Suggest(current_priority, core, this);
469 }
465 } 470 }
466 } 471 }
467 472
@@ -477,7 +482,7 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
477 scheduler.Unschedule(old_priority, processor_id, this); 482 scheduler.Unschedule(old_priority, processor_id, this);
478 } 483 }
479 484
480 for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { 485 for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
481 if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { 486 if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
482 scheduler.Unsuggest(old_priority, core, this); 487 scheduler.Unsuggest(old_priority, core, this);
483 } 488 }
@@ -494,7 +499,7 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
494 } 499 }
495 } 500 }
496 501
497 for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { 502 for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
498 if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { 503 if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
499 scheduler.Suggest(current_priority, core, this); 504 scheduler.Suggest(current_priority, core, this);
500 } 505 }
@@ -506,10 +511,11 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
506void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) { 511void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
507 auto& scheduler = Core::System::GetInstance().GlobalScheduler(); 512 auto& scheduler = Core::System::GetInstance().GlobalScheduler();
508 if (GetSchedulingStatus() != ThreadSchedStatus::Runnable || 513 if (GetSchedulingStatus() != ThreadSchedStatus::Runnable ||
509 current_priority >= THREADPRIO_COUNT) 514 current_priority >= THREADPRIO_COUNT) {
510 return; 515 return;
516 }
511 517
512 for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { 518 for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
513 if (((old_affinity_mask >> core) & 1) != 0) { 519 if (((old_affinity_mask >> core) & 1) != 0) {
514 if (core == old_core) { 520 if (core == old_core) {
515 scheduler.Unschedule(current_priority, core, this); 521 scheduler.Unschedule(current_priority, core, this);
@@ -519,7 +525,7 @@ void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
519 } 525 }
520 } 526 }
521 527
522 for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { 528 for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
523 if (((affinity_mask >> core) & 1) != 0) { 529 if (((affinity_mask >> core) & 1) != 0) {
524 if (core == processor_id) { 530 if (core == processor_id) {
525 scheduler.Schedule(current_priority, core, this); 531 scheduler.Schedule(current_priority, core, this);
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index c426a7209..bf0cae959 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -75,7 +75,12 @@ enum class ThreadActivity : u32 {
75 Paused = 1, 75 Paused = 1,
76}; 76};
77 77
78enum class ThreadSchedStatus : u32 { None = 0, Paused = 1, Runnable = 2, Exited = 3 }; 78enum class ThreadSchedStatus : u32 {
79 None = 0,
80 Paused = 1,
81 Runnable = 2,
82 Exited = 3,
83};
79 84
80enum ThreadSchedFlags : u32 { 85enum ThreadSchedFlags : u32 {
81 ProcessPauseFlag = 1 << 4, 86 ProcessPauseFlag = 1 << 4,
@@ -403,15 +408,15 @@ public:
403 void Sleep(s64 nanoseconds); 408 void Sleep(s64 nanoseconds);
404 409
405 /// Yields this thread without rebalancing loads. 410 /// Yields this thread without rebalancing loads.
406 void YieldType0(); 411 void YieldSimple();
407 412
408 /// Yields this thread and does a load rebalancing. 413 /// Yields this thread and does a load rebalancing.
409 void YieldType1(); 414 void YieldAndBalanceLoad();
410 415
411 /// Yields this thread and if the core is left idle, loads are rebalanced 416 /// Yields this thread and if the core is left idle, loads are rebalanced
412 void YieldType2(); 417 void YieldAndWaitForLoadBalancing();
413 418
414 ThreadSchedStatus GetSchedulingStatus() { 419 ThreadSchedStatus GetSchedulingStatus() const {
415 return static_cast<ThreadSchedStatus>(scheduling_state & ThreadSchedMasks::LowMask); 420 return static_cast<ThreadSchedStatus>(scheduling_state & ThreadSchedMasks::LowMask);
416 } 421 }
417 422