summaryrefslogtreecommitdiff
path: root/src/core/hle/kernel/scheduler.cpp
diff options
context:
space:
mode:
authorGravatar bunnei2020-02-14 14:40:20 -0500
committerGravatar GitHub2020-02-14 14:40:20 -0500
commitf552d553bac1374c583d748dad27f8c86e86c4a0 (patch)
tree1da4aa037ff417fa4fd43bffac267dcb2b55a72d /src/core/hle/kernel/scheduler.cpp
parentMerge pull request #3379 from ReinUsesLisp/cbuf-offset (diff)
parentCore: Correct compilation in GCC (diff)
downloadyuzu-f552d553bac1374c583d748dad27f8c86e86c4a0.tar.gz
yuzu-f552d553bac1374c583d748dad27f8c86e86c4a0.tar.xz
yuzu-f552d553bac1374c583d748dad27f8c86e86c4a0.zip
Merge pull request #3401 from FernandoS27/synchronization
Set of refactors for Kernel Synchronization and Hardware Constants
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
-rw-r--r--src/core/hle/kernel/scheduler.cpp12
1 file changed, 6 insertions, 6 deletions
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index eb196a690..86f1421bf 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -124,8 +124,8 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
124 "Thread yielding without being in front"); 124 "Thread yielding without being in front");
125 scheduled_queue[core_id].yield(priority); 125 scheduled_queue[core_id].yield(priority);
126 126
127 std::array<Thread*, NUM_CPU_CORES> current_threads; 127 std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
128 for (u32 i = 0; i < NUM_CPU_CORES; i++) { 128 for (std::size_t i = 0; i < current_threads.size(); i++) {
129 current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front(); 129 current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
130 } 130 }
131 131
@@ -177,8 +177,8 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
177 // function... 177 // function...
178 if (scheduled_queue[core_id].empty()) { 178 if (scheduled_queue[core_id].empty()) {
179 // Here, "current_threads" is calculated after the ""yield"", unlike yield -1 179 // Here, "current_threads" is calculated after the ""yield"", unlike yield -1
180 std::array<Thread*, NUM_CPU_CORES> current_threads; 180 std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
181 for (u32 i = 0; i < NUM_CPU_CORES; i++) { 181 for (std::size_t i = 0; i < current_threads.size(); i++) {
182 current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front(); 182 current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
183 } 183 }
184 for (auto& thread : suggested_queue[core_id]) { 184 for (auto& thread : suggested_queue[core_id]) {
@@ -208,7 +208,7 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
208} 208}
209 209
210void GlobalScheduler::PreemptThreads() { 210void GlobalScheduler::PreemptThreads() {
211 for (std::size_t core_id = 0; core_id < NUM_CPU_CORES; core_id++) { 211 for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
212 const u32 priority = preemption_priorities[core_id]; 212 const u32 priority = preemption_priorities[core_id];
213 213
214 if (scheduled_queue[core_id].size(priority) > 0) { 214 if (scheduled_queue[core_id].size(priority) > 0) {
@@ -349,7 +349,7 @@ bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread,
349} 349}
350 350
351void GlobalScheduler::Shutdown() { 351void GlobalScheduler::Shutdown() {
352 for (std::size_t core = 0; core < NUM_CPU_CORES; core++) { 352 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
353 scheduled_queue[core].clear(); 353 scheduled_queue[core].clear();
354 suggested_queue[core].clear(); 354 suggested_queue[core].clear();
355 } 355 }