path: root/src/core/hle/kernel/scheduler.cpp
author     Lioncash    2020-10-15 14:49:45 -0400
committer  Lioncash    2020-10-17 19:50:39 -0400
commit     be1954e04cb5a0c3a526f78ed5490a5e65310280 (patch)
tree       267db7ae4be88dbbc288fa605e35d4a2a13839f6 /src/core/hle/kernel/scheduler.cpp
parent     Merge pull request #4787 from lioncash/conversion (diff)
download   yuzu-be1954e04cb5a0c3a526f78ed5490a5e65310280.tar.gz
           yuzu-be1954e04cb5a0c3a526f78ed5490a5e65310280.tar.xz
           yuzu-be1954e04cb5a0c3a526f78ed5490a5e65310280.zip
core: Fix clang build
Recent changes to the build system caused more warnings to be flagged as errors, which broke the build with clang. Fixes #4795.
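The diff below applies one recurring fix: signed core ids returned by GetProcessorID() are cast to u32 before they index per-core arrays. As a rough illustration (not part of the commit, and assuming -Wsign-conversion is among the newly enforced warnings), this is the class of diagnostic clang turns into a hard error under -Werror:

    // Hypothetical standalone example; names do not come from the yuzu sources.
    #include <array>
    #include <cstdint>

    std::array<int, 4> top_threads{};

    int Lookup(std::int32_t core_id) {
        // With -Werror and -Wsign-conversion, clang rejects the signed index:
        //   implicit conversion changes signedness: 'int32_t' to 'size_type'
        // return top_threads[core_id];

        // The commit's pattern: only index once the id is known to be >= 0,
        // and make the signed-to-unsigned conversion explicit.
        if (core_id < 0) {
            return 0;
        }
        return top_threads[static_cast<std::uint32_t>(core_id)];
    }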
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  72
1 file changed, 51 insertions(+), 21 deletions(-)
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 6b7db5372..4a9a762f3 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -89,9 +89,11 @@ u32 GlobalScheduler::SelectThreads() {
         while (iter != suggested_queue[core_id].end()) {
             suggested = *iter;
             iter++;
-            s32 suggested_core_id = suggested->GetProcessorID();
-            Thread* top_thread =
-                suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr;
+            const s32 suggested_core_id = suggested->GetProcessorID();
+            Thread* top_thread = suggested_core_id >= 0
+                                     ? top_threads[static_cast<u32>(suggested_core_id)]
+                                     : nullptr;
+
             if (top_thread != suggested) {
                 if (top_thread != nullptr &&
                     top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
@@ -102,16 +104,19 @@ u32 GlobalScheduler::SelectThreads() {
                 TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested);
                 break;
             }
+
             suggested = nullptr;
             migration_candidates[num_candidates++] = suggested_core_id;
         }
+
         // Step 3: Select a suggested thread from another core
         if (suggested == nullptr) {
             for (std::size_t i = 0; i < num_candidates; i++) {
-                s32 candidate_core = migration_candidates[i];
+                const auto candidate_core = static_cast<u32>(migration_candidates[i]);
                 suggested = top_threads[candidate_core];
                 auto it = scheduled_queue[candidate_core].begin();
-                it++;
+                ++it;
+
                 Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
                 if (next != nullptr) {
                     TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
@@ -128,7 +133,8 @@ u32 GlobalScheduler::SelectThreads() {
 
         idle_cores &= ~(1U << core_id);
     }
-    u32 cores_needing_context_switch{};
+
+    u32 cores_needing_context_switch = 0;
     for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         Scheduler& sched = kernel.Scheduler(core);
         ASSERT(top_threads[core] == nullptr ||
@@ -186,13 +192,16 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
         for (auto& thread : suggested_queue[core_id]) {
             const s32 source_core = thread->GetProcessorID();
             if (source_core >= 0) {
-                if (current_threads[source_core] != nullptr) {
-                    if (thread == current_threads[source_core] ||
-                        current_threads[source_core]->GetPriority() < min_regular_priority) {
+                const auto sanitized_source_core = static_cast<u32>(source_core);
+
+                if (current_threads[sanitized_source_core] != nullptr) {
+                    if (thread == current_threads[sanitized_source_core] ||
+                        current_threads[sanitized_source_core]->GetPriority() < min_regular_priority) {
                         continue;
                     }
                 }
             }
+
             if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
                 next_thread->GetPriority() < thread->GetPriority()) {
                 if (thread->GetPriority() <= priority) {
@@ -240,17 +249,25 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
         for (std::size_t i = 0; i < current_threads.size(); i++) {
             current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
         }
+
         for (auto& thread : suggested_queue[core_id]) {
             const s32 source_core = thread->GetProcessorID();
-            if (source_core < 0 || thread == current_threads[source_core]) {
+            if (source_core < 0) {
+                continue;
+            }
+
+            const auto sanitized_source_core = static_cast<u32>(source_core);
+            if (thread == current_threads[sanitized_source_core]) {
                 continue;
             }
-            if (current_threads[source_core] == nullptr ||
-                current_threads[source_core]->GetPriority() >= min_regular_priority) {
+
+            if (current_threads[sanitized_source_core] == nullptr ||
+                current_threads[sanitized_source_core]->GetPriority() >= min_regular_priority) {
                 winner = thread;
             }
             break;
         }
+
         if (winner != nullptr) {
             if (winner != yielding_thread) {
                 TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
@@ -292,17 +309,22 @@ void GlobalScheduler::PreemptThreads() {
             if (thread->GetPriority() != priority) {
                 continue;
             }
+
             if (source_core >= 0) {
-                Thread* next_thread = scheduled_queue[source_core].empty()
+                const auto sanitized_source_core = static_cast<u32>(source_core);
+                Thread* next_thread = scheduled_queue[sanitized_source_core].empty()
                                           ? nullptr
-                                          : scheduled_queue[source_core].front();
+                                          : scheduled_queue[sanitized_source_core].front();
+
                 if (next_thread != nullptr && next_thread->GetPriority() < 2) {
                     break;
                 }
+
                 if (next_thread == thread) {
                     continue;
                 }
             }
+
             if (current_thread != nullptr &&
                 current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
                 winner = thread;
@@ -322,17 +344,22 @@ void GlobalScheduler::PreemptThreads() {
             if (thread->GetPriority() < priority) {
                 continue;
             }
+
             if (source_core >= 0) {
-                Thread* next_thread = scheduled_queue[source_core].empty()
+                const auto sanitized_source_core = static_cast<u32>(source_core);
+                Thread* next_thread = scheduled_queue[sanitized_source_core].empty()
                                           ? nullptr
-                                          : scheduled_queue[source_core].front();
+                                          : scheduled_queue[sanitized_source_core].front();
+
                 if (next_thread != nullptr && next_thread->GetPriority() < 2) {
                     break;
                 }
+
                 if (next_thread == thread) {
                     continue;
                 }
             }
+
             if (current_thread != nullptr &&
                 current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
                 winner = thread;
@@ -352,11 +379,11 @@ void GlobalScheduler::PreemptThreads() {
 
 void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
                                                  Core::EmuThreadHandle global_thread) {
-    u32 current_core = global_thread.host_handle;
+    const u32 current_core = global_thread.host_handle;
     bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
                                (current_core < Core::Hardware::NUM_CPU_CORES);
     while (cores_pending_reschedule != 0) {
-        u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
+        const u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
         ASSERT(core < Core::Hardware::NUM_CPU_CORES);
         if (!must_context_switch || core != current_core) {
             auto& phys_core = kernel.PhysicalCore(core);
@@ -366,6 +393,7 @@ void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
         }
         cores_pending_reschedule &= ~(1U << core);
     }
+
     if (must_context_switch) {
         auto& core_scheduler = kernel.CurrentScheduler();
         kernel.ExitSVCProfile();
@@ -803,9 +831,11 @@ void Scheduler::Initialize() {
     std::string name = "Idle Thread Id:" + std::to_string(core_id);
     std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
     void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
-    ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
-    auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
-                                     nullptr, std::move(init_func), init_func_parameter);
+    const auto type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
+    auto thread_res =
+        Thread::Create(system, type, std::move(name), 0, 64, 0, static_cast<s32>(core_id), 0,
+                       nullptr, std::move(init_func), init_func_parameter);
+
     idle_thread = std::move(thread_res).Unwrap();
 }
 