author     Lioncash  2020-10-13 08:10:50 -0400
committer  Lioncash  2020-10-13 13:16:49 -0400
commit  39c8d18feba8eafcd43fbb55e73ae150a1947aad (patch)
tree    9565ff464bbb9e5a0aa66e6e310098314e88d019 /src/core/hle/kernel/scheduler.cpp
parent  Merge pull request #3929 from FearlessTobi/ticket-keys (diff)
core/CMakeLists: Make some warnings errors
Makes our error coverage a little more consistent across the board by applying it to the Linux side of things as well, and brings the settings in line with the warning configuration used by other libraries in the project. This also updates httplib to 0.7.9, which contains warning cleanups that allow several additional warnings to be enabled as errors.
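
Illustration (not part of the commit message or the diff): the scheduler hunks below swap unsigned long shift literals for unsigned int ones so the bitmask arithmetic stays 32 bits wide. A minimal sketch of the pattern, assuming GCC/Clang-style -Wconversion diagnostics and a hypothetical helper name:

// Hypothetical example: building a 32-bit core mask.
#include <cstdint>

using u32 = std::uint32_t;

u32 mark_core_idle(u32 idle_cores, u32 core) {
    // (1ul << core) has type unsigned long (64-bit on typical Linux targets),
    // so OR-ing it into the mask and returning a u32 narrows the result, which
    // -Wconversion reports. An unsigned int literal keeps the expression at
    // 32 bits, matching the mask's type.
    return idle_cores | (1U << core);
}
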
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  10
1 file changed, 5 insertions, 5 deletions
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 5cbd3b912..6b7db5372 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -72,7 +72,7 @@ u32 GlobalScheduler::SelectThreads() {
         if (top_thread != nullptr) {
             // TODO(Blinkhawk): Implement Thread Pinning
         } else {
-            idle_cores |= (1ul << core);
+            idle_cores |= (1U << core);
         }
         top_threads[core] = top_thread;
     }
@@ -126,7 +126,7 @@ u32 GlobalScheduler::SelectThreads() {
             top_threads[core_id] = suggested;
         }
 
-        idle_cores &= ~(1ul << core_id);
+        idle_cores &= ~(1U << core_id);
     }
     u32 cores_needing_context_switch{};
     for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
@@ -134,7 +134,7 @@ u32 GlobalScheduler::SelectThreads() {
         ASSERT(top_threads[core] == nullptr ||
                static_cast<u32>(top_threads[core]->GetProcessorID()) == core);
         if (update_thread(top_threads[core], sched)) {
-            cores_needing_context_switch |= (1ul << core);
+            cores_needing_context_switch |= (1U << core);
         }
     }
     return cores_needing_context_switch;
@@ -364,7 +364,7 @@ void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
         } else {
             must_context_switch = true;
         }
-        cores_pending_reschedule &= ~(1ul << core);
+        cores_pending_reschedule &= ~(1U << core);
     }
     if (must_context_switch) {
         auto& core_scheduler = kernel.CurrentScheduler();
@@ -767,7 +767,7 @@ void Scheduler::SwitchToCurrent() {
                     current_thread->context_guard.unlock();
                     break;
                 }
-                if (current_thread->GetProcessorID() != core_id) {
+                if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
                     current_thread->context_guard.unlock();
                     break;
                 }
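
The final hunk applies the same idea to a comparison rather than a shift. A minimal sketch of the signed/unsigned mismatch it resolves, assuming -Wsign-compare is among the warnings now promoted to errors (the function name here is hypothetical):

// Hypothetical example: comparing a signed processor ID with an unsigned core index.
#include <cstdint>

using u32 = std::uint32_t;
using s32 = std::int32_t;

bool runs_on_core(s32 processor_id, u32 core_id) {
    // Comparing s32 against u32 directly triggers -Wsign-compare; casting the
    // ID (known to be non-negative here) to u32 silences the diagnostic.
    return static_cast<u32>(processor_id) == core_id;
}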