path: root/src/core/hle/kernel
Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/handle_table.cpp |  2 +-
-rw-r--r--  src/core/hle/kernel/scheduler.cpp    | 10 +++++-----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index fb30b6f8b..3e745c18b 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -118,7 +118,7 @@ std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
 
 void HandleTable::Clear() {
     for (u16 i = 0; i < table_size; ++i) {
-        generations[i] = i + 1;
+        generations[i] = static_cast<u16>(i + 1);
         objects[i] = nullptr;
     }
     next_free_slot = 0;
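Note on the handle_table.cpp change: i is a u16, so i + 1 undergoes integer promotion to int, and storing that int back into a u16 slot is an implicit narrowing conversion that warnings such as -Wconversion flag. The static_cast keeps the behavior identical while making the narrowing explicit. A minimal standalone sketch of the same pattern (names are illustrative, not from the yuzu tree):

    #include <cstdint>

    using u16 = std::uint16_t;

    int main() {
        u16 generations[4]{};
        for (u16 i = 0; i < 4; ++i) {
            // i + 1 promotes to int; without the cast, assigning it back
            // to a u16 element is a narrowing implicit conversion.
            generations[i] = static_cast<u16>(i + 1);
        }
        return generations[0] == 1 ? 0 : 1;
    }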
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 5cbd3b912..6b7db5372 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -72,7 +72,7 @@ u32 GlobalScheduler::SelectThreads() {
         if (top_thread != nullptr) {
             // TODO(Blinkhawk): Implement Thread Pinning
         } else {
-            idle_cores |= (1ul << core);
+            idle_cores |= (1U << core);
         }
         top_threads[core] = top_thread;
     }
@@ -126,7 +126,7 @@ u32 GlobalScheduler::SelectThreads() {
             top_threads[core_id] = suggested;
         }
 
-        idle_cores &= ~(1ul << core_id);
+        idle_cores &= ~(1U << core_id);
     }
     u32 cores_needing_context_switch{};
     for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
@@ -134,7 +134,7 @@ u32 GlobalScheduler::SelectThreads() {
         ASSERT(top_threads[core] == nullptr ||
                static_cast<u32>(top_threads[core]->GetProcessorID()) == core);
         if (update_thread(top_threads[core], sched)) {
-            cores_needing_context_switch |= (1ul << core);
+            cores_needing_context_switch |= (1U << core);
         }
     }
     return cores_needing_context_switch;
@@ -364,7 +364,7 @@ void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
         } else {
             must_context_switch = true;
         }
-        cores_pending_reschedule &= ~(1ul << core);
+        cores_pending_reschedule &= ~(1U << core);
     }
     if (must_context_switch) {
         auto& core_scheduler = kernel.CurrentScheduler();
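Note on the 1ul -> 1U changes above: idle_cores, cores_needing_context_switch, and cores_pending_reschedule are u32 bitmasks, but 1ul is an unsigned long, which is 64 bits wide on common LP64 targets. Folding the 64-bit shift result into a u32 therefore narrows on every |= and &= ~, while 1U yields an unsigned int that matches the mask's width. A minimal sketch of the pattern (u32 stands in for yuzu's 32-bit alias from common_types):

    #include <cstdint>

    using u32 = std::uint32_t;

    int main() {
        u32 idle_cores = 0;
        const u32 core = 3;
        // (1ul << core) is unsigned long (64-bit on LP64), so |= into a
        // u32 narrows; (1U << core) stays 32-bit and matches the mask.
        idle_cores |= (1U << core);
        idle_cores &= ~(1U << core);
        return static_cast<int>(idle_cores); // 0: the bit was set and cleared
    }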
@@ -767,7 +767,7 @@ void Scheduler::SwitchToCurrent() {
             current_thread->context_guard.unlock();
             break;
         }
-        if (current_thread->GetProcessorID() != core_id) {
+        if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
             current_thread->context_guard.unlock();
             break;
         }
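Note on the final scheduler.cpp hunk: core_id is unsigned while GetProcessorID() evidently returns a signed integer, so the direct != comparison mixes signedness and draws a -Wsign-compare warning. Casting the signed side to u32 makes both operands unsigned; this assumes the processor ID is non-negative here, which the surrounding checks imply. A minimal sketch with a hypothetical stand-in for GetProcessorID():

    #include <cstdint>

    using u32 = std::uint32_t;
    using s32 = std::int32_t;

    // Hypothetical stand-in; in the real code this is a method on Thread.
    s32 GetProcessorID() {
        return 2;
    }

    int main() {
        const u32 core_id = 2;
        // s32 != u32 mixes signedness (-Wsign-compare); casting the signed
        // side makes the comparison unsigned on both sides.
        if (static_cast<u32>(GetProcessorID()) != core_id) {
            return 1;
        }
        return 0;
    }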