| | |
|---|---|
| author | 2019-11-15 12:08:50 -0500 |
| committer | 2019-11-15 12:08:50 -0500 |
| commit | 3e0e4f146b3f2ad7f1935a61141c38cdce87e04f (patch) |
| tree | 75f29ce4bb9d84f9d9722b7aa86d71e805cac8cc /src/core/hle/kernel |
| parent | Merge pull request #3113 from lioncash/semi (diff) |
| parent | externals: Update httplib (diff) |
Merge pull request #3091 from lioncash/core-conversion
core: Make most implicit type conversion warnings errors on MSVC
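The compiler flags that actually promote these warnings to errors presumably live outside this subtree (the diffstat below is limited to src/core/hle/kernel), so only the resulting source fixes are shown here. As a rough illustration of the class of diagnostic being cleaned up — the warning numbers are examples, not necessarily the exact set the PR enables — MSVC at /W4 flags implicit conversions like the ones below, and they stop the build once treated as errors (e.g. `cl /W4 /we4267 /we4245`):

```cpp
// Illustrative only -- not code from the PR. Names and warning numbers are examples.
#include <array>
#include <cstdint>

std::array<int, 4> items{};

std::uint32_t ItemCount() {
    // C4267: conversion from 'size_t' to 'uint32_t', possible loss of data (64-bit build).
    return items.size();
}

std::uint32_t InvalidId() {
    // C4245: conversion from 'int' to 'uint32_t', signed/unsigned mismatch
    // (the same issue behind the ResultCode(-1) call sites changed below).
    return -1;
}

std::uint32_t ItemCountFixed() {
    // The pattern used throughout this diff: keep the exact type where possible,
    // and make any remaining conversion explicit with static_cast.
    return static_cast<std::uint32_t>(items.size());
}
```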
Diffstat (limited to 'src/core/hle/kernel')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | src/core/hle/kernel/scheduler.cpp | 46 |
| -rw-r--r-- | src/core/hle/kernel/scheduler.h | 36 |
| -rw-r--r-- | src/core/hle/kernel/thread.cpp | 48 |
| -rw-r--r-- | src/core/hle/kernel/vm_manager.cpp | 2 |
4 files changed, 60 insertions(+), 72 deletions(-)
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 0e2dbf13e..16e95381b 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -35,12 +35,12 @@ void GlobalScheduler::RemoveThread(const Thread* thread) {
                       thread_list.end());
 }
 
-void GlobalScheduler::UnloadThread(s32 core) {
+void GlobalScheduler::UnloadThread(std::size_t core) {
     Scheduler& sched = system.Scheduler(core);
     sched.UnloadThread();
 }
 
-void GlobalScheduler::SelectThread(u32 core) {
+void GlobalScheduler::SelectThread(std::size_t core) {
     const auto update_thread = [](Thread* thread, Scheduler& sched) {
         if (thread != sched.selected_thread) {
             if (thread == nullptr) {
@@ -77,9 +77,9 @@ void GlobalScheduler::SelectThread(u32 core) {
     // if we got a suggested thread, select it, else do a second pass.
     if (winner && winner->GetPriority() > 2) {
         if (winner->IsRunning()) {
-            UnloadThread(winner->GetProcessorID());
+            UnloadThread(static_cast<u32>(winner->GetProcessorID()));
         }
-        TransferToCore(winner->GetPriority(), core, winner);
+        TransferToCore(winner->GetPriority(), static_cast<s32>(core), winner);
         update_thread(winner, sched);
         return;
     }
@@ -91,9 +91,9 @@ void GlobalScheduler::SelectThread(u32 core) {
             Thread* thread_on_core = scheduled_queue[src_core].front();
             Thread* to_change = *it;
             if (thread_on_core->IsRunning() || to_change->IsRunning()) {
-                UnloadThread(src_core);
+                UnloadThread(static_cast<u32>(src_core));
             }
-            TransferToCore(thread_on_core->GetPriority(), core, thread_on_core);
+            TransferToCore(thread_on_core->GetPriority(), static_cast<s32>(core), thread_on_core);
             current_thread = thread_on_core;
             break;
         }
@@ -154,9 +154,9 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
         if (winner != nullptr) {
             if (winner != yielding_thread) {
                 if (winner->IsRunning()) {
-                    UnloadThread(winner->GetProcessorID());
+                    UnloadThread(static_cast<u32>(winner->GetProcessorID()));
                 }
-                TransferToCore(winner->GetPriority(), core_id, winner);
+                TransferToCore(winner->GetPriority(), s32(core_id), winner);
             }
         } else {
             winner = next_thread;
@@ -196,9 +196,9 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
         if (winner != nullptr) {
             if (winner != yielding_thread) {
                 if (winner->IsRunning()) {
-                    UnloadThread(winner->GetProcessorID());
+                    UnloadThread(static_cast<u32>(winner->GetProcessorID()));
                 }
-                TransferToCore(winner->GetPriority(), core_id, winner);
+                TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
             }
         } else {
             winner = yielding_thread;
@@ -248,7 +248,7 @@ void GlobalScheduler::PreemptThreads() {
 
         if (winner != nullptr) {
             if (winner->IsRunning()) {
-                UnloadThread(winner->GetProcessorID());
+                UnloadThread(static_cast<u32>(winner->GetProcessorID()));
             }
             TransferToCore(winner->GetPriority(), s32(core_id), winner);
             current_thread =
@@ -281,7 +281,7 @@ void GlobalScheduler::PreemptThreads() {
 
         if (winner != nullptr) {
             if (winner->IsRunning()) {
-                UnloadThread(winner->GetProcessorID());
+                UnloadThread(static_cast<u32>(winner->GetProcessorID()));
             }
             TransferToCore(winner->GetPriority(), s32(core_id), winner);
             current_thread = winner;
@@ -292,30 +292,30 @@ void GlobalScheduler::PreemptThreads() {
     }
 }
 
-void GlobalScheduler::Suggest(u32 priority, u32 core, Thread* thread) {
+void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) {
     suggested_queue[core].add(thread, priority);
 }
 
-void GlobalScheduler::Unsuggest(u32 priority, u32 core, Thread* thread) {
+void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) {
     suggested_queue[core].remove(thread, priority);
 }
 
-void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) {
+void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) {
     ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
     scheduled_queue[core].add(thread, priority);
 }
 
-void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) {
+void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) {
     ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
     scheduled_queue[core].add(thread, priority, false);
 }
 
-void GlobalScheduler::Reschedule(u32 priority, u32 core, Thread* thread) {
+void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) {
     scheduled_queue[core].remove(thread, priority);
     scheduled_queue[core].add(thread, priority);
 }
 
-void GlobalScheduler::Unschedule(u32 priority, u32 core, Thread* thread) {
+void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) {
     scheduled_queue[core].remove(thread, priority);
 }
 
@@ -327,14 +327,14 @@ void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread*
     }
     thread->SetProcessorID(destination_core);
     if (source_core >= 0) {
-        Unschedule(priority, source_core, thread);
+        Unschedule(priority, static_cast<u32>(source_core), thread);
     }
     if (destination_core >= 0) {
-        Unsuggest(priority, destination_core, thread);
-        Schedule(priority, destination_core, thread);
+        Unsuggest(priority, static_cast<u32>(destination_core), thread);
+        Schedule(priority, static_cast<u32>(destination_core), thread);
     }
     if (source_core >= 0) {
-        Suggest(priority, source_core, thread);
+        Suggest(priority, static_cast<u32>(source_core), thread);
     }
 }
 
@@ -357,7 +357,7 @@ void GlobalScheduler::Shutdown() {
     thread_list.clear();
 }
 
-Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 core_id)
+Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, std::size_t core_id)
     : system(system), cpu_core(cpu_core), core_id(core_id) {}
 
 Scheduler::~Scheduler() = default;
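The recurring pattern in scheduler.cpp: core indices that only ever subscript the per-core queue arrays become std::size_t, while values that can legitimately be negative (Thread::GetProcessorID() returns an s32, with -1 meaning "not assigned to a core") stay signed and are cast exactly once, after the sign check. A condensed sketch of that shape, with simplified types rather than the actual yuzu declarations:

```cpp
// Condensed sketch of the conversion pattern above (types simplified; the real
// code uses yuzu's Thread and priority-aware multi-level queue types).
#include <array>
#include <cstddef>
#include <cstdint>

constexpr std::size_t NUM_CPU_CORES = 4;

struct Queue {
    void add(int /*thread*/, std::uint32_t /*priority*/) {}
};

std::array<Queue, NUM_CPU_CORES> scheduled_queue{};

// Indices are std::size_t, so subscripting std::array needs no conversion at all.
void Schedule(std::uint32_t priority, std::size_t core, int thread) {
    scheduled_queue[core].add(thread, priority);
}

// A signed processor ID (-1 == unassigned) is checked first, then cast once;
// the unsigned value widens to std::size_t without any warning.
void OnThreadReady(std::int32_t processor_id, std::uint32_t priority, int thread) {
    if (processor_id >= 0) {
        Schedule(priority, static_cast<std::uint32_t>(processor_id), thread);
    }
}
```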
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index f2d6311b8..311849dfb 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -42,41 +42,34 @@ public:
      * Add a thread to the suggested queue of a cpu core. Suggested threads may be
      * picked if no thread is scheduled to run on the core.
      */
-    void Suggest(u32 priority, u32 core, Thread* thread);
+    void Suggest(u32 priority, std::size_t core, Thread* thread);
 
     /**
      * Remove a thread to the suggested queue of a cpu core. Suggested threads may be
      * picked if no thread is scheduled to run on the core.
      */
-    void Unsuggest(u32 priority, u32 core, Thread* thread);
+    void Unsuggest(u32 priority, std::size_t core, Thread* thread);
 
     /**
      * Add a thread to the scheduling queue of a cpu core. The thread is added at the
      * back the queue in its priority level.
      */
-    void Schedule(u32 priority, u32 core, Thread* thread);
+    void Schedule(u32 priority, std::size_t core, Thread* thread);
 
     /**
      * Add a thread to the scheduling queue of a cpu core. The thread is added at the
      * front the queue in its priority level.
      */
-    void SchedulePrepend(u32 priority, u32 core, Thread* thread);
+    void SchedulePrepend(u32 priority, std::size_t core, Thread* thread);
 
     /// Reschedule an already scheduled thread based on a new priority
-    void Reschedule(u32 priority, u32 core, Thread* thread);
+    void Reschedule(u32 priority, std::size_t core, Thread* thread);
 
     /// Unschedules a thread.
-    void Unschedule(u32 priority, u32 core, Thread* thread);
-
-    /**
-     * Transfers a thread into an specific core. If the destination_core is -1
-     * it will be unscheduled from its source code and added into its suggested
-     * queue.
-     */
-    void TransferToCore(u32 priority, s32 destination_core, Thread* thread);
+    void Unschedule(u32 priority, std::size_t core, Thread* thread);
 
     /// Selects a core and forces it to unload its current thread's context
-    void UnloadThread(s32 core);
+    void UnloadThread(std::size_t core);
 
     /**
      * Takes care of selecting the new scheduled thread in three steps:
@@ -90,9 +83,9 @@ public:
      * 3. Third is no suggested thread is found, we do a second pass and pick a running
      * thread in another core and swap it with its current thread.
      */
-    void SelectThread(u32 core);
+    void SelectThread(std::size_t core);
 
-    bool HaveReadyThreads(u32 core_id) const {
+    bool HaveReadyThreads(std::size_t core_id) const {
         return !scheduled_queue[core_id].empty();
     }
 
@@ -145,6 +138,13 @@ public:
     void Shutdown();
 
 private:
+    /**
+     * Transfers a thread into an specific core. If the destination_core is -1
+     * it will be unscheduled from its source code and added into its suggested
+     * queue.
+     */
+    void TransferToCore(u32 priority, s32 destination_core, Thread* thread);
+
     bool AskForReselectionOrMarkRedundant(Thread* current_thread, const Thread* winner);
 
     static constexpr u32 min_regular_priority = 2;
@@ -163,7 +163,7 @@ private:
 
 class Scheduler final {
 public:
-    explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 core_id);
+    explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, std::size_t core_id);
     ~Scheduler();
 
     /// Returns whether there are any threads that are ready to run.
@@ -220,7 +220,7 @@ private:
     Core::ARM_Interface& cpu_core;
     u64 last_context_switch_time = 0;
     u64 idle_selection_count = 0;
-    const u32 core_id;
+    const std::size_t core_id;
 
     bool is_context_switch_pending = false;
 };
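Aside from the u32 → std::size_t signature changes, the only structural change in the header is that TransferToCore moves from the public interface into the private section, making it an internal re-queueing helper rather than part of the scheduler's API. For readers unfamiliar with the two queue kinds the doc comments describe, here is a deliberately simplified sketch of their relationship; the container types and selection logic are stand-ins, not the real priority-aware multi-level queues:

```cpp
// Simplified sketch of "scheduled" vs. "suggested" per-core queues (illustrative only).
#include <array>
#include <cstddef>
#include <deque>
#include <string>
#include <utility>

constexpr std::size_t NUM_CPU_CORES = 4;

struct SchedulerSketch {
    // Threads scheduled to run on a core, and threads merely suggested for it.
    std::array<std::deque<std::string>, NUM_CPU_CORES> scheduled;
    std::array<std::deque<std::string>, NUM_CPU_CORES> suggested;

    void Schedule(std::size_t core, std::string thread) {
        scheduled[core].push_back(std::move(thread));
    }
    void Suggest(std::size_t core, std::string thread) {
        suggested[core].push_back(std::move(thread));
    }

    // Mirrors the documented selection order: take the scheduled head if there is
    // one, otherwise fall back to a suggested thread, otherwise idle.
    std::string SelectThread(std::size_t core) const {
        if (!scheduled[core].empty()) {
            return scheduled[core].front();
        }
        if (!suggested[core].empty()) {
            return suggested[core].front();
        }
        return "idle";
    }
};
```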
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 962530d2d..ee7531f2d 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -77,18 +77,6 @@ void Thread::CancelWakeupTimer() {
         callback_handle);
 }
 
-static std::optional<s32> GetNextProcessorId(u64 mask) {
-    for (s32 index = 0; index < Core::NUM_CPU_CORES; ++index) {
-        if (mask & (1ULL << index)) {
-            if (!Core::System::GetInstance().Scheduler(index).GetCurrentThread()) {
-                // Core is enabled and not running any threads, use this one
-                return index;
-            }
-        }
-    }
-    return {};
-}
-
 void Thread::ResumeFromWait() {
     ASSERT_MSG(wait_objects.empty(), "Thread is waking up while waiting for objects");
 
@@ -173,7 +161,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
     if (!Memory::IsValidVirtualAddress(owner_process, entry_point)) {
         LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point);
         // TODO (bunnei): Find the correct error code to use here
-        return ResultCode(-1);
+        return RESULT_UNKNOWN;
     }
 
     auto& system = Core::System::GetInstance();
@@ -401,7 +389,7 @@ void Thread::SetCurrentPriority(u32 new_priority) {
 
 ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
     const auto HighestSetCore = [](u64 mask, u32 max_cores) {
-        for (s32 core = max_cores - 1; core >= 0; core--) {
+        for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
             if (((mask >> core) & 1) != 0) {
                 return core;
             }
@@ -425,7 +413,7 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
     if (old_affinity_mask != new_affinity_mask) {
         const s32 old_core = processor_id;
         if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
-            if (ideal_core < 0) {
+            if (static_cast<s32>(ideal_core) < 0) {
                 processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES);
             } else {
                 processor_id = ideal_core;
@@ -447,23 +435,23 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
         ThreadSchedStatus::Runnable) {
         // In this case the thread was running, now it's pausing/exitting
         if (processor_id >= 0) {
-            scheduler.Unschedule(current_priority, processor_id, this);
+            scheduler.Unschedule(current_priority, static_cast<u32>(processor_id), this);
         }
 
-        for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
-            if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
-                scheduler.Unsuggest(current_priority, static_cast<u32>(core), this);
+        for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+            if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
+                scheduler.Unsuggest(current_priority, core, this);
             }
         }
     } else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) {
         // The thread is now set to running from being stopped
         if (processor_id >= 0) {
-            scheduler.Schedule(current_priority, processor_id, this);
+            scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this);
         }
 
-        for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
-            if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
-                scheduler.Suggest(current_priority, static_cast<u32>(core), this);
+        for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+            if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
+                scheduler.Suggest(current_priority, core, this);
             }
         }
     }
@@ -477,11 +465,11 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
     }
    auto& scheduler = Core::System::GetInstance().GlobalScheduler();
     if (processor_id >= 0) {
-        scheduler.Unschedule(old_priority, processor_id, this);
+        scheduler.Unschedule(old_priority, static_cast<u32>(processor_id), this);
     }
 
     for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
-        if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
+        if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
             scheduler.Unsuggest(old_priority, core, this);
         }
     }
@@ -491,14 +479,14 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
 
     if (processor_id >= 0) {
         if (current_thread == this) {
-            scheduler.SchedulePrepend(current_priority, processor_id, this);
+            scheduler.SchedulePrepend(current_priority, static_cast<u32>(processor_id), this);
         } else {
-            scheduler.Schedule(current_priority, processor_id, this);
+            scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this);
         }
     }
 
     for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
-        if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
+        if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
             scheduler.Suggest(current_priority, core, this);
         }
     }
@@ -515,7 +503,7 @@ void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
 
     for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
         if (((old_affinity_mask >> core) & 1) != 0) {
-            if (core == old_core) {
+            if (core == static_cast<u32>(old_core)) {
                 scheduler.Unschedule(current_priority, core, this);
             } else {
                 scheduler.Unsuggest(current_priority, core, this);
@@ -525,7 +513,7 @@ void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
 
     for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
         if (((affinity_mask >> core) & 1) != 0) {
-            if (core == processor_id) {
+            if (core == static_cast<u32>(processor_id)) {
                 scheduler.Schedule(current_priority, core, this);
             } else {
                 scheduler.Suggest(current_priority, core, this);
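The thread.cpp changes are the same conversions seen from the caller's side: loop counters over NUM_CPU_CORES become u32, and the signed processor_id / old_core values are cast explicitly inside the comparisons so MSVC's signed/unsigned-mismatch diagnostics stay quiet. A minimal sketch of the shape (member names simplified, warning numbers shown for illustration):

```cpp
// Minimal sketch, not the actual yuzu members: comparing a signed ID against an
// unsigned loop index over the per-core affinity mask.
#include <cstdint>

constexpr std::uint32_t NUM_CPU_CORES = 4;

int CountOtherAffinityCores(std::int32_t processor_id, std::uint64_t affinity_mask) {
    int count = 0;
    for (std::uint32_t core = 0; core < NUM_CPU_CORES; core++) {
        // Without the cast, `core != processor_id` mixes signed and unsigned operands
        // (MSVC's C4018/C4389 family); the explicit cast states the intent.
        if (core != static_cast<std::uint32_t>(processor_id) &&
            ((affinity_mask >> core) & 1) != 0) {
            ++count;
        }
    }
    return count;
}
```

The cast also preserves the original behaviour for an unassigned thread: with processor_id == -1 the cast yields a value no core index can equal, so every core in the mask is still treated as an "other" core, just as the old signed comparison did.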
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index c7af87073..e6eee09d7 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -167,7 +167,7 @@ ResultVal<VAddr> VMManager::FindFreeRegion(VAddr begin, VAddr end, u64 size) con
 
     if (vma_handle == vma_map.cend()) {
         // TODO(Subv): Find the correct error code here.
-        return ResultCode(-1);
+        return RESULT_UNKNOWN;
     }
 
     const VAddr target = std::max(begin, vma_handle->second.base);
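Both ResultCode(-1) call sites in this diff become RESULT_UNKNOWN, a named constant defined in yuzu's result-code header (not shown here). Besides reading better, the change removes the constant signed-to-unsigned conversion that the literal -1 forces on ResultCode's raw-value constructor. A generic illustration with placeholder definitions — the value and fields below are assumptions, not yuzu's actual ones:

```cpp
// Placeholder definitions for illustration; yuzu's real ResultCode carries
// module/description bit-fields and defines RESULT_UNKNOWN in its result header.
#include <cstdint>

struct ResultCode {
    std::uint32_t raw;
    constexpr explicit ResultCode(std::uint32_t raw_) : raw(raw_) {}
};

// A named "we don't know the right error yet" constant; the value here is assumed.
constexpr ResultCode RESULT_UNKNOWN{0xFFFFFFFFu};

ResultCode FindFreeRegionStub(bool found) {
    if (!found) {
        // Was `return ResultCode(-1);` -- a constant signed value implicitly
        // converted to the u32 parameter (MSVC C4245 once warnings are errors).
        return RESULT_UNKNOWN;
    }
    return ResultCode{0};
}
```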