diff options
| author | 2019-10-12 10:38:55 -0400 | |
|---|---|---|
| committer | 2019-10-15 11:55:28 -0400 | |
| commit | e28c7f521765a85e27259539f0873b15c18a98f8 (patch) | |
| tree | 86f1603d3bbdf08680ecf0a9886485597554c949 /src/core/hle/kernel/thread.cpp | |
| parent | Kernel: Clang Format (diff) | |
| download | yuzu-e28c7f521765a85e27259539f0873b15c18a98f8.tar.gz yuzu-e28c7f521765a85e27259539f0873b15c18a98f8.tar.xz yuzu-e28c7f521765a85e27259539f0873b15c18a98f8.zip | |
Kernel: Address Feedback 2
Diffstat (limited to 'src/core/hle/kernel/thread.cpp')
| -rw-r--r-- | src/core/hle/kernel/thread.cpp | 6 |
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 3408658e5..aeb20b24b 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp | |||
| @@ -410,7 +410,7 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { | |||
| 410 | }; | 410 | }; |
| 411 | 411 | ||
| 412 | const bool use_override = affinity_override_count != 0; | 412 | const bool use_override = affinity_override_count != 0; |
| 413 | if (new_core == static_cast<s32>(CoreFlags::DontChangeIdealCore)) { | 413 | if (new_core == THREADDONTCHANGE_IDEAL) { |
| 414 | new_core = use_override ? ideal_core_override : ideal_core; | 414 | new_core = use_override ? ideal_core_override : ideal_core; |
| 415 | if ((new_affinity_mask & (1ULL << new_core)) == 0) { | 415 | if ((new_affinity_mask & (1ULL << new_core)) == 0) { |
| 416 | return ERR_INVALID_COMBINATION; | 416 | return ERR_INVALID_COMBINATION; |
| @@ -452,7 +452,7 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) { | |||
| 452 | 452 | ||
| 453 | for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { | 453 | for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { |
| 454 | if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { | 454 | if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { |
| 455 | scheduler.Unsuggest(current_priority, core, this); | 455 | scheduler.Unsuggest(current_priority, static_cast<u32>(core), this); |
| 456 | } | 456 | } |
| 457 | } | 457 | } |
| 458 | } else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) { | 458 | } else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) { |
| @@ -463,7 +463,7 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) { | |||
| 463 | 463 | ||
| 464 | for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { | 464 | for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { |
| 465 | if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { | 465 | if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { |
| 466 | scheduler.Suggest(current_priority, core, this); | 466 | scheduler.Suggest(current_priority, static_cast<u32>(core), this); |
| 467 | } | 467 | } |
| 468 | } | 468 | } |
| 469 | } | 469 | } |