author    bunnei  2020-11-16 21:02:45 -0800
committer bunnei  2020-12-06 00:03:24 -0800
commit e18ee8d681bf05e8c1480dd1ad7133778ead773d (patch)
tree   fd319df94f2fd26d621d1b14fc5c57d7cb567d9e /src/core/hle/kernel/scheduler.cpp
parent Merge pull request #5133 from lioncash/video-shadow2 (diff)
hle: kernel: Port KAffinityMask from Mesosphere.
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 5c63b0b4a..9a969fdb5 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -452,7 +452,7 @@ void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
 
         for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
             if (core != static_cast<u32>(thread->processor_id) &&
-                ((thread->affinity_mask >> core) & 1) != 0) {
+                thread->affinity_mask.GetAffinity(core)) {
                 Unsuggest(thread->current_priority, core, thread);
             }
         }
@@ -464,7 +464,7 @@ void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
 
         for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
             if (core != static_cast<u32>(thread->processor_id) &&
-                ((thread->affinity_mask >> core) & 1) != 0) {
+                thread->affinity_mask.GetAffinity(core)) {
                 Suggest(thread->current_priority, core, thread);
             }
         }
@@ -484,7 +484,7 @@ void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priorit
 
     for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (core != static_cast<u32>(thread->processor_id) &&
-            ((thread->affinity_mask >> core) & 1) != 0) {
+            thread->affinity_mask.GetAffinity(core)) {
             Unsuggest(old_priority, core, thread);
         }
     }
@@ -500,7 +500,7 @@ void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priorit
 
     for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (core != static_cast<u32>(thread->processor_id) &&
-            ((thread->affinity_mask >> core) & 1) != 0) {
+            thread->affinity_mask.GetAffinity(core)) {
             Suggest(thread->current_priority, core, thread);
         }
     }
@@ -527,7 +527,7 @@ void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinit
     }
 
     for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
-        if (((thread->affinity_mask >> core) & 1) != 0) {
+        if (thread->affinity_mask.GetAffinity(core)) {
             if (core == static_cast<u32>(thread->processor_id)) {
                 Schedule(thread->current_priority, core, thread);
             } else {
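
Note: every hunk above swaps the raw bit test ((thread->affinity_mask >> core) & 1) != 0 for a call to KAffinityMask::GetAffinity(core). As a rough sketch of what such an accessor can look like, assuming the mask type simply wraps a u64 with one bit per CPU core (the class body below is illustrative, not the actual KAffinityMask ported from Mesosphere):

// Illustrative sketch only; not the KAffinityMask implementation ported in this
// commit. Assumes the affinity mask is a plain u64 with one bit per CPU core,
// which is what the replaced expression ((mask >> core) & 1) != 0 implies.
#include <cstdint>

class KAffinityMask {
public:
    // True if the given core is enabled in the mask; equivalent to the old
    // ((mask >> core) & 1) != 0 bit test.
    [[nodiscard]] constexpr bool GetAffinity(std::int32_t core) const {
        return (mask & GetCoreBit(core)) != 0;
    }

    // Enable or disable a single core (hypothetical helper, named for symmetry).
    constexpr void SetAffinity(std::int32_t core, bool set) {
        if (set) {
            mask |= GetCoreBit(core);
        } else {
            mask &= ~GetCoreBit(core);
        }
    }

    // Raw 64-bit view of the mask.
    [[nodiscard]] constexpr std::uint64_t GetAffinityMask() const {
        return mask;
    }

private:
    static constexpr std::uint64_t GetCoreBit(std::int32_t core) {
        return std::uint64_t{1} << core;
    }

    std::uint64_t mask{};
};

With a wrapper of this shape, the scheduler loops read as a named query (thread->affinity_mask.GetAffinity(core)) instead of inline bit arithmetic, and the observable behaviour of the diff is unchanged.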