|   |   |
|---|---|
| author | 2020-11-16 21:02:45 -0800 |
| committer | 2020-12-06 00:03:24 -0800 |
| commit | e18ee8d681bf05e8c1480dd1ad7133778ead773d (patch) |
| tree | fd319df94f2fd26d621d1b14fc5c57d7cb567d9e /src/core/hle/kernel |
| parent | Merge pull request #5133 from lioncash/video-shadow2 (diff) |
hle: kernel: Port KAffinityMask from Mesosphere.

Diffstat (limited to 'src/core/hle/kernel')

| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | src/core/hle/kernel/k_affinity_mask.h | 62 |
| -rw-r--r-- | src/core/hle/kernel/scheduler.cpp | 10 |
| -rw-r--r-- | src/core/hle/kernel/svc.cpp | 2 |
| -rw-r--r-- | src/core/hle/kernel/thread.cpp | 11 |
| -rw-r--r-- | src/core/hle/kernel/thread.h | 6 |

5 files changed, 77 insertions(+), 14 deletions(-)

    diff --git a/src/core/hle/kernel/k_affinity_mask.h b/src/core/hle/kernel/k_affinity_mask.h
    new file mode 100644
    index 000000000..fa2a720a4
    --- /dev/null
    +++ b/src/core/hle/kernel/k_affinity_mask.h
    @@ -0,0 +1,62 @@
    +// Copyright 2020 yuzu Emulator Project
    +// Licensed under GPLv2 or any later version
    +// Refer to the license.txt file included.
    +
    +// This file references various implementation details from Atmosphere, an open-source firmware for
    +// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
    +
    +#pragma once
    +
    +#include "common/assert.h"
    +#include "common/common_types.h"
    +#include "core/hardware_properties.h"
    +
    +namespace Kernel {
    +
    +class KAffinityMask {
    +private:
    +    static constexpr u64 AllowedAffinityMask = (1ul << Core::Hardware::NUM_CPU_CORES) - 1;
    +
    +private:
    +    u64 mask;
    +
    +private:
    +    static constexpr u64 GetCoreBit(s32 core) {
    +        ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
    +        return (1ull << core);
    +    }
    +
    +public:
    +    constexpr KAffinityMask() : mask(0) {
    +        ASSERT(this);
    +    }
    +
    +    constexpr u64 GetAffinityMask() const {
    +        return this->mask;
    +    }
    +
    +    constexpr void SetAffinityMask(u64 new_mask) {
    +        ASSERT((new_mask & ~AllowedAffinityMask) == 0);
    +        this->mask = new_mask;
    +    }
    +
    +    constexpr bool GetAffinity(s32 core) const {
    +        return this->mask & GetCoreBit(core);
    +    }
    +
    +    constexpr void SetAffinity(s32 core, bool set) {
    +        ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
    +
    +        if (set) {
    +            this->mask |= GetCoreBit(core);
    +        } else {
    +            this->mask &= ~GetCoreBit(core);
    +        }
    +    }
    +
    +    constexpr void SetAll() {
    +        this->mask = AllowedAffinityMask;
    +    }
    +};
    +
    +} // namespace Kernel
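The new class is a thin wrapper over a plain `u64` bit set, one bit per CPU core. As a quick illustration of the semantics (a minimal standalone sketch, assuming the Switch's four CPU cores and using plain `assert` in place of yuzu's `ASSERT` macro), the operations above reduce to ordinary bit manipulation:

```cpp
#include <cassert>
#include <cstdint>

int main() {
    // Assumption: Core::Hardware::NUM_CPU_CORES == 4 (the Switch's core count).
    constexpr std::uint64_t NUM_CPU_CORES = 4;
    constexpr std::uint64_t AllowedAffinityMask = (1ULL << NUM_CPU_CORES) - 1; // 0b1111

    std::uint64_t mask = 0;

    // SetAffinity(3, true): OR in that core's bit.
    mask |= (1ULL << 3);
    // GetAffinity(core): test that core's bit.
    assert((mask & (1ULL << 3)) != 0);
    assert((mask & (1ULL << 0)) == 0);

    // SetAffinity(3, false): AND with the complement of that core's bit.
    mask &= ~(1ULL << 3);
    assert(mask == 0);

    // SetAll(): every allowed core at once.
    mask = AllowedAffinityMask;
    assert(mask == 0b1111);

    // SetAffinityMask() asserts that no bit outside the allowed range is set;
    // a mask naming a fifth core would trip that check.
    assert((0b10000ULL & ~AllowedAffinityMask) != 0);
    return 0;
}
```

Note that the real header uses yuzu's `u64`/`s32` aliases and the `ASSERT` macro from common/assert.h; the sketch sticks to standard types only.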

    diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
    index 5c63b0b4a..9a969fdb5 100644
    --- a/src/core/hle/kernel/scheduler.cpp
    +++ b/src/core/hle/kernel/scheduler.cpp
    @@ -452,7 +452,7 @@ void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
     
             for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
                 if (core != static_cast<u32>(thread->processor_id) &&
    -                ((thread->affinity_mask >> core) & 1) != 0) {
    +                thread->affinity_mask.GetAffinity(core)) {
                     Unsuggest(thread->current_priority, core, thread);
                 }
             }
    @@ -464,7 +464,7 @@ void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
     
             for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
                 if (core != static_cast<u32>(thread->processor_id) &&
    -                ((thread->affinity_mask >> core) & 1) != 0) {
    +                thread->affinity_mask.GetAffinity(core)) {
                     Suggest(thread->current_priority, core, thread);
                 }
             }
    @@ -484,7 +484,7 @@ void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priorit
     
         for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
             if (core != static_cast<u32>(thread->processor_id) &&
    -            ((thread->affinity_mask >> core) & 1) != 0) {
    +            thread->affinity_mask.GetAffinity(core)) {
                 Unsuggest(old_priority, core, thread);
             }
         }
    @@ -500,7 +500,7 @@ void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priorit
     
         for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
             if (core != static_cast<u32>(thread->processor_id) &&
    -            ((thread->affinity_mask >> core) & 1) != 0) {
    +            thread->affinity_mask.GetAffinity(core)) {
                 Suggest(thread->current_priority, core, thread);
             }
         }
    @@ -527,7 +527,7 @@ void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinit
         }
     
         for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
    -        if (((thread->affinity_mask >> core) & 1) != 0) {
    +        if (thread->affinity_mask.GetAffinity(core)) {
                 if (core == static_cast<u32>(thread->processor_id)) {
                     Schedule(thread->current_priority, core, thread);
                 } else {
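The scheduler changes are mechanical: the open-coded shift-and-mask test is replaced by `KAffinityMask::GetAffinity(core)`, which performs the same bit test. A small sketch (hypothetical mask value, raw `u64` stand-in) showing the two forms agree:

```cpp
#include <cassert>
#include <cstdint>

int main() {
    // Hypothetical affinity mask allowing cores 0 and 2 (0b0101).
    const std::uint64_t affinity_mask = 0b0101;

    for (std::uint32_t core = 0; core < 4; ++core) {
        const bool old_check = ((affinity_mask >> core) & 1) != 0;    // expression being removed
        const bool new_check = (affinity_mask & (1ULL << core)) != 0; // what GetAffinity(core) computes
        assert(old_check == new_check);
    }
    return 0;
}
```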

    diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
    index 95d6e2b4d..0cd712d09 100644
    --- a/src/core/hle/kernel/svc.cpp
    +++ b/src/core/hle/kernel/svc.cpp
    @@ -2003,7 +2003,7 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
         }
     
         *core = thread->GetIdealCore();
    -    *mask = thread->GetAffinityMask();
    +    *mask = thread->GetAffinityMask().GetAffinityMask();
     
         return RESULT_SUCCESS;
     }
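Because `Thread::GetAffinityMask()` now hands back the `KAffinityMask` wrapper rather than a raw `u64` (see the thread.h hunk further down), the SVC handler has to unwrap it before writing the output value. A minimal sketch of that accessor chain, with stand-in types that only mirror the shapes used in the diff:

```cpp
#include <cassert>
#include <cstdint>

// Stand-ins for the kernel types; the names follow the diff, but this is not yuzu's code.
struct KAffinityMask {
    std::uint64_t mask = 0;
    constexpr std::uint64_t GetAffinityMask() const { return mask; }
};

struct Thread {
    KAffinityMask affinity_mask{};
    constexpr const KAffinityMask& GetAffinityMask() const { return affinity_mask; }
};

int main() {
    Thread thread;
    thread.affinity_mask.mask = 0b0001; // hypothetical: thread pinned to core 0

    // As in GetThreadCoreMask(): unwrap the KAffinityMask to get the raw u64 the SVC returns.
    const std::uint64_t out_mask = thread.GetAffinityMask().GetAffinityMask();
    assert(out_mask == 0b0001);
    return 0;
}
```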

    diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
    index 7d1eb2c6e..38b4a0987 100644
    --- a/src/core/hle/kernel/thread.cpp
    +++ b/src/core/hle/kernel/thread.cpp
    @@ -191,7 +191,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
         thread->last_running_ticks = 0;
         thread->processor_id = processor_id;
         thread->ideal_core = processor_id;
    -    thread->affinity_mask = 1ULL << processor_id;
    +    thread->affinity_mask.SetAffinity(processor_id, true);
         thread->wait_objects = nullptr;
         thread->mutex_wait_address = 0;
         thread->condvar_wait_address = 0;
    @@ -479,15 +479,16 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
         }
         if (use_override) {
             ideal_core_override = new_core;
    -        affinity_mask_override = new_affinity_mask;
         } else {
    -        const u64 old_affinity_mask = std::exchange(affinity_mask, new_affinity_mask);
    +        const auto old_affinity_mask = affinity_mask.GetAffinityMask();
    +        affinity_mask.SetAffinityMask(new_affinity_mask);
             ideal_core = new_core;
             if (old_affinity_mask != new_affinity_mask) {
                 const s32 old_core = processor_id;
    -            if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
    +            if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) {
                     if (static_cast<s32>(ideal_core) < 0) {
    -                    processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES);
    +                    processor_id = HighestSetCore(affinity_mask.GetAffinityMask(),
    +                                                  Core::Hardware::NUM_CPU_CORES);
                     } else {
                         processor_id = ideal_core;
                     }
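The interesting part of the `SetCoreAndAffinityMask` hunk is the re-selection of `processor_id` when the thread's current core is no longer in the new mask: the ideal core is used if it is valid, otherwise the highest set core of the mask. `HighestSetCore`'s body is not part of this diff, so the helper below is only an illustrative stand-in, and the values are hypothetical:

```cpp
#include <cassert>
#include <cstdint>

// Illustrative stand-in for HighestSetCore(); the real helper is not shown in this diff.
static std::int32_t HighestSetCoreSketch(std::uint64_t mask, std::uint32_t num_cores) {
    for (std::int32_t core = static_cast<std::int32_t>(num_cores) - 1; core >= 0; --core) {
        if (((mask >> core) & 1) != 0) {
            return core;
        }
    }
    return -1; // empty mask
}

int main() {
    constexpr std::uint32_t NUM_CPU_CORES = 4;

    // Hypothetical state: thread running on core 0, ideal core "don't care" (-1),
    // and a new mask that only allows cores 2 and 3.
    std::int32_t processor_id = 0;
    const std::int32_t ideal_core = -1;
    const std::uint64_t new_mask = 0b1100;

    // Mirrors the updated branch: the current core is no longer allowed, so pick a new one.
    if (processor_id >= 0 && ((new_mask >> processor_id) & 1) == 0) {
        processor_id = (ideal_core < 0) ? HighestSetCoreSketch(new_mask, NUM_CPU_CORES)
                                        : ideal_core;
    }
    assert(processor_id == 3);
    return 0;
}
```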

    diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
    index a75071e9b..5192ecff1 100644
    --- a/src/core/hle/kernel/thread.h
    +++ b/src/core/hle/kernel/thread.h
    @@ -12,6 +12,7 @@
     #include "common/common_types.h"
     #include "common/spin_lock.h"
     #include "core/arm/arm_interface.h"
    +#include "core/hle/kernel/k_affinity_mask.h"
     #include "core/hle/kernel/object.h"
     #include "core/hle/kernel/synchronization_object.h"
     #include "core/hle/result.h"
    @@ -469,7 +470,7 @@ public:
             return ideal_core;
         }
     
    -    u64 GetAffinityMask() const {
    +    constexpr const KAffinityMask& GetAffinityMask() const {
             return affinity_mask;
         }
     
    @@ -649,10 +650,9 @@ private:
         Scheduler* scheduler = nullptr;
     
         u32 ideal_core{0xFFFFFFFF};
    -    u64 affinity_mask{0x1};
    +    KAffinityMask affinity_mask{};
     
         s32 ideal_core_override = -1;
    -    u64 affinity_mask_override = 0x1;
         u32 affinity_override_count = 0;
     
         u32 scheduling_state = 0;
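One detail worth noting in the member changes above: the default goes from `u64 affinity_mask{0x1}` to an empty `KAffinityMask`, but `Thread::Create` still seeds the bit for the creation core (see the thread.cpp hunk), so a freshly created thread ends up with the same single-core mask either way. A small sketch with a hypothetical core id:

```cpp
#include <cassert>
#include <cstdint>

int main() {
    const std::int32_t processor_id = 2; // hypothetical creation core

    // Old representation: u64 member defaulted to 0x1, then overwritten in Create().
    std::uint64_t old_style = 0x1;
    old_style = 1ULL << processor_id;

    // New representation: KAffinityMask defaults to an empty mask (0),
    // then Create() calls SetAffinity(processor_id, true).
    std::uint64_t new_style = 0;
    new_style |= 1ULL << processor_id;

    assert(old_style == new_style); // same single-core mask in both cases
    return 0;
}
```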