Diffstat (limited to 'src/core/hle/kernel')
 src/core/hle/kernel/scheduler.cpp | 12 ++++++------
 src/core/hle/kernel/scheduler.h   | 13 +++++++------
 src/core/hle/kernel/thread.cpp    | 15 ++++++++-------
 3 files changed, 21 insertions(+), 19 deletions(-)
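The commit replaces the scheduler-local GlobalScheduler::NUM_CPU_CORES constant with the shared Core::Hardware::NUM_CPU_CORES from core/hardware_properties.h, so the core count is defined in a single place. The diff only shows the new include and the uses of the constant, not the header itself; a minimal sketch of what the header is assumed to declare, inferred from the removed GlobalScheduler constant:

    // Assumed shape of core/hardware_properties.h (inferred from the removed
    // "static constexpr u32 NUM_CPU_CORES = 4;" below; the real header may differ).
    // u32 is yuzu's alias for std::uint32_t from common/common_types.h.
    namespace Core::Hardware {
    constexpr u32 NUM_CPU_CORES = 4; // the emulated Switch CPU has four cores
    } // namespace Core::Hardware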
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index eb196a690..b5ffa5418 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -124,8 +124,8 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
                "Thread yielding without being in front");
     scheduled_queue[core_id].yield(priority);
 
-    std::array<Thread*, NUM_CPU_CORES> current_threads;
-    for (u32 i = 0; i < NUM_CPU_CORES; i++) {
+    std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
+    for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
         current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
     }
 
@@ -177,8 +177,8 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
     // function...
     if (scheduled_queue[core_id].empty()) {
         // Here, "current_threads" is calculated after the ""yield"", unlike yield -1
-        std::array<Thread*, NUM_CPU_CORES> current_threads;
-        for (u32 i = 0; i < NUM_CPU_CORES; i++) {
+        std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
+        for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
             current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
         }
         for (auto& thread : suggested_queue[core_id]) {
@@ -208,7 +208,7 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
 }
 
 void GlobalScheduler::PreemptThreads() {
-    for (std::size_t core_id = 0; core_id < NUM_CPU_CORES; core_id++) {
+    for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
         const u32 priority = preemption_priorities[core_id];
 
         if (scheduled_queue[core_id].size(priority) > 0) {
@@ -349,7 +349,7 @@ bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread,
 }
 
 void GlobalScheduler::Shutdown() {
-    for (std::size_t core = 0; core < NUM_CPU_CORES; core++) {
+    for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         scheduled_queue[core].clear();
         suggested_queue[core].clear();
     }
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 14b77960a..96db049cb 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -10,6 +10,7 @@
 
 #include "common/common_types.h"
 #include "common/multi_level_queue.h"
+#include "core/hardware_properties.h"
 #include "core/hle/kernel/thread.h"
 
 namespace Core {
@@ -23,8 +24,6 @@ class Process;
 
 class GlobalScheduler final {
 public:
-    static constexpr u32 NUM_CPU_CORES = 4;
-
     explicit GlobalScheduler(Core::System& system);
     ~GlobalScheduler();
 
@@ -125,7 +124,7 @@ public:
     void PreemptThreads();
 
     u32 CpuCoresCount() const {
-        return NUM_CPU_CORES;
+        return Core::Hardware::NUM_CPU_CORES;
     }
 
     void SetReselectionPending() {
@@ -149,13 +148,15 @@ private:
     bool AskForReselectionOrMarkRedundant(Thread* current_thread, const Thread* winner);
 
     static constexpr u32 min_regular_priority = 2;
-    std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> scheduled_queue;
-    std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> suggested_queue;
+    std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
+        scheduled_queue;
+    std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
+        suggested_queue;
     std::atomic<bool> is_reselection_pending{false};
 
     // The priority levels at which the global scheduler preempts threads every 10 ms. They are
     // ordered from Core 0 to Core 3.
-    std::array<u32, NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
+    std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
 
     /// Lists all thread ids that aren't deleted/etc.
     std::vector<std::shared_ptr<Thread>> thread_list;
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index ee9ea7d67..43b30dd3d 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -15,6 +15,7 @@
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/core_timing_util.h"
+#include "core/hardware_properties.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/kernel.h"
@@ -431,7 +432,7 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
     const s32 old_core = processor_id;
     if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
         if (static_cast<s32>(ideal_core) < 0) {
-            processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES);
+            processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES);
         } else {
             processor_id = ideal_core;
         }
@@ -455,7 +456,7 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
         scheduler.Unschedule(current_priority, static_cast<u32>(processor_id), this);
     }
 
-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
             scheduler.Unsuggest(current_priority, core, this);
         }
@@ -466,7 +467,7 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
         scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this);
     }
 
-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
             scheduler.Suggest(current_priority, core, this);
         }
@@ -485,7 +486,7 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
         scheduler.Unschedule(old_priority, static_cast<u32>(processor_id), this);
     }
 
-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
             scheduler.Unsuggest(old_priority, core, this);
         }
@@ -502,7 +503,7 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
         }
     }
 
-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
             scheduler.Suggest(current_priority, core, this);
         }
@@ -518,7 +519,7 @@ void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
         return;
     }
 
-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (((old_affinity_mask >> core) & 1) != 0) {
             if (core == static_cast<u32>(old_core)) {
                 scheduler.Unschedule(current_priority, core, this);
@@ -528,7 +529,7 @@ void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
         }
     }
 
-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (((affinity_mask >> core) & 1) != 0) {
             if (core == static_cast<u32>(processor_id)) {
                 scheduler.Schedule(current_priority, core, this);
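Each of the thread.cpp hunks follows the same pattern: loop over the cores and test the matching bit of the 64-bit affinity mask before scheduling, unscheduling, suggesting, or unsuggesting the thread on that core. A minimal standalone sketch of that bit test (the mask value and names here are illustrative only, not from the diff):

    // Per-core affinity-mask bit test, as used throughout thread.cpp.
    #include <cstdint>
    #include <cstdio>

    int main() {
        const std::uint64_t affinity_mask = 0b1011; // bits set: cores 0, 1, 3
        for (unsigned core = 0; core < 4; core++) {
            // Shift the core's bit into position 0 and mask it off.
            if (((affinity_mask >> core) & 1) != 0) {
                std::printf("core %u is allowed by the affinity mask\n", core);
            }
        }
        return 0;
    }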