author     2019-09-10 11:04:40 -0400
committer  2019-10-15 11:55:14 -0400
commit     b49c0dab8772afb06358e5d19af092226b3a59bb (patch)
tree       00e1cad505f915f5d7d286c5627b8e13f5eb78ff /src/core
parent     Scheduler: Add protections for Yield bombing (diff)
download   yuzu-b49c0dab8772afb06358e5d19af092226b3a59bb.tar.gz
           yuzu-b49c0dab8772afb06358e5d19af092226b3a59bb.tar.xz
           yuzu-b49c0dab8772afb06358e5d19af092226b3a59bb.zip
Kernel: Initial implementation of thread preemption.
Diffstat (limited to 'src/core')
 src/core/hle/kernel/kernel.cpp    | 16 ++++++++++++++++
 src/core/hle/kernel/scheduler.cpp | 10 ++++++++++
 src/core/hle/kernel/scheduler.h   |  4 ++++
 3 files changed, 30 insertions(+), 0 deletions(-)
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 600d6ec74..7a913520d 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -12,6 +12,7 @@
 
 #include "core/core.h"
 #include "core/core_timing.h"
+#include "core/core_timing_util.h"
 #include "core/hle/kernel/address_arbiter.h"
 #include "core/hle/kernel/client_port.h"
 #include "core/hle/kernel/handle_table.h"
@@ -96,6 +97,7 @@ struct KernelCore::Impl {
 
         InitializeSystemResourceLimit(kernel);
         InitializeThreads();
+        InitializePreemption();
     }
 
     void Shutdown() {
@@ -111,6 +113,7 @@ struct KernelCore::Impl {
 
         thread_wakeup_callback_handle_table.Clear();
         thread_wakeup_event_type = nullptr;
+        preemption_event = nullptr;
 
         named_ports.clear();
     }
@@ -133,6 +136,18 @@ struct KernelCore::Impl {
             system.CoreTiming().RegisterEvent("ThreadWakeupCallback", ThreadWakeupCallback);
     }
 
+    void InitializePreemption() {
+        preemption_event = system.CoreTiming().RegisterEvent(
+            "PreemptionCallback", [this](u64 userdata, s64 cycles_late) {
+                global_scheduler.PreemptThreads();
+                s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
+                system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
+            });
+
+        s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
+        system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
+    }
+
     std::atomic<u32> next_object_id{0};
     std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin};
     std::atomic<u64> next_user_process_id{Process::ProcessIDMin};
@@ -146,6 +161,7 @@ struct KernelCore::Impl {
     SharedPtr<ResourceLimit> system_resource_limit;
 
     Core::Timing::EventType* thread_wakeup_event_type = nullptr;
+    Core::Timing::EventType* preemption_event = nullptr;
     // TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future,
     // allowing us to simply use a pool index or similar.
     Kernel::HandleTable thread_wakeup_callback_handle_table;
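The heart of InitializePreemption() is a self-rearming timed event: the registered "PreemptionCallback" does its work (GlobalScheduler::PreemptThreads()), then schedules itself again 10 ms ahead (converted to cycles with Core::Timing::msToCycles), and the trailing ScheduleEvent call arms the first tick. Below is a minimal standalone sketch of that pattern; the EventLoop type and its tick-based API are illustrative stand-ins for yuzu's Core::Timing::CoreTiming, not its actual interface.

// Sketch of the self-rescheduling event pattern used by InitializePreemption().
// EventLoop is a toy stand-in for yuzu's core timing; ticks stand in for cycles.
#include <cstdint>
#include <functional>
#include <iostream>
#include <map>

using Callback = std::function<void(std::int64_t now)>;

class EventLoop {
public:
    // Schedule `cb` to fire `delay` ticks from the current time.
    void ScheduleEvent(std::int64_t delay, Callback cb) {
        events.emplace(now + delay, std::move(cb));
    }

    // Advance to `end` ticks, firing due events in order.
    void RunUntil(std::int64_t end) {
        while (!events.empty() && events.begin()->first <= end) {
            auto node = events.extract(events.begin());
            now = node.key();
            node.mapped()(now);
        }
        now = end;
    }

private:
    std::int64_t now = 0;
    std::multimap<std::int64_t, Callback> events;
};

int main() {
    constexpr std::int64_t interval = 10; // stands in for msToCycles(10ms)
    EventLoop loop;

    // The callback re-arms itself, like PreemptionCallback: do the periodic
    // work, then schedule the same event `interval` ticks ahead.
    std::function<void(std::int64_t)> preemption_tick = [&](std::int64_t t) {
        std::cout << "preemption tick at " << t << '\n'; // PreemptThreads() here
        loop.ScheduleEvent(interval, preemption_tick);
    };

    loop.ScheduleEvent(interval, preemption_tick); // initial arm, as in InitializePreemption()
    loop.RunUntil(50); // fires at t = 10, 20, 30, 40, 50
}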
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 451fd8077..0d45307cd 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -238,6 +238,16 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
     return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
 
+void GlobalScheduler::PreemptThreads() {
+    for (std::size_t core_id = 0; core_id < NUM_CPU_CORES; core_id++) {
+        const u64 priority = preemption_priorities[core_id];
+        if (scheduled_queue[core_id].size(priority) > 1) {
+            scheduled_queue[core_id].yield(priority);
+            reselection_pending.store(true, std::memory_order_release);
+        }
+    }
+}
+
 void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) {
     ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core.");
     scheduled_queue[core].add(thread, priority);
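PreemptThreads() checks, per core, whether more than one thread is queued at that core's preemption priority; if so, it calls yield(priority) on the per-core queue and raises reselection_pending. In yuzu's Common::MultiLevelQueue, yield rotates the front thread of a priority level to the back, so the periodic tick amounts to round-robin among equal-priority threads. A simplified sketch of that rotation follows; MultiLevelQueueSketch and its Thread type are stand-ins for the real container, and the rotate-front-to-back reading of yield is an assumption about its semantics.

// Sketch of the per-level rotation that PreemptThreads() relies on.
#include <array>
#include <cstddef>
#include <deque>
#include <iostream>
#include <string>

struct Thread {
    std::string name;
};

constexpr std::size_t kPriorityCount = 64; // mirrors THREADPRIO_COUNT

class MultiLevelQueueSketch {
public:
    void add(Thread* t, std::size_t priority) {
        levels[priority].push_back(t);
    }

    std::size_t size(std::size_t priority) const {
        return levels[priority].size();
    }

    // Rotate the running (front) thread of this level to the back so the
    // next equal-priority thread is picked on the following reschedule.
    void yield(std::size_t priority) {
        auto& level = levels[priority];
        if (level.size() > 1) {
            level.push_back(level.front());
            level.pop_front();
        }
    }

    const Thread* front(std::size_t priority) const {
        return levels[priority].front();
    }

private:
    std::array<std::deque<Thread*>, kPriorityCount> levels;
};

int main() {
    Thread a{"a"}, b{"b"}, c{"c"};
    MultiLevelQueueSketch queue;
    queue.add(&a, 59);
    queue.add(&b, 59);
    queue.add(&c, 59);

    // Mirrors the per-core check in PreemptThreads(): only rotate when more
    // than one thread is queued at the preemption priority.
    if (queue.size(59) > 1) {
        queue.yield(59);
    }
    std::cout << queue.front(59)->name << '\n'; // prints "b"
}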
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 8fcc86bae..c13a368fd 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -133,6 +133,8 @@
      */
     bool YieldThreadAndWaitForLoadBalancing(Thread* thread);
 
+    void PreemptThreads();
+
     u32 CpuCoresCount() const {
         return NUM_CPU_CORES;
     }
@@ -153,6 +155,8 @@
     std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> suggested_queue;
     std::atomic<bool> reselection_pending;
 
+    std::array<u64, NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
+
     /// Lists all thread ids that aren't deleted/etc.
     std::vector<SharedPtr<Thread>> thread_list;
     Core::System& system;
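The new preemption_priorities member pins the priority level rotated on each core: 59 for cores 0-2 and 62 for core 3. Priorities in this scheduler run from 0 to THREADPRIO_COUNT - 1 (64 levels) with lower numbers meaning higher priority, so these sit near the low end of the range; the distinct value for core 3 presumably reflects its use as the system core on the Switch, though the diff itself doesn't say so. A small sketch of how the array drives the per-core pass; the queued counts below are made-up stand-ins for scheduled_queue[core_id].size(priority).

// Sketch of the per-core preemption pass driven by preemption_priorities.
#include <array>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <iostream>

constexpr std::size_t NUM_CPU_CORES = 4;

// Mirrors the new member in scheduler.h: the level each core's tick rotates.
constexpr std::array<std::uint64_t, NUM_CPU_CORES> preemption_priorities{59, 59, 59, 62};

int main() {
    std::atomic<bool> reselection_pending{false};

    // Hypothetical thread counts at each core's preemption priority.
    const std::array<std::size_t, NUM_CPU_CORES> queued{1, 3, 2, 1};

    // Same shape as GlobalScheduler::PreemptThreads(): rotate only where
    // several threads share the level, then flag a reschedule so the cores
    // pick up the new front threads.
    for (std::size_t core_id = 0; core_id < NUM_CPU_CORES; ++core_id) {
        if (queued[core_id] > 1) {
            std::cout << "core " << core_id << ": rotate priority "
                      << preemption_priorities[core_id] << '\n';
            reselection_pending.store(true, std::memory_order_release);
        }
    }
    std::cout << "reselection_pending = " << reselection_pending.load() << '\n';
}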