summaryrefslogtreecommitdiff
path: root/src/core/hle/kernel/scheduler.h
diff options
context:
space:
mode:
Diffstat (limited to 'src/core/hle/kernel/scheduler.h')
-rw-r--r--src/core/hle/kernel/scheduler.h320
1 file changed, 0 insertions, 320 deletions
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
deleted file mode 100644
index 68db4a5ef..000000000
--- a/src/core/hle/kernel/scheduler.h
+++ /dev/null
@@ -1,320 +0,0 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
#include <array>
#include <atomic>
#include <memory>
#include <mutex>
#include <vector>

#include "common/common_types.h"
#include "common/multi_level_queue.h"
#include "common/spin_lock.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/thread.h"
17
18namespace Common {
19class Fiber;
20}
21
22namespace Core {
23class ARM_Interface;
24class System;
25} // namespace Core
26
27namespace Kernel {
28
29class KernelCore;
30class Process;
31class SchedulerLock;
32
/// System-wide scheduler: owns the per-core priority queues and decides which
/// thread each emulated CPU core should run next.
class GlobalScheduler final {
public:
    explicit GlobalScheduler(KernelCore& kernel);
    ~GlobalScheduler();

    /// Adds a new thread to the scheduler.
    void AddThread(std::shared_ptr<Thread> thread);

    /// Removes a thread from the scheduler.
    void RemoveThread(std::shared_ptr<Thread> thread);

    /// Returns a list of all threads managed by the scheduler.
    const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
        return thread_list;
    }

    /// Notify the scheduler a thread's status has changed.
    void AdjustSchedulingOnStatus(Thread* thread, u32 old_flags);

    /// Notify the scheduler a thread's priority has changed.
    void AdjustSchedulingOnPriority(Thread* thread, u32 old_priority);

    /// Notify the scheduler a thread's core and/or affinity mask has changed.
    void AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask, s32 old_core);

    /**
     * Takes care of selecting the new scheduled threads in three steps:
     *
     * 1. First a thread is selected from the top of the priority queue. If no thread
     *    is obtained then we move to step two, else we are done.
     *
     * 2. Second we try to get a suggested thread that's not assigned to any core or
     *    that is not the top thread in that core.
     *
     * 3. Third, if no suggested thread is found, we do a second pass and pick a running
     *    thread in another core and swap it with its current thread.
     *
     * @return The cores needing scheduling.
     */
    u32 SelectThreads();

    /// Returns true if the given core has at least one thread queued to run.
    bool HaveReadyThreads(std::size_t core_id) const {
        return !scheduled_queue[core_id].empty();
    }

    /**
     * Takes a thread and moves it to the back of its priority list.
     *
     * @note This operation can be redundant and no scheduling is changed if marked as so.
     */
    bool YieldThread(Thread* thread);

    /**
     * Takes a thread and moves it to the back of its priority list.
     * Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or
     * a better priority than the next thread in the core.
     *
     * @note This operation can be redundant and no scheduling is changed if marked as so.
     */
    bool YieldThreadAndBalanceLoad(Thread* thread);

    /**
     * Takes a thread and moves it out of the scheduling queue
     * and into the suggested queue. If no thread can be scheduled afterwards in that core,
     * a suggested thread is obtained instead.
     *
     * @note This operation can be redundant and no scheduling is changed if marked as so.
     */
    bool YieldThreadAndWaitForLoadBalancing(Thread* thread);

    /**
     * Rotates the scheduling queues of threads at a preemption priority and then does
     * some core rebalancing. Preemption priorities can be found in the array
     * 'preemption_priorities'.
     *
     * @note This operation happens every 10ms.
     */
    void PreemptThreads();

    /// Returns the number of emulated CPU cores.
    u32 CpuCoresCount() const {
        return Core::Hardware::NUM_CPU_CORES;
    }

    /// Requests a new thread-selection pass (release store, paired with the
    /// acquire load in IsReselectionPending).
    void SetReselectionPending() {
        is_reselection_pending.store(true, std::memory_order_release);
    }

    /// Returns true if a thread-selection pass has been requested and not yet serviced.
    bool IsReselectionPending() const {
        return is_reselection_pending.load(std::memory_order_acquire);
    }

    /// Releases scheduler resources on emulator shutdown.
    void Shutdown();

private:
    // SchedulerLock drives Lock()/Unlock() below.
    friend class SchedulerLock;

    /// Lock the scheduler to the current thread.
    void Lock();

    /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling
    /// and reschedules current core if needed.
    void Unlock();

    void EnableInterruptAndSchedule(u32 cores_pending_reschedule,
                                    Core::EmuThreadHandle global_thread);

    /**
     * Add a thread to the suggested queue of a cpu core. Suggested threads may be
     * picked if no thread is scheduled to run on the core.
     */
    void Suggest(u32 priority, std::size_t core, Thread* thread);

    /**
     * Remove a thread from the suggested queue of a cpu core. Suggested threads may be
     * picked if no thread is scheduled to run on the core.
     */
    void Unsuggest(u32 priority, std::size_t core, Thread* thread);

    /**
     * Add a thread to the scheduling queue of a cpu core. The thread is added at the
     * back of the queue in its priority level.
     */
    void Schedule(u32 priority, std::size_t core, Thread* thread);

    /**
     * Add a thread to the scheduling queue of a cpu core. The thread is added at the
     * front of the queue in its priority level.
     */
    void SchedulePrepend(u32 priority, std::size_t core, Thread* thread);

    /// Reschedule an already scheduled thread based on a new priority.
    void Reschedule(u32 priority, std::size_t core, Thread* thread);

    /// Unschedules a thread.
    void Unschedule(u32 priority, std::size_t core, Thread* thread);

    /**
     * Transfers a thread into a specific core. If the destination_core is -1
     * it will be unscheduled from its source core and added into its suggested
     * queue.
     */
    void TransferToCore(u32 priority, s32 destination_core, Thread* thread);

    bool AskForReselectionOrMarkRedundant(Thread* current_thread, const Thread* winner);

    // Minimum priority value usable by regular threads (lower values are more urgent).
    static constexpr u32 min_regular_priority = 2;
    // Per-core multilevel queues of threads scheduled to run on that core.
    std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
        scheduled_queue;
    // Per-core multilevel queues of threads suggested for that core (may be picked
    // if nothing is scheduled there).
    std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
        suggested_queue;
    std::atomic<bool> is_reselection_pending{false};

    // The priority levels at which the global scheduler preempts threads every 10 ms. They are
    // ordered from Core 0 to Core 3.
    std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};

    /// Scheduler lock mechanisms.
    bool is_locked{};
    std::mutex inner_lock;
    std::atomic<s64> scope_lock{};
    Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()};

    // Guards thread_list below.
    Common::SpinLock global_list_guard{};

    /// Lists all thread ids that aren't deleted/etc.
    std::vector<std::shared_ptr<Thread>> thread_list;
    KernelCore& kernel;
};
201
/// Per-core scheduler: performs the actual context switches for one emulated CPU core.
class Scheduler final {
public:
    explicit Scheduler(Core::System& system, std::size_t core_id);
    ~Scheduler();

    /// Returns whether there are any threads that are ready to run.
    bool HaveReadyThreads() const;

    /// Reschedules to the next available thread (call after current thread is suspended).
    void TryDoContextSwitch();

    /// The next two are for SingleCore Only.
    /// Unload current thread before preempting core.
    void Unload(Thread* thread);
    void Unload();
    /// Reload current thread after core preemption.
    void Reload(Thread* thread);
    void Reload();

    /// Gets the current running thread.
    Thread* GetCurrentThread() const;

    /// Gets the currently selected thread from the top of the multilevel queue.
    Thread* GetSelectedThread() const;

    /// Gets the timestamp for the last context switch in ticks.
    u64 GetLastContextSwitchTicks() const;

    /// Returns true if a context switch has been requested but not yet performed.
    bool ContextSwitchPending() const {
        return is_context_switch_pending;
    }

    void Initialize();

    /// Shuts down the scheduler.
    void Shutdown();

    void OnThreadStart();

    /// Returns the fiber this core uses to perform context switches.
    std::shared_ptr<Common::Fiber>& ControlContext() {
        return switch_fiber;
    }

    /// Const overload of ControlContext().
    const std::shared_ptr<Common::Fiber>& ControlContext() const {
        return switch_fiber;
    }

private:
    friend class GlobalScheduler;

    /// Switches the CPU's active thread context to that of the specified thread.
    void SwitchContext();

    /// When a thread wakes up, it must run this through its new scheduler.
    void SwitchContextStep2();

    /**
     * Called on every context switch to update the internal timestamp
     * This also updates the running time ticks for the given thread and
     * process using the following difference:
     *
     * ticks += most_recent_ticks - last_context_switch_ticks
     *
     * The internal tick timestamp for the scheduler is simply the
     * most recent tick count retrieved. No special arithmetic is
     * applied to it.
     */
    void UpdateLastContextSwitchTime(Thread* thread, Process* process);

    // Fiber entry point; 'this_scheduler' is the Scheduler being switched to.
    static void OnSwitch(void* this_scheduler);
    void SwitchToCurrent();

    std::shared_ptr<Thread> current_thread = nullptr;
    std::shared_ptr<Thread> selected_thread = nullptr;
    std::shared_ptr<Thread> current_thread_prev = nullptr;
    std::shared_ptr<Thread> selected_thread_set = nullptr;
    std::shared_ptr<Thread> idle_thread = nullptr;

    // Fiber used as the pivot when switching between thread contexts on this core.
    std::shared_ptr<Common::Fiber> switch_fiber = nullptr;

    Core::System& system;
    u64 last_context_switch_time = 0;
    u64 idle_selection_count = 0;
    const std::size_t core_id;

    Common::SpinLock guard{};

    bool is_context_switch_pending = false;
};
291
/// Scoped scheduler lock. Presumably acquires the global scheduler lock on
/// construction and releases it on destruction via GlobalScheduler::Lock/Unlock
/// (it is a friend of GlobalScheduler) — confirm against scheduler.cpp.
class SchedulerLock {
public:
    [[nodiscard]] explicit SchedulerLock(KernelCore& kernel);
    ~SchedulerLock();

protected:
    // Kept so derived classes (e.g. SchedulerLockAndSleep) can reach the kernel.
    KernelCore& kernel;
};
300
/// Scheduler lock variant that additionally arranges a timed sleep of
/// 'time_task' for 'nanoseconds', unless cancelled before release.
class SchedulerLockAndSleep : public SchedulerLock {
public:
    explicit SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* time_task,
                                   s64 nanoseconds);
    ~SchedulerLockAndSleep();

    /// Marks the pending sleep as cancelled; consumed when the lock is released.
    void CancelSleep() {
        sleep_cancelled = true;
    }

    void Release();

private:
    // Handle of the timeout event associated with the sleep.
    Handle& event_handle;
    // Thread to be put to sleep.
    Thread* time_task;
    // Sleep duration in nanoseconds.
    s64 nanoseconds;
    // Set by CancelSleep(); when true the sleep is not performed.
    bool sleep_cancelled{};
};
319
320} // namespace Kernel