author     bunnei  2020-12-03 16:43:18 -0800
committer  bunnei  2020-12-06 00:03:24 -0800
commit     8d3e06349e12e7de17c334619f1f986792d1de4b (patch)
tree       926e34570e5e51d5d7dc03c13f45b1401f5ae829 /src
parent     hle: kernel: Rewrite scheduler implementation based on Mesosphere. (diff)
download   yuzu-8d3e06349e12e7de17c334619f1f986792d1de4b.tar.gz
           yuzu-8d3e06349e12e7de17c334619f1f986792d1de4b.tar.xz
           yuzu-8d3e06349e12e7de17c334619f1f986792d1de4b.zip
hle: kernel: Separate KScheduler from GlobalSchedulerContext class.
Diffstat (limited to 'src')
-rw-r--r--  src/common/CMakeLists.txt                          1
-rw-r--r--  src/common/multi_level_queue.h                   345
-rw-r--r--  src/core/CMakeLists.txt                            2
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.cpp  55
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.h    79
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp               48
-rw-r--r--  src/core/hle/kernel/k_scheduler.h                 74
-rw-r--r--  src/tests/CMakeLists.txt                           1
-rw-r--r--  src/tests/common/multi_level_queue.cpp            55
9 files changed, 140 insertions(+), 520 deletions(-)
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index fc2ed9999..8e51104a1 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -141,7 +141,6 @@ add_library(common STATIC
     microprofile.h
     microprofileui.h
     misc.cpp
-    multi_level_queue.h
     page_table.cpp
     page_table.h
     param_package.cpp
diff --git a/src/common/multi_level_queue.h b/src/common/multi_level_queue.h
deleted file mode 100644
index 4b305bf40..000000000
--- a/src/common/multi_level_queue.h
+++ /dev/null
@@ -1,345 +0,0 @@
-// Copyright 2019 TuxSH
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <array>
-#include <iterator>
-#include <list>
-#include <utility>
-
-#include "common/bit_util.h"
-#include "common/common_types.h"
-
-namespace Common {
-
-/**
- * A MultiLevelQueue is a type of priority queue with the following characteristics:
- * - it is iterable through each of its elements.
- * - its back can be obtained.
- * - O(1) add and lookup (both front and back).
- * - discrete priorities, with a maximum of 64 priorities (limited domain).
- * This type of priority queue is normally used for managing threads within a scheduler.
- */
-template <typename T, std::size_t Depth>
-class MultiLevelQueue {
-public:
-    using value_type = T;
-    using reference = value_type&;
-    using const_reference = const value_type&;
-    using pointer = value_type*;
-    using const_pointer = const value_type*;
-
-    using difference_type = typename std::pointer_traits<pointer>::difference_type;
-    using size_type = std::size_t;
-
-    template <bool is_constant>
-    class iterator_impl {
-    public:
-        using iterator_category = std::bidirectional_iterator_tag;
-        using value_type = T;
-        using pointer = std::conditional_t<is_constant, T*, const T*>;
-        using reference = std::conditional_t<is_constant, const T&, T&>;
-        using difference_type = typename std::pointer_traits<pointer>::difference_type;
-
-        friend bool operator==(const iterator_impl& lhs, const iterator_impl& rhs) {
-            if (lhs.IsEnd() && rhs.IsEnd())
-                return true;
-            return std::tie(lhs.current_priority, lhs.it) == std::tie(rhs.current_priority, rhs.it);
-        }
-
-        friend bool operator!=(const iterator_impl& lhs, const iterator_impl& rhs) {
-            return !operator==(lhs, rhs);
-        }
-
-        reference operator*() const {
-            return *it;
-        }
-
-        pointer operator->() const {
-            return it.operator->();
-        }
-
-        iterator_impl& operator++() {
-            if (IsEnd()) {
-                return *this;
-            }
-
-            ++it;
-
-            if (it == GetEndItForPrio()) {
-                u64 prios = mlq.used_priorities;
-                prios &= ~((1ULL << (current_priority + 1)) - 1);
-                if (prios == 0) {
-                    current_priority = static_cast<u32>(mlq.depth());
-                } else {
-                    current_priority = CountTrailingZeroes64(prios);
-                    it = GetBeginItForPrio();
-                }
-            }
-            return *this;
-        }
-
-        iterator_impl& operator--() {
-            if (IsEnd()) {
-                if (mlq.used_priorities != 0) {
-                    current_priority = 63 - CountLeadingZeroes64(mlq.used_priorities);
-                    it = GetEndItForPrio();
-                    --it;
-                }
-            } else if (it == GetBeginItForPrio()) {
-                u64 prios = mlq.used_priorities;
-                prios &= (1ULL << current_priority) - 1;
-                if (prios != 0) {
-                    current_priority = CountTrailingZeroes64(prios);
-                    it = GetEndItForPrio();
-                    --it;
-                }
-            } else {
-                --it;
-            }
-            return *this;
-        }
-
-        iterator_impl operator++(int) {
-            const iterator_impl v{*this};
-            ++(*this);
-            return v;
-        }
-
-        iterator_impl operator--(int) {
-            const iterator_impl v{*this};
-            --(*this);
-            return v;
-        }
-
-        // allow implicit const->non-const
-        iterator_impl(const iterator_impl<false>& other)
-            : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
-
-        iterator_impl(const iterator_impl<true>& other)
-            : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
-
-        iterator_impl& operator=(const iterator_impl<false>& other) {
-            mlq = other.mlq;
-            it = other.it;
-            current_priority = other.current_priority;
-            return *this;
-        }
-
-        friend class iterator_impl<true>;
-        iterator_impl() = default;
-
-    private:
-        friend class MultiLevelQueue;
-        using container_ref =
-            std::conditional_t<is_constant, const MultiLevelQueue&, MultiLevelQueue&>;
-        using list_iterator = std::conditional_t<is_constant, typename std::list<T>::const_iterator,
-                                                 typename std::list<T>::iterator>;
-
-        explicit iterator_impl(container_ref mlq, list_iterator it, u32 current_priority)
-            : mlq(mlq), it(it), current_priority(current_priority) {}
-        explicit iterator_impl(container_ref mlq, u32 current_priority)
-            : mlq(mlq), it(), current_priority(current_priority) {}
-
-        bool IsEnd() const {
-            return current_priority == mlq.depth();
-        }
-
-        list_iterator GetBeginItForPrio() const {
-            return mlq.levels[current_priority].begin();
-        }
-
-        list_iterator GetEndItForPrio() const {
-            return mlq.levels[current_priority].end();
-        }
-
-        container_ref mlq;
-        list_iterator it;
-        u32 current_priority;
-    };
-
-    using iterator = iterator_impl<false>;
-    using const_iterator = iterator_impl<true>;
-
-    void add(const T& element, u32 priority, bool send_back = true) {
-        if (send_back)
-            levels[priority].push_back(element);
-        else
-            levels[priority].push_front(element);
-        used_priorities |= 1ULL << priority;
-    }
-
-    void remove(const T& element, u32 priority) {
-        auto it = ListIterateTo(levels[priority], element);
-        if (it == levels[priority].end())
-            return;
-        levels[priority].erase(it);
-        if (levels[priority].empty()) {
-            used_priorities &= ~(1ULL << priority);
-        }
-    }
-
-    void adjust(const T& element, u32 old_priority, u32 new_priority, bool adjust_front = false) {
-        remove(element, old_priority);
-        add(element, new_priority, !adjust_front);
-    }
-    void adjust(const_iterator it, u32 old_priority, u32 new_priority, bool adjust_front = false) {
-        adjust(*it, old_priority, new_priority, adjust_front);
-    }
-
-    void transfer_to_front(const T& element, u32 priority, MultiLevelQueue& other) {
-        ListSplice(other.levels[priority], other.levels[priority].begin(), levels[priority],
-                   ListIterateTo(levels[priority], element));
-
-        other.used_priorities |= 1ULL << priority;
-
-        if (levels[priority].empty()) {
-            used_priorities &= ~(1ULL << priority);
-        }
-    }
-
-    void transfer_to_front(const_iterator it, u32 priority, MultiLevelQueue& other) {
-        transfer_to_front(*it, priority, other);
-    }
-
-    void transfer_to_back(const T& element, u32 priority, MultiLevelQueue& other) {
-        ListSplice(other.levels[priority], other.levels[priority].end(), levels[priority],
-                   ListIterateTo(levels[priority], element));
-
-        other.used_priorities |= 1ULL << priority;
-
-        if (levels[priority].empty()) {
-            used_priorities &= ~(1ULL << priority);
-        }
-    }
-
-    void transfer_to_back(const_iterator it, u32 priority, MultiLevelQueue& other) {
-        transfer_to_back(*it, priority, other);
-    }
-
-    void yield(u32 priority, std::size_t n = 1) {
-        ListShiftForward(levels[priority], n);
-    }
-
-    [[nodiscard]] std::size_t depth() const {
-        return Depth;
-    }
-
-    [[nodiscard]] std::size_t size(u32 priority) const {
-        return levels[priority].size();
-    }
-
-    [[nodiscard]] std::size_t size() const {
-        u64 priorities = used_priorities;
-        std::size_t size = 0;
-        while (priorities != 0) {
-            const u64 current_priority = CountTrailingZeroes64(priorities);
-            size += levels[current_priority].size();
-            priorities &= ~(1ULL << current_priority);
-        }
-        return size;
-    }
-
-    [[nodiscard]] bool empty() const {
-        return used_priorities == 0;
-    }
-
-    [[nodiscard]] bool empty(u32 priority) const {
-        return (used_priorities & (1ULL << priority)) == 0;
-    }
-
-    [[nodiscard]] u32 highest_priority_set(u32 max_priority = 0) const {
-        const u64 priorities =
-            max_priority == 0 ? used_priorities : (used_priorities & ~((1ULL << max_priority) - 1));
-        return priorities == 0 ? Depth : static_cast<u32>(CountTrailingZeroes64(priorities));
-    }
-
-    [[nodiscard]] u32 lowest_priority_set(u32 min_priority = Depth - 1) const {
-        const u64 priorities = min_priority >= Depth - 1
-                                   ? used_priorities
-                                   : (used_priorities & ((1ULL << (min_priority + 1)) - 1));
-        return priorities == 0 ? Depth : 63 - CountLeadingZeroes64(priorities);
-    }
-
-    [[nodiscard]] const_iterator cbegin(u32 max_prio = 0) const {
-        const u32 priority = highest_priority_set(max_prio);
-        return priority == Depth ? cend()
-                                 : const_iterator{*this, levels[priority].cbegin(), priority};
-    }
-    [[nodiscard]] const_iterator begin(u32 max_prio = 0) const {
-        return cbegin(max_prio);
-    }
-    [[nodiscard]] iterator begin(u32 max_prio = 0) {
-        const u32 priority = highest_priority_set(max_prio);
-        return priority == Depth ? end() : iterator{*this, levels[priority].begin(), priority};
-    }
-
-    [[nodiscard]] const_iterator cend(u32 min_prio = Depth - 1) const {
-        return min_prio == Depth - 1 ? const_iterator{*this, Depth} : cbegin(min_prio + 1);
-    }
-    [[nodiscard]] const_iterator end(u32 min_prio = Depth - 1) const {
-        return cend(min_prio);
-    }
-    [[nodiscard]] iterator end(u32 min_prio = Depth - 1) {
-        return min_prio == Depth - 1 ? iterator{*this, Depth} : begin(min_prio + 1);
-    }
-
-    [[nodiscard]] T& front(u32 max_priority = 0) {
-        const u32 priority = highest_priority_set(max_priority);
-        return levels[priority == Depth ? 0 : priority].front();
-    }
-    [[nodiscard]] const T& front(u32 max_priority = 0) const {
-        const u32 priority = highest_priority_set(max_priority);
-        return levels[priority == Depth ? 0 : priority].front();
-    }
-
-    [[nodiscard]] T& back(u32 min_priority = Depth - 1) {
-        const u32 priority = lowest_priority_set(min_priority); // intended
-        return levels[priority == Depth ? 63 : priority].back();
-    }
-    [[nodiscard]] const T& back(u32 min_priority = Depth - 1) const {
-        const u32 priority = lowest_priority_set(min_priority); // intended
-        return levels[priority == Depth ? 63 : priority].back();
-    }
-
-    void clear() {
-        used_priorities = 0;
-        for (std::size_t i = 0; i < Depth; i++) {
-            levels[i].clear();
-        }
-    }
-
-private:
-    using const_list_iterator = typename std::list<T>::const_iterator;
-
-    static void ListShiftForward(std::list<T>& list, const std::size_t shift = 1) {
-        if (shift >= list.size()) {
-            return;
-        }
-
-        const auto begin_range = list.begin();
-        const auto end_range = std::next(begin_range, shift);
-        list.splice(list.end(), list, begin_range, end_range);
-    }
-
-    static void ListSplice(std::list<T>& in_list, const_list_iterator position,
-                           std::list<T>& out_list, const_list_iterator element) {
-        in_list.splice(position, out_list, element);
-    }
-
-    [[nodiscard]] static const_list_iterator ListIterateTo(const std::list<T>& list,
-                                                           const T& element) {
-        auto it = list.cbegin();
-        while (it != list.cend() && *it != element) {
-            ++it;
-        }
-        return it;
-    }
-
-    std::array<std::list<T>, Depth> levels;
-    u64 used_priorities = 0;
-};
-
-} // namespace Common
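
The header deleted above pairs one std::list per priority level with a 64-bit used_priorities bitmask, so locating the highest populated priority is a single count-trailing-zeroes rather than a scan of all 64 lists; that is what makes front() and highest_priority_set() O(1). A minimal standalone sketch of that lookup, using std::countr_zero from C++20's <bit> in place of yuzu's CountTrailingZeroes64 (illustrative only, not part of this commit):

#include <array>
#include <bit>
#include <cstdint>
#include <cstdio>
#include <list>

int main() {
    constexpr std::size_t Depth = 64;
    std::array<std::list<int>, Depth> levels{}; // one FIFO per discrete priority
    std::uint64_t used_priorities = 0;          // bit i set <=> levels[i] is non-empty

    auto add = [&](int element, unsigned priority) {
        levels[priority].push_back(element);
        used_priorities |= 1ULL << priority;
    };

    add(7, 30);
    add(42, 5);
    add(13, 5);

    // Highest priority in use = lowest set bit; O(1) instead of walking 64 lists.
    const unsigned highest = std::countr_zero(used_priorities);
    std::printf("highest priority set: %u, front: %d\n", highest, levels[highest].front());
    return 0;
}

The same bitmask is what lets empty(), highest_priority_set() and lowest_priority_set() above avoid touching the per-priority lists at all; the deleted test at the bottom of this commit exercises the full class through add(), yield(), adjust() and these priority queries.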
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 662839ff8..ee61f22c0 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -148,6 +148,8 @@ add_library(core STATIC
     hle/kernel/code_set.cpp
     hle/kernel/code_set.h
     hle/kernel/errors.h
+    hle/kernel/global_scheduler_context.cpp
+    hle/kernel/global_scheduler_context.h
     hle/kernel/handle_table.cpp
     hle/kernel/handle_table.h
     hle/kernel/hle_ipc.cpp
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
new file mode 100644
index 000000000..40e9adf47
--- /dev/null
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -0,0 +1,55 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <mutex>
+
+#include "common/assert.h"
+#include "core/core.h"
+#include "core/hle/kernel/global_scheduler_context.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/kernel.h"
+
+namespace Kernel {
+
+GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
+    : kernel{kernel}, scheduler_lock{kernel} {}
+
+GlobalSchedulerContext::~GlobalSchedulerContext() = default;
+
+void GlobalSchedulerContext::AddThread(std::shared_ptr<Thread> thread) {
+    std::scoped_lock lock{global_list_guard};
+    thread_list.push_back(std::move(thread));
+}
+
+void GlobalSchedulerContext::RemoveThread(std::shared_ptr<Thread> thread) {
+    std::scoped_lock lock{global_list_guard};
+    thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
+                      thread_list.end());
+}
+
+void GlobalSchedulerContext::PreemptThreads() {
+    // The priority levels at which the global scheduler preempts threads every 10 ms. They are
+    // ordered from Core 0 to Core 3.
+    std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 63};
+
+    ASSERT(IsLocked());
+    for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+        const u32 priority = preemption_priorities[core_id];
+        kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
+    }
+}
+
+bool GlobalSchedulerContext::IsLocked() const {
+    return scheduler_lock.IsLockedByCurrentThread();
+}
+
+void GlobalSchedulerContext::Lock() {
+    scheduler_lock.Lock();
+}
+
+void GlobalSchedulerContext::Unlock() {
+    scheduler_lock.Unlock();
+}
+
+} // namespace Kernel
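
PreemptThreads() above rotates each core's scheduled queue at a fixed preemption priority (59 for cores 0-2, 63 for core 3) and, per its comment, is expected to run every 10 ms with the scheduler lock held (it asserts IsLocked()). A rough sketch of that cadence as a plain host-side loop; the real trigger in yuzu is the core timing subsystem, and the callback wiring here is hypothetical:

#include <atomic>
#include <chrono>
#include <functional>
#include <thread>

// Hypothetical driver, for illustration only: every 10 ms invoke a callback that is
// expected to take the global scheduler lock and call PreemptThreads(), which then
// rotates each core's queue at its preemption priority under ASSERT(IsLocked()).
void PreemptionLoop(const std::function<void()>& locked_preempt_pass,
                    const std::atomic_bool& running) {
    using namespace std::chrono_literals;
    while (running.load()) {
        std::this_thread::sleep_for(10ms);
        locked_preempt_pass(); // expected to Lock(), PreemptThreads(), then Unlock()
    }
}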
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
new file mode 100644
index 000000000..40fe44cc0
--- /dev/null
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -0,0 +1,79 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <atomic>
+#include <vector>
+
+#include "common/common_types.h"
+#include "common/spin_lock.h"
+#include "core/hardware_properties.h"
+#include "core/hle/kernel/k_priority_queue.h"
+#include "core/hle/kernel/k_scheduler_lock.h"
+#include "core/hle/kernel/thread.h"
+
+namespace Kernel {
+
+class KernelCore;
+class SchedulerLock;
+
+using KSchedulerPriorityQueue =
+    KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
+static constexpr s32 HighestCoreMigrationAllowedPriority = 2;
+
+class GlobalSchedulerContext final {
+    friend class KScheduler;
+
+public:
+    explicit GlobalSchedulerContext(KernelCore& kernel);
+    ~GlobalSchedulerContext();
+
+    /// Adds a new thread to the scheduler
+    void AddThread(std::shared_ptr<Thread> thread);
+
+    /// Removes a thread from the scheduler
+    void RemoveThread(std::shared_ptr<Thread> thread);
+
+    /// Returns a list of all threads managed by the scheduler
+    const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
+        return thread_list;
+    }
+
+    /**
+     * Rotates the scheduling queues of threads at a preemption priority and then does
+     * some core rebalancing. Preemption priorities can be found in the array
+     * 'preemption_priorities'.
+     *
+     * @note This operation happens every 10ms.
+     */
+    void PreemptThreads();
+
+    /// Returns true if the global scheduler lock is acquired
+    bool IsLocked() const;
+
+private:
+    friend class SchedulerLock;
+
+    /// Lock the scheduler to the current thread.
+    void Lock();
+
+    /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling
+    /// and reschedules current core if needed.
+    void Unlock();
+
+    using LockType = KAbstractSchedulerLock<KScheduler>;
+
+    KernelCore& kernel;
+
+    std::atomic_bool scheduler_update_needed{};
+    KSchedulerPriorityQueue priority_queue;
+    LockType scheduler_lock;
+
+    /// Lists all thread ids that aren't deleted/etc.
+    std::vector<std::shared_ptr<Thread>> thread_list;
+    Common::SpinLock global_list_guard{};
+};
+
+} // namespace Kernel
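
Lock() and Unlock() above are private and exposed only through the SchedulerLock friendship, which points callers toward scoped, RAII-style locking: Unlock() is where thread reselection and cross-core rescheduling happen, so it must always pair with a preceding Lock(). A generic guard with that shape, as a hypothetical illustration (to apply it to GlobalSchedulerContext it would have to be the befriended SchedulerLock type, since these members are private):

// Hypothetical sketch, not part of this commit: a scoped guard over any type that
// exposes a Lock()/Unlock() pair, keeping the unlock-and-reschedule step exception-safe.
template <typename SchedulerLockable>
class ScopedSchedulerLock {
public:
    explicit ScopedSchedulerLock(SchedulerLockable& context) : context{context} {
        context.Lock();
    }
    ~ScopedSchedulerLock() {
        context.Unlock();
    }

    ScopedSchedulerLock(const ScopedSchedulerLock&) = delete;
    ScopedSchedulerLock& operator=(const ScopedSchedulerLock&) = delete;

private:
    SchedulerLockable& context;
};

A caller would then wrap any operation that requires IsLocked(), such as PreemptThreads(), in a guard built from kernel.GlobalSchedulerContext(); the guard name and template form here are illustrative only.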
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 7f7da610d..c7e2eabd4 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -5,12 +5,6 @@
 // This file references various implementation details from Atmosphere, an open-source firmware for
 // the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
 
-#include <algorithm>
-#include <mutex>
-#include <set>
-#include <unordered_set>
-#include <utility>
-
 #include "common/assert.h"
 #include "common/bit_util.h"
 #include "common/fiber.h"
@@ -19,10 +13,10 @@
19#include "core/core.h" 13#include "core/core.h"
20#include "core/core_timing.h" 14#include "core/core_timing.h"
21#include "core/cpu_manager.h" 15#include "core/cpu_manager.h"
16#include "core/hle/kernel/k_scheduler.h"
22#include "core/hle/kernel/kernel.h" 17#include "core/hle/kernel/kernel.h"
23#include "core/hle/kernel/physical_core.h" 18#include "core/hle/kernel/physical_core.h"
24#include "core/hle/kernel/process.h" 19#include "core/hle/kernel/process.h"
25#include "core/hle/kernel/k_scheduler.h"
26#include "core/hle/kernel/thread.h" 20#include "core/hle/kernel/thread.h"
27#include "core/hle/kernel/time_manager.h" 21#include "core/hle/kernel/time_manager.h"
28 22
@@ -34,11 +28,6 @@ static void IncrementScheduledCount(Kernel::Thread* thread) {
     }
 }
 
-GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
-    : kernel{kernel}, scheduler_lock{kernel} {}
-
-GlobalSchedulerContext::~GlobalSchedulerContext() = default;
-
 /*static*/ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
                                             Core::EmuThreadHandle global_thread) {
     u32 current_core = global_thread.host_handle;
@@ -205,33 +194,6 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
     return cores_needing_scheduling;
 }
 
-void GlobalSchedulerContext::AddThread(std::shared_ptr<Thread> thread) {
-    std::scoped_lock lock{global_list_guard};
-    thread_list.push_back(std::move(thread));
-}
-
-void GlobalSchedulerContext::RemoveThread(std::shared_ptr<Thread> thread) {
-    std::scoped_lock lock{global_list_guard};
-    thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
-                      thread_list.end());
-}
-
-void GlobalSchedulerContext::PreemptThreads() {
-    // The priority levels at which the global scheduler preempts threads every 10 ms. They are
-    // ordered from Core 0 to Core 3.
-    std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 63};
-
-    ASSERT(IsLocked());
-    for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-        const u32 priority = preemption_priorities[core_id];
-        kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
-    }
-}
-
-bool GlobalSchedulerContext::IsLocked() const {
-    return scheduler_lock.IsLockedByCurrentThread();
-}
-
 /*static*/ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread,
                                                  u32 old_state) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
@@ -635,14 +597,6 @@ void KScheduler::YieldToAnyThread() {
     }
 }
 
-void GlobalSchedulerContext::Lock() {
-    scheduler_lock.Lock();
-}
-
-void GlobalSchedulerContext::Unlock() {
-    scheduler_lock.Unlock();
-}
-
 KScheduler::KScheduler(Core::System& system, std::size_t core_id)
     : system(system), core_id(core_id) {
     switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this);
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 535ee34b9..7f020d96e 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -8,94 +8,27 @@
 #pragma once
 
 #include <atomic>
-#include <memory>
-#include <mutex>
-#include <vector>
 
 #include "common/common_types.h"
-#include "common/multi_level_queue.h"
-#include "common/scope_exit.h"
 #include "common/spin_lock.h"
-#include "core/core_timing.h"
-#include "core/hardware_properties.h"
+#include "core/hle/kernel/global_scheduler_context.h"
 #include "core/hle/kernel/k_priority_queue.h"
 #include "core/hle/kernel/k_scheduler_lock.h"
-#include "core/hle/kernel/thread.h"
 
 namespace Common {
 class Fiber;
 }
 
 namespace Core {
-class ARM_Interface;
 class System;
-} // namespace Core
+}
 
 namespace Kernel {
 
 class KernelCore;
 class Process;
 class SchedulerLock;
-
-using KSchedulerPriorityQueue =
-    KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
-static constexpr s32 HighestCoreMigrationAllowedPriority = 2;
-
-class GlobalSchedulerContext final {
-    friend class KScheduler;
-
-public:
-    explicit GlobalSchedulerContext(KernelCore& kernel);
-    ~GlobalSchedulerContext();
-
-    /// Adds a new thread to the scheduler
-    void AddThread(std::shared_ptr<Thread> thread);
-
-    /// Removes a thread from the scheduler
-    void RemoveThread(std::shared_ptr<Thread> thread);
-
-    /// Returns a list of all threads managed by the scheduler
-    const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
-        return thread_list;
-    }
-
-    /**
-     * Rotates the scheduling queues of threads at a preemption priority and then does
-     * some core rebalancing. Preemption priorities can be found in the array
-     * 'preemption_priorities'.
-     *
-     * @note This operation happens every 10ms.
-     */
-    void PreemptThreads();
-
-    u32 CpuCoresCount() const {
-        return Core::Hardware::NUM_CPU_CORES;
-    }
-
-    bool IsLocked() const;
-
-private:
-    friend class SchedulerLock;
-
-    /// Lock the scheduler to the current thread.
-    void Lock();
-
-    /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling
-    /// and reschedules current core if needed.
-    void Unlock();
-
-    using LockType = KAbstractSchedulerLock<KScheduler>;
-
-    KernelCore& kernel;
-
-    std::atomic_bool scheduler_update_needed{};
-    KSchedulerPriorityQueue priority_queue;
-    LockType scheduler_lock;
-
-    /// Lists all thread ids that aren't deleted/etc.
-    std::vector<std::shared_ptr<Thread>> thread_list;
-    Common::SpinLock global_list_guard{};
-};
+class Thread;
 
 class KScheduler final {
 public:
@@ -221,7 +154,6 @@ private:
 
     /// Switches the CPU's active thread context to that of the specified thread
     void ScheduleImpl();
-    void SwitchThread(Thread* next_thread);
 
     /// When a thread wakes up, it must run this through its new scheduler
     void SwitchContextStep2();
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt
index 47ef30aa9..d80b0b688 100644
--- a/src/tests/CMakeLists.txt
+++ b/src/tests/CMakeLists.txt
@@ -2,7 +2,6 @@ add_executable(tests
     common/bit_field.cpp
     common/bit_utils.cpp
     common/fibers.cpp
-    common/multi_level_queue.cpp
     common/param_package.cpp
     common/ring_buffer.cpp
     core/arm/arm_test_common.cpp
diff --git a/src/tests/common/multi_level_queue.cpp b/src/tests/common/multi_level_queue.cpp
deleted file mode 100644
index cca7ec7da..000000000
--- a/src/tests/common/multi_level_queue.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2019 Yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <catch2/catch.hpp>
-#include <math.h>
-#include "common/common_types.h"
-#include "common/multi_level_queue.h"
-
-namespace Common {
-
-TEST_CASE("MultiLevelQueue", "[common]") {
-    std::array<f32, 8> values = {0.0, 5.0, 1.0, 9.0, 8.0, 2.0, 6.0, 7.0};
-    Common::MultiLevelQueue<f32, 64> mlq;
-    REQUIRE(mlq.empty());
-    mlq.add(values[2], 2);
-    mlq.add(values[7], 7);
-    mlq.add(values[3], 3);
-    mlq.add(values[4], 4);
-    mlq.add(values[0], 0);
-    mlq.add(values[5], 5);
-    mlq.add(values[6], 6);
-    mlq.add(values[1], 1);
-    u32 index = 0;
-    bool all_set = true;
-    for (auto& f : mlq) {
-        all_set &= (f == values[index]);
-        index++;
-    }
-    REQUIRE(all_set);
-    REQUIRE(!mlq.empty());
-    f32 v = 8.0;
-    mlq.add(v, 2);
-    v = -7.0;
-    mlq.add(v, 2, false);
-    REQUIRE(mlq.front(2) == -7.0);
-    mlq.yield(2);
-    REQUIRE(mlq.front(2) == values[2]);
-    REQUIRE(mlq.back(2) == -7.0);
-    REQUIRE(mlq.empty(8));
-    v = 10.0;
-    mlq.add(v, 8);
-    mlq.adjust(v, 8, 9);
-    REQUIRE(mlq.front(9) == v);
-    REQUIRE(mlq.empty(8));
-    REQUIRE(!mlq.empty(9));
-    mlq.adjust(values[0], 0, 9);
-    REQUIRE(mlq.highest_priority_set() == 1);
-    REQUIRE(mlq.lowest_priority_set() == 9);
-    mlq.remove(values[1], 1);
-    REQUIRE(mlq.highest_priority_set() == 2);
-    REQUIRE(mlq.empty(1));
-}
-
-} // namespace Common