Diffstat (limited to 'src')
-rw-r--r-- src/common/CMakeLists.txt | 2
-rw-r--r-- src/common/bit_set.h | 99
-rw-r--r-- src/common/multi_level_queue.h | 345
-rw-r--r-- src/core/CMakeLists.txt | 11
-rw-r--r-- src/core/arm/dynarmic/arm_dynarmic_32.cpp | 3
-rw-r--r-- src/core/arm/dynarmic/arm_dynarmic_64.cpp | 5
-rw-r--r-- src/core/core.cpp | 26
-rw-r--r-- src/core/core.h | 20
-rw-r--r-- src/core/cpu_manager.cpp | 98
-rw-r--r-- src/core/hle/kernel/address_arbiter.cpp | 21
-rw-r--r-- src/core/hle/kernel/global_scheduler_context.cpp | 52
-rw-r--r-- src/core/hle/kernel/global_scheduler_context.h | 81
-rw-r--r-- src/core/hle/kernel/handle_table.cpp | 4
-rw-r--r-- src/core/hle/kernel/hle_ipc.cpp | 10
-rw-r--r-- src/core/hle/kernel/k_affinity_mask.h | 58
-rw-r--r-- src/core/hle/kernel/k_priority_queue.h | 449
-rw-r--r-- src/core/hle/kernel/k_scheduler.cpp | 784
-rw-r--r-- src/core/hle/kernel/k_scheduler.h | 201
-rw-r--r-- src/core/hle/kernel/k_scheduler_lock.h | 74
-rw-r--r-- src/core/hle/kernel/k_scoped_lock.h | 41
-rw-r--r-- src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h | 50
-rw-r--r-- src/core/hle/kernel/kernel.cpp | 63
-rw-r--r-- src/core/hle/kernel/kernel.h | 17
-rw-r--r-- src/core/hle/kernel/mutex.cpp | 12
-rw-r--r-- src/core/hle/kernel/physical_core.cpp | 8
-rw-r--r-- src/core/hle/kernel/physical_core.h | 13
-rw-r--r-- src/core/hle/kernel/process.cpp | 14
-rw-r--r-- src/core/hle/kernel/process.h | 13
-rw-r--r-- src/core/hle/kernel/readable_event.cpp | 4
-rw-r--r-- src/core/hle/kernel/scheduler.cpp | 819
-rw-r--r-- src/core/hle/kernel/scheduler.h | 320
-rw-r--r-- src/core/hle/kernel/server_session.cpp | 4
-rw-r--r-- src/core/hle/kernel/svc.cpp | 78
-rw-r--r-- src/core/hle/kernel/synchronization.cpp | 11
-rw-r--r-- src/core/hle/kernel/thread.cpp | 79
-rw-r--r-- src/core/hle/kernel/thread.h | 114
-rw-r--r-- src/core/hle/kernel/time_manager.cpp | 17
-rw-r--r-- src/core/hle/service/nvflinger/buffer_queue.cpp | 114
-rw-r--r-- src/core/hle/service/nvflinger/buffer_queue.h | 3
-rw-r--r-- src/core/hle/service/time/time.cpp | 2
-rw-r--r-- src/core/hle/service/vi/vi.cpp | 16
-rw-r--r-- src/core/settings.cpp | 2
-rw-r--r-- src/tests/CMakeLists.txt | 1
-rw-r--r-- src/tests/common/multi_level_queue.cpp | 55
-rw-r--r-- src/yuzu/configuration/config.cpp | 4
-rw-r--r-- src/yuzu/configuration/configure_debug.cpp | 5
-rw-r--r-- src/yuzu/configuration/configure_debug.ui | 74
-rw-r--r-- src/yuzu/debugger/wait_tree.cpp | 10
-rw-r--r-- src/yuzu_cmd/config.cpp | 3
-rw-r--r-- src/yuzu_cmd/default_ini.h | 3
-rw-r--r-- src/yuzu_cmd/yuzu.cpp | 24
-rw-r--r-- src/yuzu_tester/config.cpp | 1
-rw-r--r-- src/yuzu_tester/yuzu.cpp | 1
53 files changed, 2286 insertions, 2052 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 0acf70a0a..943ff996e 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -104,6 +104,7 @@ add_library(common STATIC
104 detached_tasks.h 104 detached_tasks.h
105 bit_cast.h 105 bit_cast.h
106 bit_field.h 106 bit_field.h
107 bit_set.h
107 bit_util.h 108 bit_util.h
108 cityhash.cpp 109 cityhash.cpp
109 cityhash.h 110 cityhash.h
@@ -140,7 +141,6 @@ add_library(common STATIC
140 microprofile.h 141 microprofile.h
141 microprofileui.h 142 microprofileui.h
142 misc.cpp 143 misc.cpp
143 multi_level_queue.h
144 page_table.cpp 144 page_table.cpp
145 page_table.h 145 page_table.h
146 param_package.cpp 146 param_package.cpp
diff --git a/src/common/bit_set.h b/src/common/bit_set.h
new file mode 100644
index 000000000..9235ad412
--- /dev/null
+++ b/src/common/bit_set.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright (c) 2018-2020 Atmosphère-NX
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#pragma once
18
19#include <array>
20#include <bit>
21
22#include "common/alignment.h"
23#include "common/bit_util.h"
24#include "common/common_types.h"
25
26namespace Common {
27
28namespace impl {
29
30template <typename Storage, size_t N>
31class BitSet {
32
33public:
34 constexpr BitSet() = default;
35
36 constexpr void SetBit(size_t i) {
37 this->words[i / FlagsPerWord] |= GetBitMask(i % FlagsPerWord);
38 }
39
40 constexpr void ClearBit(size_t i) {
41 this->words[i / FlagsPerWord] &= ~GetBitMask(i % FlagsPerWord);
42 }
43
44 constexpr size_t CountLeadingZero() const {
45 for (size_t i = 0; i < NumWords; i++) {
46 if (this->words[i]) {
47 return FlagsPerWord * i + CountLeadingZeroImpl(this->words[i]);
48 }
49 }
50 return FlagsPerWord * NumWords;
51 }
52
53 constexpr size_t GetNextSet(size_t n) const {
54 for (size_t i = (n + 1) / FlagsPerWord; i < NumWords; i++) {
55 Storage word = this->words[i];
56 if (!IsAligned(n + 1, FlagsPerWord)) {
57 word &= GetBitMask(n % FlagsPerWord) - 1;
58 }
59 if (word) {
60 return FlagsPerWord * i + CountLeadingZeroImpl(word);
61 }
62 }
63 return FlagsPerWord * NumWords;
64 }
65
66private:
67 static_assert(std::is_unsigned_v<Storage>);
68 static_assert(sizeof(Storage) <= sizeof(u64));
69
70 static constexpr size_t FlagsPerWord = BitSize<Storage>();
71 static constexpr size_t NumWords = AlignUp(N, FlagsPerWord) / FlagsPerWord;
72
73 static constexpr auto CountLeadingZeroImpl(Storage word) {
74 return std::countl_zero(static_cast<unsigned long long>(word)) -
75 (BitSize<unsigned long long>() - FlagsPerWord);
76 }
77
78 static constexpr Storage GetBitMask(size_t bit) {
79 return Storage(1) << (FlagsPerWord - 1 - bit);
80 }
81
82 std::array<Storage, NumWords> words{};
83};
84
85} // namespace impl
86
87template <size_t N>
88using BitSet8 = impl::BitSet<u8, N>;
89
90template <size_t N>
91using BitSet16 = impl::BitSet<u16, N>;
92
93template <size_t N>
94using BitSet32 = impl::BitSet<u32, N>;
95
96template <size_t N>
97using BitSet64 = impl::BitSet<u64, N>;
98
99} // namespace Common
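
A minimal usage sketch of the new bit set (not part of the patch; the surrounding function and values are illustrative only). Common::BitSet stores bits MSB-first, so the lowest set index is found first, which is how the scheduler code later treats index 0 as the highest priority:

#include "common/bit_set.h"

void BitSetExample() {
    Common::BitSet64<64> priorities{};
    priorities.SetBit(28);                               // a thread became ready at priority 28
    priorities.SetBit(44);                               // another became ready at priority 44
    const auto highest = priorities.CountLeadingZero();  // 28: first (highest-priority) set bit
    const auto next = priorities.GetNextSet(highest);    // 44: next set bit after index 28
    priorities.ClearBit(28);                             // the priority 28 queue drained
}
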
diff --git a/src/common/multi_level_queue.h b/src/common/multi_level_queue.h
deleted file mode 100644
index 4b305bf40..000000000
--- a/src/common/multi_level_queue.h
+++ /dev/null
@@ -1,345 +0,0 @@
1// Copyright 2019 TuxSH
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <iterator>
9#include <list>
10#include <utility>
11
12#include "common/bit_util.h"
13#include "common/common_types.h"
14
15namespace Common {
16
17/**
18 * A MultiLevelQueue is a type of priority queue which has the following characteristics:
19 * - iterable through each of its elements.
20 * - back can be obtained.
21 * - O(1) add, lookup (both front and back)
22 * - discrete priorities and a max of 64 priorities (limited domain)
23 * This type of priority queue is normally used for managing threads within a scheduler
24 */
25template <typename T, std::size_t Depth>
26class MultiLevelQueue {
27public:
28 using value_type = T;
29 using reference = value_type&;
30 using const_reference = const value_type&;
31 using pointer = value_type*;
32 using const_pointer = const value_type*;
33
34 using difference_type = typename std::pointer_traits<pointer>::difference_type;
35 using size_type = std::size_t;
36
37 template <bool is_constant>
38 class iterator_impl {
39 public:
40 using iterator_category = std::bidirectional_iterator_tag;
41 using value_type = T;
42 using pointer = std::conditional_t<is_constant, T*, const T*>;
43 using reference = std::conditional_t<is_constant, const T&, T&>;
44 using difference_type = typename std::pointer_traits<pointer>::difference_type;
45
46 friend bool operator==(const iterator_impl& lhs, const iterator_impl& rhs) {
47 if (lhs.IsEnd() && rhs.IsEnd())
48 return true;
49 return std::tie(lhs.current_priority, lhs.it) == std::tie(rhs.current_priority, rhs.it);
50 }
51
52 friend bool operator!=(const iterator_impl& lhs, const iterator_impl& rhs) {
53 return !operator==(lhs, rhs);
54 }
55
56 reference operator*() const {
57 return *it;
58 }
59
60 pointer operator->() const {
61 return it.operator->();
62 }
63
64 iterator_impl& operator++() {
65 if (IsEnd()) {
66 return *this;
67 }
68
69 ++it;
70
71 if (it == GetEndItForPrio()) {
72 u64 prios = mlq.used_priorities;
73 prios &= ~((1ULL << (current_priority + 1)) - 1);
74 if (prios == 0) {
75 current_priority = static_cast<u32>(mlq.depth());
76 } else {
77 current_priority = CountTrailingZeroes64(prios);
78 it = GetBeginItForPrio();
79 }
80 }
81 return *this;
82 }
83
84 iterator_impl& operator--() {
85 if (IsEnd()) {
86 if (mlq.used_priorities != 0) {
87 current_priority = 63 - CountLeadingZeroes64(mlq.used_priorities);
88 it = GetEndItForPrio();
89 --it;
90 }
91 } else if (it == GetBeginItForPrio()) {
92 u64 prios = mlq.used_priorities;
93 prios &= (1ULL << current_priority) - 1;
94 if (prios != 0) {
95 current_priority = CountTrailingZeroes64(prios);
96 it = GetEndItForPrio();
97 --it;
98 }
99 } else {
100 --it;
101 }
102 return *this;
103 }
104
105 iterator_impl operator++(int) {
106 const iterator_impl v{*this};
107 ++(*this);
108 return v;
109 }
110
111 iterator_impl operator--(int) {
112 const iterator_impl v{*this};
113 --(*this);
114 return v;
115 }
116
117 // allow implicit const->non-const
118 iterator_impl(const iterator_impl<false>& other)
119 : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
120
121 iterator_impl(const iterator_impl<true>& other)
122 : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
123
124 iterator_impl& operator=(const iterator_impl<false>& other) {
125 mlq = other.mlq;
126 it = other.it;
127 current_priority = other.current_priority;
128 return *this;
129 }
130
131 friend class iterator_impl<true>;
132 iterator_impl() = default;
133
134 private:
135 friend class MultiLevelQueue;
136 using container_ref =
137 std::conditional_t<is_constant, const MultiLevelQueue&, MultiLevelQueue&>;
138 using list_iterator = std::conditional_t<is_constant, typename std::list<T>::const_iterator,
139 typename std::list<T>::iterator>;
140
141 explicit iterator_impl(container_ref mlq, list_iterator it, u32 current_priority)
142 : mlq(mlq), it(it), current_priority(current_priority) {}
143 explicit iterator_impl(container_ref mlq, u32 current_priority)
144 : mlq(mlq), it(), current_priority(current_priority) {}
145
146 bool IsEnd() const {
147 return current_priority == mlq.depth();
148 }
149
150 list_iterator GetBeginItForPrio() const {
151 return mlq.levels[current_priority].begin();
152 }
153
154 list_iterator GetEndItForPrio() const {
155 return mlq.levels[current_priority].end();
156 }
157
158 container_ref mlq;
159 list_iterator it;
160 u32 current_priority;
161 };
162
163 using iterator = iterator_impl<false>;
164 using const_iterator = iterator_impl<true>;
165
166 void add(const T& element, u32 priority, bool send_back = true) {
167 if (send_back)
168 levels[priority].push_back(element);
169 else
170 levels[priority].push_front(element);
171 used_priorities |= 1ULL << priority;
172 }
173
174 void remove(const T& element, u32 priority) {
175 auto it = ListIterateTo(levels[priority], element);
176 if (it == levels[priority].end())
177 return;
178 levels[priority].erase(it);
179 if (levels[priority].empty()) {
180 used_priorities &= ~(1ULL << priority);
181 }
182 }
183
184 void adjust(const T& element, u32 old_priority, u32 new_priority, bool adjust_front = false) {
185 remove(element, old_priority);
186 add(element, new_priority, !adjust_front);
187 }
188 void adjust(const_iterator it, u32 old_priority, u32 new_priority, bool adjust_front = false) {
189 adjust(*it, old_priority, new_priority, adjust_front);
190 }
191
192 void transfer_to_front(const T& element, u32 priority, MultiLevelQueue& other) {
193 ListSplice(other.levels[priority], other.levels[priority].begin(), levels[priority],
194 ListIterateTo(levels[priority], element));
195
196 other.used_priorities |= 1ULL << priority;
197
198 if (levels[priority].empty()) {
199 used_priorities &= ~(1ULL << priority);
200 }
201 }
202
203 void transfer_to_front(const_iterator it, u32 priority, MultiLevelQueue& other) {
204 transfer_to_front(*it, priority, other);
205 }
206
207 void transfer_to_back(const T& element, u32 priority, MultiLevelQueue& other) {
208 ListSplice(other.levels[priority], other.levels[priority].end(), levels[priority],
209 ListIterateTo(levels[priority], element));
210
211 other.used_priorities |= 1ULL << priority;
212
213 if (levels[priority].empty()) {
214 used_priorities &= ~(1ULL << priority);
215 }
216 }
217
218 void transfer_to_back(const_iterator it, u32 priority, MultiLevelQueue& other) {
219 transfer_to_back(*it, priority, other);
220 }
221
222 void yield(u32 priority, std::size_t n = 1) {
223 ListShiftForward(levels[priority], n);
224 }
225
226 [[nodiscard]] std::size_t depth() const {
227 return Depth;
228 }
229
230 [[nodiscard]] std::size_t size(u32 priority) const {
231 return levels[priority].size();
232 }
233
234 [[nodiscard]] std::size_t size() const {
235 u64 priorities = used_priorities;
236 std::size_t size = 0;
237 while (priorities != 0) {
238 const u64 current_priority = CountTrailingZeroes64(priorities);
239 size += levels[current_priority].size();
240 priorities &= ~(1ULL << current_priority);
241 }
242 return size;
243 }
244
245 [[nodiscard]] bool empty() const {
246 return used_priorities == 0;
247 }
248
249 [[nodiscard]] bool empty(u32 priority) const {
250 return (used_priorities & (1ULL << priority)) == 0;
251 }
252
253 [[nodiscard]] u32 highest_priority_set(u32 max_priority = 0) const {
254 const u64 priorities =
255 max_priority == 0 ? used_priorities : (used_priorities & ~((1ULL << max_priority) - 1));
256 return priorities == 0 ? Depth : static_cast<u32>(CountTrailingZeroes64(priorities));
257 }
258
259 [[nodiscard]] u32 lowest_priority_set(u32 min_priority = Depth - 1) const {
260 const u64 priorities = min_priority >= Depth - 1
261 ? used_priorities
262 : (used_priorities & ((1ULL << (min_priority + 1)) - 1));
263 return priorities == 0 ? Depth : 63 - CountLeadingZeroes64(priorities);
264 }
265
266 [[nodiscard]] const_iterator cbegin(u32 max_prio = 0) const {
267 const u32 priority = highest_priority_set(max_prio);
268 return priority == Depth ? cend()
269 : const_iterator{*this, levels[priority].cbegin(), priority};
270 }
271 [[nodiscard]] const_iterator begin(u32 max_prio = 0) const {
272 return cbegin(max_prio);
273 }
274 [[nodiscard]] iterator begin(u32 max_prio = 0) {
275 const u32 priority = highest_priority_set(max_prio);
276 return priority == Depth ? end() : iterator{*this, levels[priority].begin(), priority};
277 }
278
279 [[nodiscard]] const_iterator cend(u32 min_prio = Depth - 1) const {
280 return min_prio == Depth - 1 ? const_iterator{*this, Depth} : cbegin(min_prio + 1);
281 }
282 [[nodiscard]] const_iterator end(u32 min_prio = Depth - 1) const {
283 return cend(min_prio);
284 }
285 [[nodiscard]] iterator end(u32 min_prio = Depth - 1) {
286 return min_prio == Depth - 1 ? iterator{*this, Depth} : begin(min_prio + 1);
287 }
288
289 [[nodiscard]] T& front(u32 max_priority = 0) {
290 const u32 priority = highest_priority_set(max_priority);
291 return levels[priority == Depth ? 0 : priority].front();
292 }
293 [[nodiscard]] const T& front(u32 max_priority = 0) const {
294 const u32 priority = highest_priority_set(max_priority);
295 return levels[priority == Depth ? 0 : priority].front();
296 }
297
298 [[nodiscard]] T& back(u32 min_priority = Depth - 1) {
299 const u32 priority = lowest_priority_set(min_priority); // intended
300 return levels[priority == Depth ? 63 : priority].back();
301 }
302 [[nodiscard]] const T& back(u32 min_priority = Depth - 1) const {
303 const u32 priority = lowest_priority_set(min_priority); // intended
304 return levels[priority == Depth ? 63 : priority].back();
305 }
306
307 void clear() {
308 used_priorities = 0;
309 for (std::size_t i = 0; i < Depth; i++) {
310 levels[i].clear();
311 }
312 }
313
314private:
315 using const_list_iterator = typename std::list<T>::const_iterator;
316
317 static void ListShiftForward(std::list<T>& list, const std::size_t shift = 1) {
318 if (shift >= list.size()) {
319 return;
320 }
321
322 const auto begin_range = list.begin();
323 const auto end_range = std::next(begin_range, shift);
324 list.splice(list.end(), list, begin_range, end_range);
325 }
326
327 static void ListSplice(std::list<T>& in_list, const_list_iterator position,
328 std::list<T>& out_list, const_list_iterator element) {
329 in_list.splice(position, out_list, element);
330 }
331
332 [[nodiscard]] static const_list_iterator ListIterateTo(const std::list<T>& list,
333 const T& element) {
334 auto it = list.cbegin();
335 while (it != list.cend() && *it != element) {
336 ++it;
337 }
338 return it;
339 }
340
341 std::array<std::list<T>, Depth> levels;
342 u64 used_priorities = 0;
343};
344
345} // namespace Common
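
For reference only: the removed MultiLevelQueue was used by the old scheduler roughly as sketched below (illustrative, not part of the patch; the old scheduler stored thread pointers rather than ints, one level per priority):

#include "common/multi_level_queue.h"

void MultiLevelQueueExample() {
    Common::MultiLevelQueue<int, 64> queue;
    queue.add(7, 28);                 // enqueue at priority 28 (at the back by default)
    const int next = queue.front();   // O(1): front element of the highest set priority
    queue.yield(28);                  // rotate priority level 28 forward by one element
    queue.remove(7, 28);              // clears the level's bit once the level is empty
}
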
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 56c165336..59bd3d2a6 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -151,10 +151,19 @@ add_library(core STATIC
151 hle/kernel/code_set.cpp 151 hle/kernel/code_set.cpp
152 hle/kernel/code_set.h 152 hle/kernel/code_set.h
153 hle/kernel/errors.h 153 hle/kernel/errors.h
154 hle/kernel/global_scheduler_context.cpp
155 hle/kernel/global_scheduler_context.h
154 hle/kernel/handle_table.cpp 156 hle/kernel/handle_table.cpp
155 hle/kernel/handle_table.h 157 hle/kernel/handle_table.h
156 hle/kernel/hle_ipc.cpp 158 hle/kernel/hle_ipc.cpp
157 hle/kernel/hle_ipc.h 159 hle/kernel/hle_ipc.h
160 hle/kernel/k_affinity_mask.h
161 hle/kernel/k_priority_queue.h
162 hle/kernel/k_scheduler.cpp
163 hle/kernel/k_scheduler.h
164 hle/kernel/k_scheduler_lock.h
165 hle/kernel/k_scoped_lock.h
166 hle/kernel/k_scoped_scheduler_lock_and_sleep.h
158 hle/kernel/kernel.cpp 167 hle/kernel/kernel.cpp
159 hle/kernel/kernel.h 168 hle/kernel/kernel.h
160 hle/kernel/memory/address_space_info.cpp 169 hle/kernel/memory/address_space_info.cpp
@@ -189,8 +198,6 @@ add_library(core STATIC
189 hle/kernel/readable_event.h 198 hle/kernel/readable_event.h
190 hle/kernel/resource_limit.cpp 199 hle/kernel/resource_limit.cpp
191 hle/kernel/resource_limit.h 200 hle/kernel/resource_limit.h
192 hle/kernel/scheduler.cpp
193 hle/kernel/scheduler.h
194 hle/kernel/server_port.cpp 201 hle/kernel/server_port.cpp
195 hle/kernel/server_port.h 202 hle/kernel/server_port.h
196 hle/kernel/server_session.cpp 203 hle/kernel/server_session.cpp
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 193fd7d62..e9c74b1a6 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -294,6 +294,9 @@ void ARM_Dynarmic_32::InvalidateCacheRange(VAddr addr, std::size_t size) {
294} 294}
295 295
296void ARM_Dynarmic_32::ClearExclusiveState() { 296void ARM_Dynarmic_32::ClearExclusiveState() {
297 if (!jit) {
298 return;
299 }
297 jit->ClearExclusiveState(); 300 jit->ClearExclusiveState();
298} 301}
299 302
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 0f0585d0f..7a4eb88a2 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -15,8 +15,8 @@
15#include "core/core.h" 15#include "core/core.h"
16#include "core/core_timing.h" 16#include "core/core_timing.h"
17#include "core/hardware_properties.h" 17#include "core/hardware_properties.h"
18#include "core/hle/kernel/k_scheduler.h"
18#include "core/hle/kernel/process.h" 19#include "core/hle/kernel/process.h"
19#include "core/hle/kernel/scheduler.h"
20#include "core/hle/kernel/svc.h" 20#include "core/hle/kernel/svc.h"
21#include "core/memory.h" 21#include "core/memory.h"
22#include "core/settings.h" 22#include "core/settings.h"
@@ -330,6 +330,9 @@ void ARM_Dynarmic_64::InvalidateCacheRange(VAddr addr, std::size_t size) {
330} 330}
331 331
332void ARM_Dynarmic_64::ClearExclusiveState() { 332void ARM_Dynarmic_64::ClearExclusiveState() {
333 if (!jit) {
334 return;
335 }
333 jit->ClearExclusiveState(); 336 jit->ClearExclusiveState();
334} 337}
335 338
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 7e3c54618..0961c0819 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -27,10 +27,10 @@
27#include "core/file_sys/vfs_real.h" 27#include "core/file_sys/vfs_real.h"
28#include "core/hardware_interrupt_manager.h" 28#include "core/hardware_interrupt_manager.h"
29#include "core/hle/kernel/client_port.h" 29#include "core/hle/kernel/client_port.h"
30#include "core/hle/kernel/k_scheduler.h"
30#include "core/hle/kernel/kernel.h" 31#include "core/hle/kernel/kernel.h"
31#include "core/hle/kernel/physical_core.h" 32#include "core/hle/kernel/physical_core.h"
32#include "core/hle/kernel/process.h" 33#include "core/hle/kernel/process.h"
33#include "core/hle/kernel/scheduler.h"
34#include "core/hle/kernel/thread.h" 34#include "core/hle/kernel/thread.h"
35#include "core/hle/service/am/applets/applets.h" 35#include "core/hle/service/am/applets/applets.h"
36#include "core/hle/service/apm/controller.h" 36#include "core/hle/service/apm/controller.h"
@@ -507,14 +507,6 @@ std::size_t System::CurrentCoreIndex() const {
507 return core; 507 return core;
508} 508}
509 509
510Kernel::Scheduler& System::CurrentScheduler() {
511 return impl->kernel.CurrentScheduler();
512}
513
514const Kernel::Scheduler& System::CurrentScheduler() const {
515 return impl->kernel.CurrentScheduler();
516}
517
518Kernel::PhysicalCore& System::CurrentPhysicalCore() { 510Kernel::PhysicalCore& System::CurrentPhysicalCore() {
519 return impl->kernel.CurrentPhysicalCore(); 511 return impl->kernel.CurrentPhysicalCore();
520} 512}
@@ -523,22 +515,14 @@ const Kernel::PhysicalCore& System::CurrentPhysicalCore() const {
523 return impl->kernel.CurrentPhysicalCore(); 515 return impl->kernel.CurrentPhysicalCore();
524} 516}
525 517
526Kernel::Scheduler& System::Scheduler(std::size_t core_index) {
527 return impl->kernel.Scheduler(core_index);
528}
529
530const Kernel::Scheduler& System::Scheduler(std::size_t core_index) const {
531 return impl->kernel.Scheduler(core_index);
532}
533
534/// Gets the global scheduler 518/// Gets the global scheduler
535Kernel::GlobalScheduler& System::GlobalScheduler() { 519Kernel::GlobalSchedulerContext& System::GlobalSchedulerContext() {
536 return impl->kernel.GlobalScheduler(); 520 return impl->kernel.GlobalSchedulerContext();
537} 521}
538 522
539/// Gets the global scheduler 523/// Gets the global scheduler
540const Kernel::GlobalScheduler& System::GlobalScheduler() const { 524const Kernel::GlobalSchedulerContext& System::GlobalSchedulerContext() const {
541 return impl->kernel.GlobalScheduler(); 525 return impl->kernel.GlobalSchedulerContext();
542} 526}
543 527
544Kernel::Process* System::CurrentProcess() { 528Kernel::Process* System::CurrentProcess() {
diff --git a/src/core/core.h b/src/core/core.h
index 29b8fb92a..579a774e4 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -26,11 +26,11 @@ class VfsFilesystem;
26} // namespace FileSys 26} // namespace FileSys
27 27
28namespace Kernel { 28namespace Kernel {
29class GlobalScheduler; 29class GlobalSchedulerContext;
30class KernelCore; 30class KernelCore;
31class PhysicalCore; 31class PhysicalCore;
32class Process; 32class Process;
33class Scheduler; 33class KScheduler;
34} // namespace Kernel 34} // namespace Kernel
35 35
36namespace Loader { 36namespace Loader {
@@ -213,12 +213,6 @@ public:
213 /// Gets the index of the currently running CPU core 213 /// Gets the index of the currently running CPU core
214 [[nodiscard]] std::size_t CurrentCoreIndex() const; 214 [[nodiscard]] std::size_t CurrentCoreIndex() const;
215 215
216 /// Gets the scheduler for the CPU core that is currently running
217 [[nodiscard]] Kernel::Scheduler& CurrentScheduler();
218
219 /// Gets the scheduler for the CPU core that is currently running
220 [[nodiscard]] const Kernel::Scheduler& CurrentScheduler() const;
221
222 /// Gets the physical core for the CPU core that is currently running 216 /// Gets the physical core for the CPU core that is currently running
223 [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore(); 217 [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore();
224 218
@@ -261,17 +255,11 @@ public:
261 /// Gets an immutable reference to the renderer. 255 /// Gets an immutable reference to the renderer.
262 [[nodiscard]] const VideoCore::RendererBase& Renderer() const; 256 [[nodiscard]] const VideoCore::RendererBase& Renderer() const;
263 257
264 /// Gets the scheduler for the CPU core with the specified index
265 [[nodiscard]] Kernel::Scheduler& Scheduler(std::size_t core_index);
266
267 /// Gets the scheduler for the CPU core with the specified index
268 [[nodiscard]] const Kernel::Scheduler& Scheduler(std::size_t core_index) const;
269
270 /// Gets the global scheduler 258 /// Gets the global scheduler
271 [[nodiscard]] Kernel::GlobalScheduler& GlobalScheduler(); 259 [[nodiscard]] Kernel::GlobalSchedulerContext& GlobalSchedulerContext();
272 260
273 /// Gets the global scheduler 261 /// Gets the global scheduler
274 [[nodiscard]] const Kernel::GlobalScheduler& GlobalScheduler() const; 262 [[nodiscard]] const Kernel::GlobalSchedulerContext& GlobalSchedulerContext() const;
275 263
276 /// Gets the manager for the guest device memory 264 /// Gets the manager for the guest device memory
277 [[nodiscard]] Core::DeviceMemory& DeviceMemory(); 265 [[nodiscard]] Core::DeviceMemory& DeviceMemory();
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 0cff985e9..373395047 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -10,9 +10,9 @@
10#include "core/core.h" 10#include "core/core.h"
11#include "core/core_timing.h" 11#include "core/core_timing.h"
12#include "core/cpu_manager.h" 12#include "core/cpu_manager.h"
13#include "core/hle/kernel/k_scheduler.h"
13#include "core/hle/kernel/kernel.h" 14#include "core/hle/kernel/kernel.h"
14#include "core/hle/kernel/physical_core.h" 15#include "core/hle/kernel/physical_core.h"
15#include "core/hle/kernel/scheduler.h"
16#include "core/hle/kernel/thread.h" 16#include "core/hle/kernel/thread.h"
17#include "video_core/gpu.h" 17#include "video_core/gpu.h"
18 18
@@ -109,11 +109,8 @@ void* CpuManager::GetStartFuncParamater() {
109 109
110void CpuManager::MultiCoreRunGuestThread() { 110void CpuManager::MultiCoreRunGuestThread() {
111 auto& kernel = system.Kernel(); 111 auto& kernel = system.Kernel();
112 { 112 kernel.CurrentScheduler()->OnThreadStart();
113 auto& sched = kernel.CurrentScheduler(); 113 auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
114 sched.OnThreadStart();
115 }
116 auto* thread = kernel.CurrentScheduler().GetCurrentThread();
117 auto& host_context = thread->GetHostContext(); 114 auto& host_context = thread->GetHostContext();
118 host_context->SetRewindPoint(GuestRewindFunction, this); 115 host_context->SetRewindPoint(GuestRewindFunction, this);
119 MultiCoreRunGuestLoop(); 116 MultiCoreRunGuestLoop();
@@ -130,8 +127,8 @@ void CpuManager::MultiCoreRunGuestLoop() {
130 physical_core = &kernel.CurrentPhysicalCore(); 127 physical_core = &kernel.CurrentPhysicalCore();
131 } 128 }
132 system.ExitDynarmicProfile(); 129 system.ExitDynarmicProfile();
133 auto& scheduler = kernel.CurrentScheduler(); 130 physical_core->ArmInterface().ClearExclusiveState();
134 scheduler.TryDoContextSwitch(); 131 kernel.CurrentScheduler()->RescheduleCurrentCore();
135 } 132 }
136} 133}
137 134
@@ -140,25 +137,21 @@ void CpuManager::MultiCoreRunIdleThread() {
140 while (true) { 137 while (true) {
141 auto& physical_core = kernel.CurrentPhysicalCore(); 138 auto& physical_core = kernel.CurrentPhysicalCore();
142 physical_core.Idle(); 139 physical_core.Idle();
143 auto& scheduler = kernel.CurrentScheduler(); 140 kernel.CurrentScheduler()->RescheduleCurrentCore();
144 scheduler.TryDoContextSwitch();
145 } 141 }
146} 142}
147 143
148void CpuManager::MultiCoreRunSuspendThread() { 144void CpuManager::MultiCoreRunSuspendThread() {
149 auto& kernel = system.Kernel(); 145 auto& kernel = system.Kernel();
150 { 146 kernel.CurrentScheduler()->OnThreadStart();
151 auto& sched = kernel.CurrentScheduler();
152 sched.OnThreadStart();
153 }
154 while (true) { 147 while (true) {
155 auto core = kernel.GetCurrentHostThreadID(); 148 auto core = kernel.GetCurrentHostThreadID();
156 auto& scheduler = kernel.CurrentScheduler(); 149 auto& scheduler = *kernel.CurrentScheduler();
157 Kernel::Thread* current_thread = scheduler.GetCurrentThread(); 150 Kernel::Thread* current_thread = scheduler.GetCurrentThread();
158 Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context); 151 Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[core].host_context);
159 ASSERT(scheduler.ContextSwitchPending()); 152 ASSERT(scheduler.ContextSwitchPending());
160 ASSERT(core == kernel.GetCurrentHostThreadID()); 153 ASSERT(core == kernel.GetCurrentHostThreadID());
161 scheduler.TryDoContextSwitch(); 154 scheduler.RescheduleCurrentCore();
162 } 155 }
163} 156}
164 157
@@ -206,11 +199,8 @@ void CpuManager::MultiCorePause(bool paused) {
206 199
207void CpuManager::SingleCoreRunGuestThread() { 200void CpuManager::SingleCoreRunGuestThread() {
208 auto& kernel = system.Kernel(); 201 auto& kernel = system.Kernel();
209 { 202 kernel.CurrentScheduler()->OnThreadStart();
210 auto& sched = kernel.CurrentScheduler(); 203 auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
211 sched.OnThreadStart();
212 }
213 auto* thread = kernel.CurrentScheduler().GetCurrentThread();
214 auto& host_context = thread->GetHostContext(); 204 auto& host_context = thread->GetHostContext();
215 host_context->SetRewindPoint(GuestRewindFunction, this); 205 host_context->SetRewindPoint(GuestRewindFunction, this);
216 SingleCoreRunGuestLoop(); 206 SingleCoreRunGuestLoop();
@@ -218,7 +208,7 @@ void CpuManager::SingleCoreRunGuestThread() {
218 208
219void CpuManager::SingleCoreRunGuestLoop() { 209void CpuManager::SingleCoreRunGuestLoop() {
220 auto& kernel = system.Kernel(); 210 auto& kernel = system.Kernel();
221 auto* thread = kernel.CurrentScheduler().GetCurrentThread(); 211 auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
222 while (true) { 212 while (true) {
223 auto* physical_core = &kernel.CurrentPhysicalCore(); 213 auto* physical_core = &kernel.CurrentPhysicalCore();
224 system.EnterDynarmicProfile(); 214 system.EnterDynarmicProfile();
@@ -230,9 +220,10 @@ void CpuManager::SingleCoreRunGuestLoop() {
230 thread->SetPhantomMode(true); 220 thread->SetPhantomMode(true);
231 system.CoreTiming().Advance(); 221 system.CoreTiming().Advance();
232 thread->SetPhantomMode(false); 222 thread->SetPhantomMode(false);
223 physical_core->ArmInterface().ClearExclusiveState();
233 PreemptSingleCore(); 224 PreemptSingleCore();
234 auto& scheduler = kernel.Scheduler(current_core); 225 auto& scheduler = kernel.Scheduler(current_core);
235 scheduler.TryDoContextSwitch(); 226 scheduler.RescheduleCurrentCore();
236 } 227 }
237} 228}
238 229
@@ -244,51 +235,53 @@ void CpuManager::SingleCoreRunIdleThread() {
244 system.CoreTiming().AddTicks(1000U); 235 system.CoreTiming().AddTicks(1000U);
245 idle_count++; 236 idle_count++;
246 auto& scheduler = physical_core.Scheduler(); 237 auto& scheduler = physical_core.Scheduler();
247 scheduler.TryDoContextSwitch(); 238 scheduler.RescheduleCurrentCore();
248 } 239 }
249} 240}
250 241
251void CpuManager::SingleCoreRunSuspendThread() { 242void CpuManager::SingleCoreRunSuspendThread() {
252 auto& kernel = system.Kernel(); 243 auto& kernel = system.Kernel();
253 { 244 kernel.CurrentScheduler()->OnThreadStart();
254 auto& sched = kernel.CurrentScheduler();
255 sched.OnThreadStart();
256 }
257 while (true) { 245 while (true) {
258 auto core = kernel.GetCurrentHostThreadID(); 246 auto core = kernel.GetCurrentHostThreadID();
259 auto& scheduler = kernel.CurrentScheduler(); 247 auto& scheduler = *kernel.CurrentScheduler();
260 Kernel::Thread* current_thread = scheduler.GetCurrentThread(); 248 Kernel::Thread* current_thread = scheduler.GetCurrentThread();
261 Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context); 249 Common::Fiber::YieldTo(current_thread->GetHostContext(), core_data[0].host_context);
262 ASSERT(scheduler.ContextSwitchPending()); 250 ASSERT(scheduler.ContextSwitchPending());
263 ASSERT(core == kernel.GetCurrentHostThreadID()); 251 ASSERT(core == kernel.GetCurrentHostThreadID());
264 scheduler.TryDoContextSwitch(); 252 scheduler.RescheduleCurrentCore();
265 } 253 }
266} 254}
267 255
268void CpuManager::PreemptSingleCore(bool from_running_enviroment) { 256void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
269 std::size_t old_core = current_core; 257 {
270 auto& scheduler = system.Kernel().Scheduler(old_core); 258 auto& scheduler = system.Kernel().Scheduler(current_core);
271 Kernel::Thread* current_thread = scheduler.GetCurrentThread(); 259 Kernel::Thread* current_thread = scheduler.GetCurrentThread();
272 if (idle_count >= 4 || from_running_enviroment) { 260 if (idle_count >= 4 || from_running_enviroment) {
273 if (!from_running_enviroment) { 261 if (!from_running_enviroment) {
274 system.CoreTiming().Idle(); 262 system.CoreTiming().Idle();
275 idle_count = 0; 263 idle_count = 0;
264 }
265 current_thread->SetPhantomMode(true);
266 system.CoreTiming().Advance();
267 current_thread->SetPhantomMode(false);
276 } 268 }
277 current_thread->SetPhantomMode(true); 269 current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
278 system.CoreTiming().Advance(); 270 system.CoreTiming().ResetTicks();
279 current_thread->SetPhantomMode(false); 271 scheduler.Unload(scheduler.GetCurrentThread());
272
273 auto& next_scheduler = system.Kernel().Scheduler(current_core);
274 Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext());
280 } 275 }
281 current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES); 276
282 system.CoreTiming().ResetTicks(); 277 // May have changed scheduler
283 scheduler.Unload(); 278 {
284 auto& next_scheduler = system.Kernel().Scheduler(current_core); 279 auto& scheduler = system.Kernel().Scheduler(current_core);
285 Common::Fiber::YieldTo(current_thread->GetHostContext(), next_scheduler.ControlContext()); 280 scheduler.Reload(scheduler.GetCurrentThread());
286 /// May have changed scheduler 281 auto* currrent_thread2 = scheduler.GetCurrentThread();
287 auto& current_scheduler = system.Kernel().Scheduler(current_core); 282 if (!currrent_thread2->IsIdleThread()) {
288 current_scheduler.Reload(); 283 idle_count = 0;
289 auto* currrent_thread2 = current_scheduler.GetCurrentThread(); 284 }
290 if (!currrent_thread2->IsIdleThread()) {
291 idle_count = 0;
292 } 285 }
293} 286}
294 287
@@ -369,8 +362,7 @@ void CpuManager::RunThread(std::size_t core) {
369 return; 362 return;
370 } 363 }
371 364
372 auto& scheduler = system.Kernel().CurrentScheduler(); 365 auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
373 Kernel::Thread* current_thread = scheduler.GetCurrentThread();
374 data.is_running = true; 366 data.is_running = true;
375 Common::Fiber::YieldTo(data.host_context, current_thread->GetHostContext()); 367 Common::Fiber::YieldTo(data.host_context, current_thread->GetHostContext());
376 data.is_running = false; 368 data.is_running = false;
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 048acd30e..20ffa7d47 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -12,8 +12,9 @@
12#include "core/hle/kernel/address_arbiter.h" 12#include "core/hle/kernel/address_arbiter.h"
13#include "core/hle/kernel/errors.h" 13#include "core/hle/kernel/errors.h"
14#include "core/hle/kernel/handle_table.h" 14#include "core/hle/kernel/handle_table.h"
15#include "core/hle/kernel/k_scheduler.h"
16#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
15#include "core/hle/kernel/kernel.h" 17#include "core/hle/kernel/kernel.h"
16#include "core/hle/kernel/scheduler.h"
17#include "core/hle/kernel/thread.h" 18#include "core/hle/kernel/thread.h"
18#include "core/hle/kernel/time_manager.h" 19#include "core/hle/kernel/time_manager.h"
19#include "core/hle/result.h" 20#include "core/hle/result.h"
@@ -58,7 +59,7 @@ ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 v
58} 59}
59 60
60ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) { 61ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
61 SchedulerLock lock(system.Kernel()); 62 KScopedSchedulerLock lock(system.Kernel());
62 const std::vector<std::shared_ptr<Thread>> waiting_threads = 63 const std::vector<std::shared_ptr<Thread>> waiting_threads =
63 GetThreadsWaitingOnAddress(address); 64 GetThreadsWaitingOnAddress(address);
64 WakeThreads(waiting_threads, num_to_wake); 65 WakeThreads(waiting_threads, num_to_wake);
@@ -67,7 +68,7 @@ ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
67 68
68ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, 69ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
69 s32 num_to_wake) { 70 s32 num_to_wake) {
70 SchedulerLock lock(system.Kernel()); 71 KScopedSchedulerLock lock(system.Kernel());
71 auto& memory = system.Memory(); 72 auto& memory = system.Memory();
72 73
73 // Ensure that we can write to the address. 74 // Ensure that we can write to the address.
@@ -92,7 +93,7 @@ ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32
92 93
93ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value, 94ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
94 s32 num_to_wake) { 95 s32 num_to_wake) {
95 SchedulerLock lock(system.Kernel()); 96 KScopedSchedulerLock lock(system.Kernel());
96 auto& memory = system.Memory(); 97 auto& memory = system.Memory();
97 98
98 // Ensure that we can write to the address. 99 // Ensure that we can write to the address.
@@ -153,11 +154,11 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
153 bool should_decrement) { 154 bool should_decrement) {
154 auto& memory = system.Memory(); 155 auto& memory = system.Memory();
155 auto& kernel = system.Kernel(); 156 auto& kernel = system.Kernel();
156 Thread* current_thread = system.CurrentScheduler().GetCurrentThread(); 157 Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
157 158
158 Handle event_handle = InvalidHandle; 159 Handle event_handle = InvalidHandle;
159 { 160 {
160 SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout); 161 KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
161 162
162 if (current_thread->IsPendingTermination()) { 163 if (current_thread->IsPendingTermination()) {
163 lock.CancelSleep(); 164 lock.CancelSleep();
@@ -210,7 +211,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
210 } 211 }
211 212
212 { 213 {
213 SchedulerLock lock(kernel); 214 KScopedSchedulerLock lock(kernel);
214 if (current_thread->IsWaitingForArbitration()) { 215 if (current_thread->IsWaitingForArbitration()) {
215 RemoveThread(SharedFrom(current_thread)); 216 RemoveThread(SharedFrom(current_thread));
216 current_thread->WaitForArbitration(false); 217 current_thread->WaitForArbitration(false);
@@ -223,11 +224,11 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
223ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) { 224ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
224 auto& memory = system.Memory(); 225 auto& memory = system.Memory();
225 auto& kernel = system.Kernel(); 226 auto& kernel = system.Kernel();
226 Thread* current_thread = system.CurrentScheduler().GetCurrentThread(); 227 Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
227 228
228 Handle event_handle = InvalidHandle; 229 Handle event_handle = InvalidHandle;
229 { 230 {
230 SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout); 231 KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
231 232
232 if (current_thread->IsPendingTermination()) { 233 if (current_thread->IsPendingTermination()) {
233 lock.CancelSleep(); 234 lock.CancelSleep();
@@ -265,7 +266,7 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t
265 } 266 }
266 267
267 { 268 {
268 SchedulerLock lock(kernel); 269 KScopedSchedulerLock lock(kernel);
269 if (current_thread->IsWaitingForArbitration()) { 270 if (current_thread->IsWaitingForArbitration()) {
270 RemoveThread(SharedFrom(current_thread)); 271 RemoveThread(SharedFrom(current_thread));
271 current_thread->WaitForArbitration(false); 272 current_thread->WaitForArbitration(false);
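
The pattern these hunks switch to is an RAII scheduler lock. A sketch under the patch's naming (the surrounding function is illustrative only; the call sites above pull KScopedSchedulerLock in via core/hle/kernel/k_scheduler.h):

#include "core/core.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"

void SignalExample(Core::System& system) {
    // Scheduling is locked for the duration of this scope.
    Kernel::KScopedSchedulerLock lock(system.Kernel());
    // ... wake or requeue threads while the lock is held ...
}   // releasing the lock here may trigger a reschedule on the affected cores
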
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
new file mode 100644
index 000000000..a133e8ed0
--- /dev/null
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -0,0 +1,52 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <mutex>
6
7#include "common/assert.h"
8#include "core/core.h"
9#include "core/hle/kernel/global_scheduler_context.h"
10#include "core/hle/kernel/k_scheduler.h"
11#include "core/hle/kernel/kernel.h"
12
13namespace Kernel {
14
15GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
16 : kernel{kernel}, scheduler_lock{kernel} {}
17
18GlobalSchedulerContext::~GlobalSchedulerContext() = default;
19
20void GlobalSchedulerContext::AddThread(std::shared_ptr<Thread> thread) {
21 std::scoped_lock lock{global_list_guard};
22 thread_list.push_back(std::move(thread));
23}
24
25void GlobalSchedulerContext::RemoveThread(std::shared_ptr<Thread> thread) {
26 std::scoped_lock lock{global_list_guard};
27 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
28 thread_list.end());
29}
30
31void GlobalSchedulerContext::PreemptThreads() {
32 // The priority levels at which the global scheduler preempts threads every 10 ms. They are
33 // ordered from Core 0 to Core 3.
34 static constexpr std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities{
35 59,
36 59,
37 59,
38 63,
39 };
40
41 ASSERT(IsLocked());
42 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
43 const u32 priority = preemption_priorities[core_id];
44 kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
45 }
46}
47
48bool GlobalSchedulerContext::IsLocked() const {
49 return scheduler_lock.IsLockedByCurrentThread();
50}
51
52} // namespace Kernel
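
PreemptThreads() asserts that the scheduler lock is held, so a caller is expected to look roughly like the sketch below (illustrative only; in practice the kernel drives this from a periodic 10 ms core-timing event):

#include "core/hle/kernel/global_scheduler_context.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"

void PreemptExample(Kernel::KernelCore& kernel) {
    Kernel::KScopedSchedulerLock lock(kernel);        // satisfies the IsLocked() assert
    kernel.GlobalSchedulerContext().PreemptThreads(); // rotate queues at priority 59/59/59/63
}
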
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
new file mode 100644
index 000000000..5c7b89290
--- /dev/null
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -0,0 +1,81 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <atomic>
8#include <vector>
9
10#include "common/common_types.h"
11#include "common/spin_lock.h"
12#include "core/hardware_properties.h"
13#include "core/hle/kernel/k_priority_queue.h"
14#include "core/hle/kernel/k_scheduler_lock.h"
15#include "core/hle/kernel/thread.h"
16
17namespace Kernel {
18
19class KernelCore;
20class SchedulerLock;
21
22using KSchedulerPriorityQueue =
23 KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
24constexpr s32 HighestCoreMigrationAllowedPriority = 2;
25
26class GlobalSchedulerContext final {
27 friend class KScheduler;
28
29public:
30 using LockType = KAbstractSchedulerLock<KScheduler>;
31
32 explicit GlobalSchedulerContext(KernelCore& kernel);
33 ~GlobalSchedulerContext();
34
35 /// Adds a new thread to the scheduler
36 void AddThread(std::shared_ptr<Thread> thread);
37
38 /// Removes a thread from the scheduler
39 void RemoveThread(std::shared_ptr<Thread> thread);
40
41 /// Returns a list of all threads managed by the scheduler
42 [[nodiscard]] const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
43 return thread_list;
44 }
45
46 /**
47 * Rotates the scheduling queues of threads at a preemption priority and then does
48 * some core rebalancing. Preemption priorities can be found in the array
49 * 'preemption_priorities'.
50 *
51 * @note This operation happens every 10ms.
52 */
53 void PreemptThreads();
54
55 /// Returns true if the global scheduler lock is acquired
56 bool IsLocked() const;
57
58 [[nodiscard]] LockType& SchedulerLock() {
59 return scheduler_lock;
60 }
61
62 [[nodiscard]] const LockType& SchedulerLock() const {
63 return scheduler_lock;
64 }
65
66private:
67 friend class KScopedSchedulerLock;
68 friend class KScopedSchedulerLockAndSleep;
69
70 KernelCore& kernel;
71
72 std::atomic_bool scheduler_update_needed{};
73 KSchedulerPriorityQueue priority_queue;
74 LockType scheduler_lock;
75
76 /// Lists all thread ids that aren't deleted/etc.
77 std::vector<std::shared_ptr<Thread>> thread_list;
78 Common::SpinLock global_list_guard{};
79};
80
81} // namespace Kernel
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index 3e745c18b..40988b0fd 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -8,9 +8,9 @@
8#include "core/core.h" 8#include "core/core.h"
9#include "core/hle/kernel/errors.h" 9#include "core/hle/kernel/errors.h"
10#include "core/hle/kernel/handle_table.h" 10#include "core/hle/kernel/handle_table.h"
11#include "core/hle/kernel/k_scheduler.h"
11#include "core/hle/kernel/kernel.h" 12#include "core/hle/kernel/kernel.h"
12#include "core/hle/kernel/process.h" 13#include "core/hle/kernel/process.h"
13#include "core/hle/kernel/scheduler.h"
14#include "core/hle/kernel/thread.h" 14#include "core/hle/kernel/thread.h"
15 15
16namespace Kernel { 16namespace Kernel {
@@ -105,7 +105,7 @@ bool HandleTable::IsValid(Handle handle) const {
105 105
106std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const { 106std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
107 if (handle == CurrentThread) { 107 if (handle == CurrentThread) {
108 return SharedFrom(kernel.CurrentScheduler().GetCurrentThread()); 108 return SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
109 } else if (handle == CurrentProcess) { 109 } else if (handle == CurrentProcess) {
110 return SharedFrom(kernel.CurrentProcess()); 110 return SharedFrom(kernel.CurrentProcess());
111 } 111 }
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 81f85643b..e75e80ad0 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -17,11 +17,12 @@
17#include "core/hle/kernel/errors.h" 17#include "core/hle/kernel/errors.h"
18#include "core/hle/kernel/handle_table.h" 18#include "core/hle/kernel/handle_table.h"
19#include "core/hle/kernel/hle_ipc.h" 19#include "core/hle/kernel/hle_ipc.h"
20#include "core/hle/kernel/k_scheduler.h"
21#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
20#include "core/hle/kernel/kernel.h" 22#include "core/hle/kernel/kernel.h"
21#include "core/hle/kernel/object.h" 23#include "core/hle/kernel/object.h"
22#include "core/hle/kernel/process.h" 24#include "core/hle/kernel/process.h"
23#include "core/hle/kernel/readable_event.h" 25#include "core/hle/kernel/readable_event.h"
24#include "core/hle/kernel/scheduler.h"
25#include "core/hle/kernel/server_session.h" 26#include "core/hle/kernel/server_session.h"
26#include "core/hle/kernel/thread.h" 27#include "core/hle/kernel/thread.h"
27#include "core/hle/kernel/time_manager.h" 28#include "core/hle/kernel/time_manager.h"
@@ -56,9 +57,9 @@ std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
56 writable_event = pair.writable; 57 writable_event = pair.writable;
57 } 58 }
58 59
60 Handle event_handle = InvalidHandle;
59 { 61 {
60 Handle event_handle = InvalidHandle; 62 KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout);
61 SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout);
62 thread->SetHLECallback( 63 thread->SetHLECallback(
63 [context = *this, callback](std::shared_ptr<Thread> thread) mutable -> bool { 64 [context = *this, callback](std::shared_ptr<Thread> thread) mutable -> bool {
64 ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT 65 ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT
@@ -74,9 +75,8 @@ std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
74 thread->SetStatus(ThreadStatus::WaitHLEEvent); 75 thread->SetStatus(ThreadStatus::WaitHLEEvent);
75 thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT); 76 thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
76 readable_event->AddWaitingThread(thread); 77 readable_event->AddWaitingThread(thread);
77 lock.Release();
78 thread->SetHLETimeEvent(event_handle);
79 } 78 }
79 thread->SetHLETimeEvent(event_handle);
80 80
81 is_thread_waiting = true; 81 is_thread_waiting = true;
82 82
diff --git a/src/core/hle/kernel/k_affinity_mask.h b/src/core/hle/kernel/k_affinity_mask.h
new file mode 100644
index 000000000..dd73781cd
--- /dev/null
+++ b/src/core/hle/kernel/k_affinity_mask.h
@@ -0,0 +1,58 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphere, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
7
8#pragma once
9
10#include "common/assert.h"
11#include "common/common_types.h"
12#include "core/hardware_properties.h"
13
14namespace Kernel {
15
16class KAffinityMask {
17public:
18 constexpr KAffinityMask() = default;
19
20 [[nodiscard]] constexpr u64 GetAffinityMask() const {
21 return this->mask;
22 }
23
24 constexpr void SetAffinityMask(u64 new_mask) {
25 ASSERT((new_mask & ~AllowedAffinityMask) == 0);
26 this->mask = new_mask;
27 }
28
29 [[nodiscard]] constexpr bool GetAffinity(s32 core) const {
30 return this->mask & GetCoreBit(core);
31 }
32
33 constexpr void SetAffinity(s32 core, bool set) {
34 ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
35
36 if (set) {
37 this->mask |= GetCoreBit(core);
38 } else {
39 this->mask &= ~GetCoreBit(core);
40 }
41 }
42
43 constexpr void SetAll() {
44 this->mask = AllowedAffinityMask;
45 }
46
47private:
48 [[nodiscard]] static constexpr u64 GetCoreBit(s32 core) {
49 ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
50 return (1ULL << core);
51 }
52
53 static constexpr u64 AllowedAffinityMask = (1ULL << Core::Hardware::NUM_CPU_CORES) - 1;
54
55 u64 mask{};
56};
57
58} // namespace Kernel
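
A short sketch of the new affinity-mask API (not part of the patch; values assume the 4-core Switch configuration):

#include "core/hle/kernel/k_affinity_mask.h"

void AffinityExample() {
    Kernel::KAffinityMask affinity;
    affinity.SetAffinity(0, true);                  // allow core 0
    affinity.SetAffinity(3, true);                  // allow core 3
    const bool on_core1 = affinity.GetAffinity(1);  // false: core 1 is not allowed
    affinity.SetAll();                              // allow every core
    const u64 mask = affinity.GetAffinityMask();    // 0b1111 with NUM_CPU_CORES == 4
}
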
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
new file mode 100644
index 000000000..01a577d0c
--- /dev/null
+++ b/src/core/hle/kernel/k_priority_queue.h
@@ -0,0 +1,449 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphere, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
7
8#pragma once
9
10#include <array>
11
12#include "common/assert.h"
13#include "common/bit_set.h"
14#include "common/bit_util.h"
15#include "common/common_types.h"
16
17namespace Kernel {
18
19class Thread;
20
21template <typename T>
22concept KPriorityQueueAffinityMask = !std::is_reference_v<T> && requires(T & t) {
23 { t.GetAffinityMask() }
24 ->std::convertible_to<u64>;
25 {t.SetAffinityMask(std::declval<u64>())};
26
27 { t.GetAffinity(std::declval<int32_t>()) }
28 ->std::same_as<bool>;
29 {t.SetAffinity(std::declval<int32_t>(), std::declval<bool>())};
30 {t.SetAll()};
31};
32
33template <typename T>
34concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T & t) {
35 {typename T::QueueEntry()};
36 {(typename T::QueueEntry()).Initialize()};
37 {(typename T::QueueEntry()).SetPrev(std::addressof(t))};
38 {(typename T::QueueEntry()).SetNext(std::addressof(t))};
39 { (typename T::QueueEntry()).GetNext() }
40 ->std::same_as<T*>;
41 { (typename T::QueueEntry()).GetPrev() }
42 ->std::same_as<T*>;
43 { t.GetPriorityQueueEntry(std::declval<s32>()) }
44 ->std::same_as<typename T::QueueEntry&>;
45
46 {t.GetAffinityMask()};
47 { typename std::remove_cvref<decltype(t.GetAffinityMask())>::type() }
48 ->KPriorityQueueAffinityMask;
49
50 { t.GetActiveCore() }
51 ->std::convertible_to<s32>;
52 { t.GetPriority() }
53 ->std::convertible_to<s32>;
54};
55
56template <typename Member, size_t _NumCores, int LowestPriority, int HighestPriority>
57requires KPriorityQueueMember<Member> class KPriorityQueue {
58public:
59 using AffinityMaskType = typename std::remove_cv_t<
60 typename std::remove_reference<decltype(std::declval<Member>().GetAffinityMask())>::type>;
61
62 static_assert(LowestPriority >= 0);
63 static_assert(HighestPriority >= 0);
64 static_assert(LowestPriority >= HighestPriority);
65 static constexpr size_t NumPriority = LowestPriority - HighestPriority + 1;
66 static constexpr size_t NumCores = _NumCores;
67
68 static constexpr bool IsValidCore(s32 core) {
69 return 0 <= core && core < static_cast<s32>(NumCores);
70 }
71
72 static constexpr bool IsValidPriority(s32 priority) {
73 return HighestPriority <= priority && priority <= LowestPriority + 1;
74 }
75
76private:
77 using Entry = typename Member::QueueEntry;
78
79public:
80 class KPerCoreQueue {
81 private:
82 std::array<Entry, NumCores> root{};
83
84 public:
85 constexpr KPerCoreQueue() {
86 for (auto& per_core_root : root) {
87 per_core_root.Initialize();
88 }
89 }
90
91 constexpr bool PushBack(s32 core, Member* member) {
92 // Get the entry associated with the member.
93 Entry& member_entry = member->GetPriorityQueueEntry(core);
94
95 // Get the entry associated with the end of the queue.
96 Member* tail = this->root[core].GetPrev();
97 Entry& tail_entry =
98 (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core];
99
100 // Link the entries.
101 member_entry.SetPrev(tail);
102 member_entry.SetNext(nullptr);
103 tail_entry.SetNext(member);
104 this->root[core].SetPrev(member);
105
106 return tail == nullptr;
107 }
108
109 constexpr bool PushFront(s32 core, Member* member) {
110 // Get the entry associated with the member.
111 Entry& member_entry = member->GetPriorityQueueEntry(core);
112
113 // Get the entry associated with the front of the queue.
114 Member* head = this->root[core].GetNext();
115 Entry& head_entry =
116 (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core];
117
118 // Link the entries.
119 member_entry.SetPrev(nullptr);
120 member_entry.SetNext(head);
121 head_entry.SetPrev(member);
122 this->root[core].SetNext(member);
123
124 return (head == nullptr);
125 }
126
127 constexpr bool Remove(s32 core, Member* member) {
128 // Get the entry associated with the member.
129 Entry& member_entry = member->GetPriorityQueueEntry(core);
130
131 // Get the entries associated with next and prev.
132 Member* prev = member_entry.GetPrev();
133 Member* next = member_entry.GetNext();
134 Entry& prev_entry =
135 (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : this->root[core];
136 Entry& next_entry =
137 (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core];
138
139 // Unlink.
140 prev_entry.SetNext(next);
141 next_entry.SetPrev(prev);
142
143 return (this->GetFront(core) == nullptr);
144 }
145
146 constexpr Member* GetFront(s32 core) const {
147 return this->root[core].GetNext();
148 }
149 };
150
151 class KPriorityQueueImpl {
152 public:
153 constexpr KPriorityQueueImpl() = default;
154
155 constexpr void PushBack(s32 priority, s32 core, Member* member) {
156 ASSERT(IsValidCore(core));
157 ASSERT(IsValidPriority(priority));
158
159 if (priority > LowestPriority) {
160 return;
161 }
162
163 if (this->queues[priority].PushBack(core, member)) {
164 this->available_priorities[core].SetBit(priority);
165 }
166 }
167
168 constexpr void PushFront(s32 priority, s32 core, Member* member) {
169 ASSERT(IsValidCore(core));
170 ASSERT(IsValidPriority(priority));
171
172 if (priority > LowestPriority) {
173 return;
174 }
175
176 if (this->queues[priority].PushFront(core, member)) {
177 this->available_priorities[core].SetBit(priority);
178 }
179 }
180
181 constexpr void Remove(s32 priority, s32 core, Member* member) {
182 ASSERT(IsValidCore(core));
183 ASSERT(IsValidPriority(priority));
184
185 if (priority > LowestPriority) {
186 return;
187 }
188
189 if (this->queues[priority].Remove(core, member)) {
190 this->available_priorities[core].ClearBit(priority);
191 }
192 }
193
194 constexpr Member* GetFront(s32 core) const {
195 ASSERT(IsValidCore(core));
196
197 const s32 priority =
198 static_cast<s32>(this->available_priorities[core].CountLeadingZero());
199 if (priority <= LowestPriority) {
200 return this->queues[priority].GetFront(core);
201 } else {
202 return nullptr;
203 }
204 }
205
206 constexpr Member* GetFront(s32 priority, s32 core) const {
207 ASSERT(IsValidCore(core));
208 ASSERT(IsValidPriority(priority));
209
210 if (priority <= LowestPriority) {
211 return this->queues[priority].GetFront(core);
212 } else {
213 return nullptr;
214 }
215 }
216
217 constexpr Member* GetNext(s32 core, const Member* member) const {
218 ASSERT(IsValidCore(core));
219
220 Member* next = member->GetPriorityQueueEntry(core).GetNext();
221 if (next == nullptr) {
222 const s32 priority = static_cast<s32>(
223 this->available_priorities[core].GetNextSet(member->GetPriority()));
224 if (priority <= LowestPriority) {
225 next = this->queues[priority].GetFront(core);
226 }
227 }
228 return next;
229 }
230
231 constexpr void MoveToFront(s32 priority, s32 core, Member* member) {
232 ASSERT(IsValidCore(core));
233 ASSERT(IsValidPriority(priority));
234
235 if (priority <= LowestPriority) {
236 this->queues[priority].Remove(core, member);
237 this->queues[priority].PushFront(core, member);
238 }
239 }
240
241 constexpr Member* MoveToBack(s32 priority, s32 core, Member* member) {
242 ASSERT(IsValidCore(core));
243 ASSERT(IsValidPriority(priority));
244
245 if (priority <= LowestPriority) {
246 this->queues[priority].Remove(core, member);
247 this->queues[priority].PushBack(core, member);
248 return this->queues[priority].GetFront(core);
249 } else {
250 return nullptr;
251 }
252 }
253
254 private:
255 std::array<KPerCoreQueue, NumPriority> queues{};
256 std::array<Common::BitSet64<NumPriority>, NumCores> available_priorities{};
257 };
258
259private:
260 KPriorityQueueImpl scheduled_queue;
261 KPriorityQueueImpl suggested_queue;
262
263private:
264 constexpr void ClearAffinityBit(u64& affinity, s32 core) {
265 affinity &= ~(u64(1) << core);
266 }
267
268 constexpr s32 GetNextCore(u64& affinity) {
269 const s32 core = Common::CountTrailingZeroes64(affinity);
270 ClearAffinityBit(affinity, core);
271 return core;
272 }
273
274 constexpr void PushBack(s32 priority, Member* member) {
275 ASSERT(IsValidPriority(priority));
276
277 // Push onto the scheduled queue for its core, if we can.
278 u64 affinity = member->GetAffinityMask().GetAffinityMask();
279 if (const s32 core = member->GetActiveCore(); core >= 0) {
280 this->scheduled_queue.PushBack(priority, core, member);
281 ClearAffinityBit(affinity, core);
282 }
283
284 // And suggest the thread for all other cores.
285 while (affinity) {
286 this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
287 }
288 }
289
290 constexpr void PushFront(s32 priority, Member* member) {
291 ASSERT(IsValidPriority(priority));
292
293 // Push onto the scheduled queue for its core, if we can.
294 u64 affinity = member->GetAffinityMask().GetAffinityMask();
295 if (const s32 core = member->GetActiveCore(); core >= 0) {
296 this->scheduled_queue.PushFront(priority, core, member);
297 ClearAffinityBit(affinity, core);
298 }
299
300 // And suggest the thread for all other cores.
301 // Note: Nintendo pushes onto the back of the suggested queue, not the front.
302 while (affinity) {
303 this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
304 }
305 }
306
307 constexpr void Remove(s32 priority, Member* member) {
308 ASSERT(IsValidPriority(priority));
309
310 // Remove from the scheduled queue for its core.
311 u64 affinity = member->GetAffinityMask().GetAffinityMask();
312 if (const s32 core = member->GetActiveCore(); core >= 0) {
313 this->scheduled_queue.Remove(priority, core, member);
314 ClearAffinityBit(affinity, core);
315 }
316
317 // Remove from the suggested queue for all other cores.
318 while (affinity) {
319 this->suggested_queue.Remove(priority, GetNextCore(affinity), member);
320 }
321 }
322
323public:
324 constexpr KPriorityQueue() = default;
325
326 // Getters.
327 constexpr Member* GetScheduledFront(s32 core) const {
328 return this->scheduled_queue.GetFront(core);
329 }
330
331 constexpr Member* GetScheduledFront(s32 core, s32 priority) const {
332 return this->scheduled_queue.GetFront(priority, core);
333 }
334
335 constexpr Member* GetSuggestedFront(s32 core) const {
336 return this->suggested_queue.GetFront(core);
337 }
338
339 constexpr Member* GetSuggestedFront(s32 core, s32 priority) const {
340 return this->suggested_queue.GetFront(priority, core);
341 }
342
343 constexpr Member* GetScheduledNext(s32 core, const Member* member) const {
344 return this->scheduled_queue.GetNext(core, member);
345 }
346
347 constexpr Member* GetSuggestedNext(s32 core, const Member* member) const {
348 return this->suggested_queue.GetNext(core, member);
349 }
350
351 constexpr Member* GetSamePriorityNext(s32 core, const Member* member) const {
352 return member->GetPriorityQueueEntry(core).GetNext();
353 }
354
355 // Mutators.
356 constexpr void PushBack(Member* member) {
357 this->PushBack(member->GetPriority(), member);
358 }
359
360 constexpr void Remove(Member* member) {
361 this->Remove(member->GetPriority(), member);
362 }
363
364 constexpr void MoveToScheduledFront(Member* member) {
365 this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
366 }
367
368     constexpr Member* MoveToScheduledBack(Member* member) {
369 return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(),
370 member);
371 }
372
373 // First class fancy operations.
374 constexpr void ChangePriority(s32 prev_priority, bool is_running, Member* member) {
375 ASSERT(IsValidPriority(prev_priority));
376
377 // Remove the member from the queues.
378 const s32 new_priority = member->GetPriority();
379 this->Remove(prev_priority, member);
380
381 // And enqueue. If the member is running, we want to keep it running.
382 if (is_running) {
383 this->PushFront(new_priority, member);
384 } else {
385 this->PushBack(new_priority, member);
386 }
387 }
388
389 constexpr void ChangeAffinityMask(s32 prev_core, const AffinityMaskType& prev_affinity,
390 Member* member) {
391 // Get the new information.
392 const s32 priority = member->GetPriority();
393 const AffinityMaskType& new_affinity = member->GetAffinityMask();
394 const s32 new_core = member->GetActiveCore();
395
396 // Remove the member from all queues it was in before.
397 for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
398 if (prev_affinity.GetAffinity(core)) {
399 if (core == prev_core) {
400 this->scheduled_queue.Remove(priority, core, member);
401 } else {
402 this->suggested_queue.Remove(priority, core, member);
403 }
404 }
405 }
406
407 // And add the member to all queues it should be in now.
408 for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
409 if (new_affinity.GetAffinity(core)) {
410 if (core == new_core) {
411 this->scheduled_queue.PushBack(priority, core, member);
412 } else {
413 this->suggested_queue.PushBack(priority, core, member);
414 }
415 }
416 }
417 }
418
419 constexpr void ChangeCore(s32 prev_core, Member* member, bool to_front = false) {
420 // Get the new information.
421 const s32 new_core = member->GetActiveCore();
422 const s32 priority = member->GetPriority();
423
424 // We don't need to do anything if the core is the same.
425 if (prev_core != new_core) {
426 // Remove from the scheduled queue for the previous core.
427 if (prev_core >= 0) {
428 this->scheduled_queue.Remove(priority, prev_core, member);
429 }
430
431 // Remove from the suggested queue and add to the scheduled queue for the new core.
432 if (new_core >= 0) {
433 this->suggested_queue.Remove(priority, new_core, member);
434 if (to_front) {
435 this->scheduled_queue.PushFront(priority, new_core, member);
436 } else {
437 this->scheduled_queue.PushBack(priority, new_core, member);
438 }
439 }
440
441 // Add to the suggested queue for the previous core.
442 if (prev_core >= 0) {
443 this->suggested_queue.PushBack(priority, prev_core, member);
444 }
445 }
446 }
447};
448
449} // namespace Kernel
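As an illustration of how the queue is meant to be instantiated (the values below are examples, not the ones the scheduler uses): the template parameters are <Member, NumCores, LowestPriority, HighestPriority>, where a numerically lower priority value is better and LowestPriority >= HighestPriority.

    // Illustrative only: a queue of Kernel::Thread over 4 cores with priorities
    // 0 (best) through 63 (worst). Thread satisfies KPriorityQueueMember above.
    using ExamplePriorityQueue = Kernel::KPriorityQueue<Kernel::Thread, 4, 63, 0>;

    // Typical calls (thread state is read through the concept's getters):
    //   queue.PushBack(thread);                                  // enqueue per priority/affinity
    //   queue.ChangePriority(old_priority, is_running, thread);  // re-queue after a priority change
    //   Kernel::Thread* front = queue.GetScheduledFront(0);      // best runnable thread on core 0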
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
new file mode 100644
index 000000000..c5fd82a6b
--- /dev/null
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -0,0 +1,784 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphere, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
7
8#include "common/assert.h"
9#include "common/bit_util.h"
10#include "common/fiber.h"
11#include "common/logging/log.h"
12#include "core/arm/arm_interface.h"
13#include "core/core.h"
14#include "core/core_timing.h"
15#include "core/cpu_manager.h"
16#include "core/hle/kernel/k_scheduler.h"
17#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
18#include "core/hle/kernel/kernel.h"
19#include "core/hle/kernel/physical_core.h"
20#include "core/hle/kernel/process.h"
21#include "core/hle/kernel/thread.h"
22#include "core/hle/kernel/time_manager.h"
23
24namespace Kernel {
25
26static void IncrementScheduledCount(Kernel::Thread* thread) {
27 if (auto process = thread->GetOwnerProcess(); process) {
28 process->IncrementScheduledCount();
29 }
30}
31
32void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
33 Core::EmuThreadHandle global_thread) {
34 u32 current_core = global_thread.host_handle;
35 bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
36 (current_core < Core::Hardware::NUM_CPU_CORES);
37
38 while (cores_pending_reschedule != 0) {
39 u32 core = Common::CountTrailingZeroes64(cores_pending_reschedule);
40 ASSERT(core < Core::Hardware::NUM_CPU_CORES);
41 if (!must_context_switch || core != current_core) {
42 auto& phys_core = kernel.PhysicalCore(core);
43 phys_core.Interrupt();
44 } else {
45 must_context_switch = true;
46 }
47 cores_pending_reschedule &= ~(1ULL << core);
48 }
49 if (must_context_switch) {
50 auto core_scheduler = kernel.CurrentScheduler();
51 kernel.ExitSVCProfile();
52 core_scheduler->RescheduleCurrentCore();
53 kernel.EnterSVCProfile();
54 }
55}
56
57u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
58 std::scoped_lock lock{guard};
59 if (Thread* prev_highest_thread = this->state.highest_priority_thread;
60 prev_highest_thread != highest_thread) {
61 if (prev_highest_thread != nullptr) {
62 IncrementScheduledCount(prev_highest_thread);
63 prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks());
64 }
65 if (this->state.should_count_idle) {
66 if (highest_thread != nullptr) {
67 // if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
68 // process->SetRunningThread(this->core_id, highest_thread,
69 // this->state.idle_count);
70 //}
71 } else {
72 this->state.idle_count++;
73 }
74 }
75
76 this->state.highest_priority_thread = highest_thread;
77 this->state.needs_scheduling = true;
78 return (1ULL << this->core_id);
79 } else {
80 return 0;
81 }
82}
83
84u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
85 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
86
87 // Clear that we need to update.
88 ClearSchedulerUpdateNeeded(kernel);
89
90 u64 cores_needing_scheduling = 0, idle_cores = 0;
91 Thread* top_threads[Core::Hardware::NUM_CPU_CORES];
92 auto& priority_queue = GetPriorityQueue(kernel);
93
94     // We want to go over all cores, finding the highest priority thread and determining if
95     // scheduling is needed for that core.
96 for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
97 Thread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
98 if (top_thread != nullptr) {
99 // If the thread has no waiters, we need to check if the process has a thread pinned.
100 // TODO(bunnei): Implement thread pinning
101 } else {
102 idle_cores |= (1ULL << core_id);
103 }
104
105 top_threads[core_id] = top_thread;
106 cores_needing_scheduling |=
107 kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
108 }
109
110 // Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
111 while (idle_cores != 0) {
112 u32 core_id = Common::CountTrailingZeroes64(idle_cores);
113 if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
114 s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
115 size_t num_candidates = 0;
116
117 // While we have a suggested thread, try to migrate it!
118 while (suggested != nullptr) {
119 // Check if the suggested thread is the top thread on its core.
120 const s32 suggested_core = suggested->GetActiveCore();
121 if (Thread* top_thread =
122 (suggested_core >= 0) ? top_threads[suggested_core] : nullptr;
123 top_thread != suggested) {
124                     // Make sure we're not dealing with a thread whose priority is too high for migration.
125 if (top_thread != nullptr &&
126 top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) {
127 break;
128 }
129
130 // The suggested thread isn't bound to its core, so we can migrate it!
131 suggested->SetActiveCore(core_id);
132 priority_queue.ChangeCore(suggested_core, suggested);
133
134 top_threads[core_id] = suggested;
135 cores_needing_scheduling |=
136 kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
137 break;
138 }
139
140 // Note this core as a candidate for migration.
141 ASSERT(num_candidates < Core::Hardware::NUM_CPU_CORES);
142 migration_candidates[num_candidates++] = suggested_core;
143 suggested = priority_queue.GetSuggestedNext(core_id, suggested);
144 }
145
146 // If suggested is nullptr, we failed to migrate a specific thread. So let's try all our
147 // candidate cores' top threads.
148 if (suggested == nullptr) {
149 for (size_t i = 0; i < num_candidates; i++) {
150 // Check if there's some other thread that can run on the candidate core.
151 const s32 candidate_core = migration_candidates[i];
152 suggested = top_threads[candidate_core];
153 if (Thread* next_on_candidate_core =
154 priority_queue.GetScheduledNext(candidate_core, suggested);
155 next_on_candidate_core != nullptr) {
156 // The candidate core can run some other thread! We'll migrate its current
157 // top thread to us.
158 top_threads[candidate_core] = next_on_candidate_core;
159 cores_needing_scheduling |=
160 kernel.Scheduler(candidate_core)
161 .UpdateHighestPriorityThread(top_threads[candidate_core]);
162
163 // Perform the migration.
164 suggested->SetActiveCore(core_id);
165 priority_queue.ChangeCore(candidate_core, suggested);
166
167 top_threads[core_id] = suggested;
168 cores_needing_scheduling |=
169 kernel.Scheduler(core_id).UpdateHighestPriorityThread(
170 top_threads[core_id]);
171 break;
172 }
173 }
174 }
175 }
176
177 idle_cores &= ~(1ULL << core_id);
178 }
179
180 return cores_needing_scheduling;
181}
182
183void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state) {
184 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
185
186 // Check if the state has changed, because if it hasn't there's nothing to do.
187 const auto cur_state = thread->scheduling_state;
188 if (cur_state == old_state) {
189 return;
190 }
191
192 // Update the priority queues.
193 if (old_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
194 // If we were previously runnable, then we're not runnable now, and we should remove.
195 GetPriorityQueue(kernel).Remove(thread);
196 IncrementScheduledCount(thread);
197 SetSchedulerUpdateNeeded(kernel);
198 } else if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
199 // If we're now runnable, then we weren't previously, and we should add.
200 GetPriorityQueue(kernel).PushBack(thread);
201 IncrementScheduledCount(thread);
202 SetSchedulerUpdateNeeded(kernel);
203 }
204}
205
206void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
207 u32 old_priority) {
208
209 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
210
211 // If the thread is runnable, we want to change its priority in the queue.
212 if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
213 GetPriorityQueue(kernel).ChangePriority(
214 old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
215 IncrementScheduledCount(thread);
216 SetSchedulerUpdateNeeded(kernel);
217 }
218}
219
220void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
221 const KAffinityMask& old_affinity, s32 old_core) {
222 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
223
224 // If the thread is runnable, we want to change its affinity in the queue.
225 if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
226 GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread);
227 IncrementScheduledCount(thread);
228 SetSchedulerUpdateNeeded(kernel);
229 }
230}
231
232void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
233 ASSERT(system.GlobalSchedulerContext().IsLocked());
234
235 // Get a reference to the priority queue.
236 auto& kernel = system.Kernel();
237 auto& priority_queue = GetPriorityQueue(kernel);
238
239 // Rotate the front of the queue to the end.
240 Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
241 Thread* next_thread = nullptr;
242 if (top_thread != nullptr) {
243 next_thread = priority_queue.MoveToScheduledBack(top_thread);
244 if (next_thread != top_thread) {
245 IncrementScheduledCount(top_thread);
246 IncrementScheduledCount(next_thread);
247 }
248 }
249
250 // While we have a suggested thread, try to migrate it!
251 {
252 Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
253 while (suggested != nullptr) {
254 // Check if the suggested thread is the top thread on its core.
255 const s32 suggested_core = suggested->GetActiveCore();
256 if (Thread* top_on_suggested_core =
257 (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
258 : nullptr;
259 top_on_suggested_core != suggested) {
260 // If the next thread is a new thread that has been waiting longer than our
261 // suggestion, we prefer it to our suggestion.
262 if (top_thread != next_thread && next_thread != nullptr &&
263 next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) {
264 suggested = nullptr;
265 break;
266 }
267
268 // If we're allowed to do a migration, do one.
269 // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion
270 // to the front of the queue.
271 if (top_on_suggested_core == nullptr ||
272 top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
273 suggested->SetActiveCore(core_id);
274 priority_queue.ChangeCore(suggested_core, suggested, true);
275 IncrementScheduledCount(suggested);
276 break;
277 }
278 }
279
280 // Get the next suggestion.
281 suggested = priority_queue.GetSamePriorityNext(core_id, suggested);
282 }
283 }
284
285 // Now that we might have migrated a thread with the same priority, check if we can do better.
286
287 {
288 Thread* best_thread = priority_queue.GetScheduledFront(core_id);
289 if (best_thread == GetCurrentThread()) {
290 best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
291 }
292
293 // If the best thread we can choose has a priority the same or worse than ours, try to
294 // migrate a higher priority thread.
295 if (best_thread != nullptr && best_thread->GetPriority() >= static_cast<u32>(priority)) {
296 Thread* suggested = priority_queue.GetSuggestedFront(core_id);
297 while (suggested != nullptr) {
298 // If the suggestion's priority is the same as ours, don't bother.
299 if (suggested->GetPriority() >= best_thread->GetPriority()) {
300 break;
301 }
302
303 // Check if the suggested thread is the top thread on its core.
304 const s32 suggested_core = suggested->GetActiveCore();
305 if (Thread* top_on_suggested_core =
306 (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
307 : nullptr;
308 top_on_suggested_core != suggested) {
309 // If we're allowed to do a migration, do one.
310 // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
311 // suggestion to the front of the queue.
312 if (top_on_suggested_core == nullptr ||
313 top_on_suggested_core->GetPriority() >=
314 HighestCoreMigrationAllowedPriority) {
315 suggested->SetActiveCore(core_id);
316 priority_queue.ChangeCore(suggested_core, suggested, true);
317 IncrementScheduledCount(suggested);
318 break;
319 }
320 }
321
322 // Get the next suggestion.
323 suggested = priority_queue.GetSuggestedNext(core_id, suggested);
324 }
325 }
326 }
327
328 // After a rotation, we need a scheduler update.
329 SetSchedulerUpdateNeeded(kernel);
330}
331
332bool KScheduler::CanSchedule(KernelCore& kernel) {
333 return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1;
334}
335
336bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
337 return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire);
338}
339
340void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) {
341 kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release);
342}
343
344void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
345 kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release);
346}
347
348void KScheduler::DisableScheduling(KernelCore& kernel) {
349 if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
350 ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
351 scheduler->GetCurrentThread()->DisableDispatch();
352 }
353}
354
355void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
356 Core::EmuThreadHandle global_thread) {
357 if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
358 scheduler->GetCurrentThread()->EnableDispatch();
359 }
360 RescheduleCores(kernel, cores_needing_scheduling, global_thread);
361}
362
363u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
364 if (IsSchedulerUpdateNeeded(kernel)) {
365 return UpdateHighestPriorityThreadsImpl(kernel);
366 } else {
367 return 0;
368 }
369}
370
371KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
372 return kernel.GlobalSchedulerContext().priority_queue;
373}
374
375void KScheduler::YieldWithoutCoreMigration() {
376 auto& kernel = system.Kernel();
377
378 // Validate preconditions.
379 ASSERT(CanSchedule(kernel));
380 ASSERT(kernel.CurrentProcess() != nullptr);
381
382 // Get the current thread and process.
383 Thread& cur_thread = *GetCurrentThread();
384 Process& cur_process = *kernel.CurrentProcess();
385
386 // If the thread's yield count matches, there's nothing for us to do.
387 if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
388 return;
389 }
390
391 // Get a reference to the priority queue.
392 auto& priority_queue = GetPriorityQueue(kernel);
393
394 // Perform the yield.
395 {
396 KScopedSchedulerLock lock(kernel);
397
398 const auto cur_state = cur_thread.scheduling_state;
399 if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
400 // Put the current thread at the back of the queue.
401 Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
402 IncrementScheduledCount(std::addressof(cur_thread));
403
404 // If the next thread is different, we have an update to perform.
405 if (next_thread != std::addressof(cur_thread)) {
406 SetSchedulerUpdateNeeded(kernel);
407 } else {
408 // Otherwise, set the thread's yield count so that we won't waste work until the
409 // process is scheduled again.
410 cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
411 }
412 }
413 }
414}
415
416void KScheduler::YieldWithCoreMigration() {
417 auto& kernel = system.Kernel();
418
419 // Validate preconditions.
420 ASSERT(CanSchedule(kernel));
421 ASSERT(kernel.CurrentProcess() != nullptr);
422
423 // Get the current thread and process.
424 Thread& cur_thread = *GetCurrentThread();
425 Process& cur_process = *kernel.CurrentProcess();
426
427 // If the thread's yield count matches, there's nothing for us to do.
428 if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
429 return;
430 }
431
432 // Get a reference to the priority queue.
433 auto& priority_queue = GetPriorityQueue(kernel);
434
435 // Perform the yield.
436 {
437 KScopedSchedulerLock lock(kernel);
438
439 const auto cur_state = cur_thread.scheduling_state;
440 if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
441 // Get the current active core.
442 const s32 core_id = cur_thread.GetActiveCore();
443
444 // Put the current thread at the back of the queue.
445 Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
446 IncrementScheduledCount(std::addressof(cur_thread));
447
448 // While we have a suggested thread, try to migrate it!
449 bool recheck = false;
450 Thread* suggested = priority_queue.GetSuggestedFront(core_id);
451 while (suggested != nullptr) {
452 // Check if the suggested thread is the thread running on its core.
453 const s32 suggested_core = suggested->GetActiveCore();
454
455 if (Thread* running_on_suggested_core =
456 (suggested_core >= 0)
457 ? kernel.Scheduler(suggested_core).state.highest_priority_thread
458 : nullptr;
459 running_on_suggested_core != suggested) {
460                     // If the current thread's priority is higher than our suggestion's, we prefer
461                     // the next thread to the suggestion. We also prefer the next thread when the
462                     // current thread's priority is equal to the suggestion's, but the next thread
463                     // has been waiting longer.
464 if ((suggested->GetPriority() > cur_thread.GetPriority()) ||
465 (suggested->GetPriority() == cur_thread.GetPriority() &&
466 next_thread != std::addressof(cur_thread) &&
467 next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick())) {
468 suggested = nullptr;
469 break;
470 }
471
472 // If we're allowed to do a migration, do one.
473 // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
474 // suggestion to the front of the queue.
475 if (running_on_suggested_core == nullptr ||
476 running_on_suggested_core->GetPriority() >=
477 HighestCoreMigrationAllowedPriority) {
478 suggested->SetActiveCore(core_id);
479 priority_queue.ChangeCore(suggested_core, suggested, true);
480 IncrementScheduledCount(suggested);
481 break;
482 } else {
483 // We couldn't perform a migration, but we should check again on a future
484 // yield.
485 recheck = true;
486 }
487 }
488
489 // Get the next suggestion.
490 suggested = priority_queue.GetSuggestedNext(core_id, suggested);
491 }
492
493 // If we still have a suggestion or the next thread is different, we have an update to
494 // perform.
495 if (suggested != nullptr || next_thread != std::addressof(cur_thread)) {
496 SetSchedulerUpdateNeeded(kernel);
497 } else if (!recheck) {
498 // Otherwise if we don't need to re-check, set the thread's yield count so that we
499 // won't waste work until the process is scheduled again.
500 cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
501 }
502 }
503 }
504}
505
506void KScheduler::YieldToAnyThread() {
507 auto& kernel = system.Kernel();
508
509 // Validate preconditions.
510 ASSERT(CanSchedule(kernel));
511 ASSERT(kernel.CurrentProcess() != nullptr);
512
513 // Get the current thread and process.
514 Thread& cur_thread = *GetCurrentThread();
515 Process& cur_process = *kernel.CurrentProcess();
516
517 // If the thread's yield count matches, there's nothing for us to do.
518 if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
519 return;
520 }
521
522 // Get a reference to the priority queue.
523 auto& priority_queue = GetPriorityQueue(kernel);
524
525 // Perform the yield.
526 {
527 KScopedSchedulerLock lock(kernel);
528
529 const auto cur_state = cur_thread.scheduling_state;
530 if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
531 // Get the current active core.
532 const s32 core_id = cur_thread.GetActiveCore();
533
534 // Migrate the current thread to core -1.
535 cur_thread.SetActiveCore(-1);
536 priority_queue.ChangeCore(core_id, std::addressof(cur_thread));
537 IncrementScheduledCount(std::addressof(cur_thread));
538
539 // If there's nothing scheduled, we can try to perform a migration.
540 if (priority_queue.GetScheduledFront(core_id) == nullptr) {
541 // While we have a suggested thread, try to migrate it!
542 Thread* suggested = priority_queue.GetSuggestedFront(core_id);
543 while (suggested != nullptr) {
544 // Check if the suggested thread is the top thread on its core.
545 const s32 suggested_core = suggested->GetActiveCore();
546 if (Thread* top_on_suggested_core =
547 (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
548 : nullptr;
549 top_on_suggested_core != suggested) {
550 // If we're allowed to do a migration, do one.
551 if (top_on_suggested_core == nullptr ||
552 top_on_suggested_core->GetPriority() >=
553 HighestCoreMigrationAllowedPriority) {
554 suggested->SetActiveCore(core_id);
555 priority_queue.ChangeCore(suggested_core, suggested);
556 IncrementScheduledCount(suggested);
557 }
558
559 // Regardless of whether we migrated, we had a candidate, so we're done.
560 break;
561 }
562
563 // Get the next suggestion.
564 suggested = priority_queue.GetSuggestedNext(core_id, suggested);
565 }
566
567 // If the suggestion is different from the current thread, we need to perform an
568 // update.
569 if (suggested != std::addressof(cur_thread)) {
570 SetSchedulerUpdateNeeded(kernel);
571 } else {
572 // Otherwise, set the thread's yield count so that we won't waste work until the
573 // process is scheduled again.
574 cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
575 }
576 } else {
577 // Otherwise, we have an update to perform.
578 SetSchedulerUpdateNeeded(kernel);
579 }
580 }
581 }
582}
583
584KScheduler::KScheduler(Core::System& system, std::size_t core_id)
585 : system(system), core_id(core_id) {
586 switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
587 this->state.needs_scheduling = true;
588 this->state.interrupt_task_thread_runnable = false;
589 this->state.should_count_idle = false;
590 this->state.idle_count = 0;
591 this->state.idle_thread_stack = nullptr;
592 this->state.highest_priority_thread = nullptr;
593}
594
595KScheduler::~KScheduler() = default;
596
597Thread* KScheduler::GetCurrentThread() const {
598 if (current_thread) {
599 return current_thread;
600 }
601 return idle_thread;
602}
603
604u64 KScheduler::GetLastContextSwitchTicks() const {
605 return last_context_switch_time;
606}
607
608void KScheduler::RescheduleCurrentCore() {
609 ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
610
611 auto& phys_core = system.Kernel().PhysicalCore(core_id);
612 if (phys_core.IsInterrupted()) {
613 phys_core.ClearInterrupt();
614 }
615 guard.lock();
616 if (this->state.needs_scheduling) {
617 Schedule();
618 } else {
619 guard.unlock();
620 }
621}
622
623void KScheduler::OnThreadStart() {
624 SwitchContextStep2();
625}
626
627void KScheduler::Unload(Thread* thread) {
628 if (thread) {
629 thread->SetIsRunning(false);
630 if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) {
631 system.ArmInterface(core_id).ExceptionalExit();
632 thread->SetContinuousOnSVC(false);
633 }
634 if (!thread->IsHLEThread() && !thread->HasExited()) {
635 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
636 cpu_core.SaveContext(thread->GetContext32());
637 cpu_core.SaveContext(thread->GetContext64());
638 // Save the TPIDR_EL0 system register in case it was modified.
639 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
640 cpu_core.ClearExclusiveState();
641 }
642 thread->context_guard.unlock();
643 }
644}
645
646void KScheduler::Reload(Thread* thread) {
647 if (thread) {
648 ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
649 "Thread must be runnable.");
650
651         // Mark the thread as running again on this core
652 thread->SetIsRunning(true);
653 thread->SetWasRunning(false);
654
655 auto* const thread_owner_process = thread->GetOwnerProcess();
656 if (thread_owner_process != nullptr) {
657 system.Kernel().MakeCurrentProcess(thread_owner_process);
658 }
659 if (!thread->IsHLEThread()) {
660 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
661 cpu_core.LoadContext(thread->GetContext32());
662 cpu_core.LoadContext(thread->GetContext64());
663 cpu_core.SetTlsAddress(thread->GetTLSAddress());
664 cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
665 cpu_core.ClearExclusiveState();
666 }
667 }
668}
669
670void KScheduler::SwitchContextStep2() {
671 // Load context of new thread
672 Reload(current_thread);
673
674 RescheduleCurrentCore();
675}
676
677void KScheduler::ScheduleImpl() {
678 Thread* previous_thread = current_thread;
679 current_thread = state.highest_priority_thread;
680
681 this->state.needs_scheduling = false;
682
683 if (current_thread == previous_thread) {
684 guard.unlock();
685 return;
686 }
687
688 Process* const previous_process = system.Kernel().CurrentProcess();
689
690 UpdateLastContextSwitchTime(previous_thread, previous_process);
691
692 // Save context for previous thread
693 Unload(previous_thread);
694
695 std::shared_ptr<Common::Fiber>* old_context;
696 if (previous_thread != nullptr) {
697 old_context = &previous_thread->GetHostContext();
698 } else {
699 old_context = &idle_thread->GetHostContext();
700 }
701 guard.unlock();
702
703 Common::Fiber::YieldTo(*old_context, switch_fiber);
704     // When a thread wakes up, it may be resumed by a different core's scheduler.
705 auto& next_scheduler = *system.Kernel().CurrentScheduler();
706 next_scheduler.SwitchContextStep2();
707}
708
709void KScheduler::OnSwitch(void* this_scheduler) {
710 KScheduler* sched = static_cast<KScheduler*>(this_scheduler);
711 sched->SwitchToCurrent();
712}
713
714void KScheduler::SwitchToCurrent() {
715 while (true) {
716 {
717 std::scoped_lock lock{guard};
718 current_thread = state.highest_priority_thread;
719 this->state.needs_scheduling = false;
720 }
721 const auto is_switch_pending = [this] {
722 std::scoped_lock lock{guard};
723 return state.needs_scheduling.load(std::memory_order_relaxed);
724 };
725 do {
726 if (current_thread != nullptr && !current_thread->IsHLEThread()) {
727 current_thread->context_guard.lock();
728 if (!current_thread->IsRunnable()) {
729 current_thread->context_guard.unlock();
730 break;
731 }
732 if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
733 current_thread->context_guard.unlock();
734 break;
735 }
736 }
737 std::shared_ptr<Common::Fiber>* next_context;
738 if (current_thread != nullptr) {
739 next_context = &current_thread->GetHostContext();
740 } else {
741 next_context = &idle_thread->GetHostContext();
742 }
743 Common::Fiber::YieldTo(switch_fiber, *next_context);
744 } while (!is_switch_pending());
745 }
746}
747
748void KScheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
749 const u64 prev_switch_ticks = last_context_switch_time;
750 const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
751 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
752
753 if (thread != nullptr) {
754 thread->UpdateCPUTimeTicks(update_ticks);
755 }
756
757 if (process != nullptr) {
758 process->UpdateCPUTimeTicks(update_ticks);
759 }
760
761 last_context_switch_time = most_recent_switch_ticks;
762}
763
764void KScheduler::Initialize() {
765 std::string name = "Idle Thread Id:" + std::to_string(core_id);
766 std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
767 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
768 ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
769 auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
770 nullptr, std::move(init_func), init_func_parameter);
771 idle_thread = thread_res.Unwrap().get();
772
773 {
774 KScopedSchedulerLock lock{system.Kernel()};
775 idle_thread->SetStatus(ThreadStatus::Ready);
776 }
777}
778
779KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
780 : KScopedLock(kernel.GlobalSchedulerContext().SchedulerLock()) {}
781
782KScopedSchedulerLock::~KScopedSchedulerLock() = default;
783
784} // namespace Kernel
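The three yield entry points above are reached from the thread-yield SVC. A hedged sketch of that dispatch, assuming the usual Horizon convention that sleep timeouts of 0, -1 and -2 nanoseconds select the yield variant (the function below is illustrative and not the actual svc.cpp handler):

    // Illustrative dispatch only; names and structure are assumptions.
    void YieldFromSvc(Core::System& system, s64 nanoseconds) {
        auto* scheduler = system.Kernel().CurrentScheduler();
        if (nanoseconds == 0) {
            scheduler->YieldWithoutCoreMigration();
        } else if (nanoseconds == -1) {
            scheduler->YieldWithCoreMigration();
        } else if (nanoseconds == -2) {
            scheduler->YieldToAnyThread();
        } else {
            // A positive value would be an actual sleep with a scheduled wakeup event.
        }
    }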
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
new file mode 100644
index 000000000..e84abc84c
--- /dev/null
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -0,0 +1,201 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphere, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
7
8#pragma once
9
10#include <atomic>
11
12#include "common/common_types.h"
13#include "common/spin_lock.h"
14#include "core/hle/kernel/global_scheduler_context.h"
15#include "core/hle/kernel/k_priority_queue.h"
16#include "core/hle/kernel/k_scheduler_lock.h"
17#include "core/hle/kernel/k_scoped_lock.h"
18
19namespace Common {
20class Fiber;
21}
22
23namespace Core {
24class System;
25}
26
27namespace Kernel {
28
29class KernelCore;
30class Process;
31class SchedulerLock;
32class Thread;
33
34class KScheduler final {
35public:
36 explicit KScheduler(Core::System& system, std::size_t core_id);
37 ~KScheduler();
38
39 /// Reschedules to the next available thread (call after current thread is suspended)
40 void RescheduleCurrentCore();
41
42 /// Reschedules cores pending reschedule, to be called on EnableScheduling.
43 static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
44 Core::EmuThreadHandle global_thread);
45
46     /// The next two are used only in single-core mode.
47 /// Unload current thread before preempting core.
48 void Unload(Thread* thread);
49
50 /// Reload current thread after core preemption.
51 void Reload(Thread* thread);
52
53 /// Gets the current running thread
54 [[nodiscard]] Thread* GetCurrentThread() const;
55
56 /// Gets the timestamp for the last context switch in ticks.
57 [[nodiscard]] u64 GetLastContextSwitchTicks() const;
58
59 [[nodiscard]] bool ContextSwitchPending() const {
60 return state.needs_scheduling.load(std::memory_order_relaxed);
61 }
62
63 void Initialize();
64
65 void OnThreadStart();
66
67 [[nodiscard]] std::shared_ptr<Common::Fiber>& ControlContext() {
68 return switch_fiber;
69 }
70
71 [[nodiscard]] const std::shared_ptr<Common::Fiber>& ControlContext() const {
72 return switch_fiber;
73 }
74
75 [[nodiscard]] u64 UpdateHighestPriorityThread(Thread* highest_thread);
76
77 /**
78     * Takes a thread and moves it to the back of its priority list.
79     *
80     * @note This operation can be redundant; in that case, no scheduling change occurs.
81 */
82 void YieldWithoutCoreMigration();
83
84 /**
85     * Takes a thread and moves it to the back of its priority list.
86     * Afterwards, tries to pick a suggested thread from the suggested queue that has been
87     * waiting longer, or has a better priority, than the next thread on the core.
88     *
89     * @note This operation can be redundant; in that case, no scheduling change occurs.
90 */
91 void YieldWithCoreMigration();
92
93 /**
94     * Takes a thread and moves it out of the scheduling queue and into the suggested
95     * queue. If no thread can be scheduled on that core afterwards, a suggested thread
96     * is obtained instead.
97     *
98     * @note This operation can be redundant; in that case, no scheduling change occurs.
99 */
100 void YieldToAnyThread();
101
102 /// Notify the scheduler a thread's status has changed.
103 static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state);
104
105 /// Notify the scheduler a thread's priority has changed.
106 static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
107 u32 old_priority);
108
109 /// Notify the scheduler a thread's core and/or affinity mask has changed.
110 static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
111 const KAffinityMask& old_affinity, s32 old_core);
112
113 static bool CanSchedule(KernelCore& kernel);
114 static bool IsSchedulerUpdateNeeded(const KernelCore& kernel);
115 static void SetSchedulerUpdateNeeded(KernelCore& kernel);
116 static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
117 static void DisableScheduling(KernelCore& kernel);
118 static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
119 Core::EmuThreadHandle global_thread);
120 [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
121
122private:
123 friend class GlobalSchedulerContext;
124
125 /**
126 * Takes care of selecting the new scheduled threads in three steps:
127 *
128     * 1. First, a thread is selected from the top of the priority queue. If a thread
129     * is obtained, we are done; otherwise we move on to step two.
130     *
131     * 2. Second, we try to get a suggested thread that is not assigned to any core, or
132     * one that is not the top thread on its core.
133     *
134     * 3. Third, if no suggested thread is found, we do a second pass and migrate another
135     * core's top thread to this core, provided that core can run its next thread instead.
136 *
137     * @return The cores that need scheduling.
138 */
139 [[nodiscard]] static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
140
141 [[nodiscard]] static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel);
142
143 void RotateScheduledQueue(s32 core_id, s32 priority);
144
145 void Schedule() {
146 ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
147 this->ScheduleImpl();
148 }
149
150 /// Switches the CPU's active thread context to that of the specified thread
151 void ScheduleImpl();
152
153     /// When a thread wakes up, it must run this through its new scheduler
154 void SwitchContextStep2();
155
156 /**
157 * Called on every context switch to update the internal timestamp
158 * This also updates the running time ticks for the given thread and
159 * process using the following difference:
160 *
161 * ticks += most_recent_ticks - last_context_switch_ticks
162 *
163 * The internal tick timestamp for the scheduler is simply the
164 * most recent tick count retrieved. No special arithmetic is
165 * applied to it.
166 */
167 void UpdateLastContextSwitchTime(Thread* thread, Process* process);
168
169 static void OnSwitch(void* this_scheduler);
170 void SwitchToCurrent();
171
172 Thread* current_thread{};
173 Thread* idle_thread{};
174
175 std::shared_ptr<Common::Fiber> switch_fiber{};
176
177 struct SchedulingState {
178 std::atomic<bool> needs_scheduling;
179 bool interrupt_task_thread_runnable{};
180 bool should_count_idle{};
181 u64 idle_count{};
182 Thread* highest_priority_thread{};
183 void* idle_thread_stack{};
184 };
185
186 SchedulingState state;
187
188 Core::System& system;
189 u64 last_context_switch_time{};
190 const std::size_t core_id;
191
192 Common::SpinLock guard{};
193};
194
195class KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {
196public:
197 explicit KScopedSchedulerLock(KernelCore& kernel);
198 ~KScopedSchedulerLock();
199};
200
201} // namespace Kernel
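The static OnThread* hooks above are intended to be called with the scheduler lock held, after the thread's own fields have already been updated; they only adjust the priority queue and flag that a scheduler update is needed. A sketch of the intended call order (the setter name below is an assumption for illustration, not the real Thread API):

    // Hypothetical call site, shown only to illustrate ordering.
    void ChangePriority(Kernel::KernelCore& kernel, Kernel::Thread* thread, u32 new_priority) {
        Kernel::KScopedSchedulerLock lock{kernel};
        const u32 old_priority = thread->GetPriority();
        thread->SetPriorityValue(new_priority);  // assumed setter: updates the thread's field only
        Kernel::KScheduler::OnThreadPriorityChanged(
            kernel, thread, kernel.CurrentScheduler()->GetCurrentThread(), old_priority);
    }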
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
new file mode 100644
index 000000000..2d675b39e
--- /dev/null
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -0,0 +1,74 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphere, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
7
8#pragma once
9
10#include "common/assert.h"
11#include "common/spin_lock.h"
12#include "core/hardware_properties.h"
13
14namespace Kernel {
15
16class KernelCore;
17
18template <typename SchedulerType>
19class KAbstractSchedulerLock {
20public:
21 explicit KAbstractSchedulerLock(KernelCore& kernel) : kernel{kernel} {}
22
23 bool IsLockedByCurrentThread() const {
24 return this->owner_thread == kernel.GetCurrentEmuThreadID();
25 }
26
27 void Lock() {
28 if (this->IsLockedByCurrentThread()) {
29 // If we already own the lock, we can just increment the count.
30 ASSERT(this->lock_count > 0);
31 this->lock_count++;
32 } else {
33 // Otherwise, we want to disable scheduling and acquire the spinlock.
34 SchedulerType::DisableScheduling(kernel);
35 this->spin_lock.lock();
36
37 // For debug, ensure that our state is valid.
38 ASSERT(this->lock_count == 0);
39 ASSERT(this->owner_thread == Core::EmuThreadHandle::InvalidHandle());
40
41 // Increment count, take ownership.
42 this->lock_count = 1;
43 this->owner_thread = kernel.GetCurrentEmuThreadID();
44 }
45 }
46
47 void Unlock() {
48 ASSERT(this->IsLockedByCurrentThread());
49 ASSERT(this->lock_count > 0);
50
51 // Release an instance of the lock.
52 if ((--this->lock_count) == 0) {
53 // We're no longer going to hold the lock. Take note of what cores need scheduling.
54 const u64 cores_needing_scheduling =
55 SchedulerType::UpdateHighestPriorityThreads(kernel);
56 Core::EmuThreadHandle leaving_thread = owner_thread;
57
58 // Note that we no longer hold the lock, and unlock the spinlock.
59 this->owner_thread = Core::EmuThreadHandle::InvalidHandle();
60 this->spin_lock.unlock();
61
62 // Enable scheduling, and perform a rescheduling operation.
63 SchedulerType::EnableScheduling(kernel, cores_needing_scheduling, leaving_thread);
64 }
65 }
66
67private:
68 KernelCore& kernel;
69 Common::SpinLock spin_lock{};
70 s32 lock_count{};
71 Core::EmuThreadHandle owner_thread{Core::EmuThreadHandle::InvalidHandle()};
72};
73
74} // namespace Kernel
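The lock is reentrant per emulated thread: when the current owner locks again, only lock_count is incremented, and the reschedule in Unlock() happens only when the outermost hold is released. A small sketch of that behaviour through the scoped wrapper declared in k_scheduler.h (illustrative only):

    void Example(Kernel::KernelCore& kernel) {
        Kernel::KScopedSchedulerLock outer{kernel};      // lock_count 0 -> 1, dispatch disabled
        {
            Kernel::KScopedSchedulerLock inner{kernel};  // same owner: 1 -> 2, no spinlock acquire
        }                                                // 2 -> 1, lock still held
    }                                                    // 1 -> 0, reschedule and re-enable dispatch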
diff --git a/src/core/hle/kernel/k_scoped_lock.h b/src/core/hle/kernel/k_scoped_lock.h
new file mode 100644
index 000000000..d7cc557b2
--- /dev/null
+++ b/src/core/hle/kernel/k_scoped_lock.h
@@ -0,0 +1,41 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphere, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
7
8#pragma once
9
10#include "common/common_types.h"
11
12namespace Kernel {
13
14template <typename T>
15concept KLockable = !std::is_reference_v<T> && requires(T & t) {
16 { t.Lock() }
17 ->std::same_as<void>;
18 { t.Unlock() }
19 ->std::same_as<void>;
20};
21
22template <typename T>
23requires KLockable<T> class KScopedLock {
24public:
25 explicit KScopedLock(T* l) : lock_ptr(l) {
26 this->lock_ptr->Lock();
27 }
28 explicit KScopedLock(T& l) : KScopedLock(std::addressof(l)) { /* ... */
29 }
30 ~KScopedLock() {
31 this->lock_ptr->Unlock();
32 }
33
34 KScopedLock(const KScopedLock&) = delete;
35 KScopedLock(KScopedLock&&) = delete;
36
37private:
38 T* lock_ptr;
39};
40
41} // namespace Kernel
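Any type that exposes void Lock() and void Unlock() models KLockable and can be guarded by KScopedLock; the scheduler lock in the new headers is the primary user. A minimal conforming type, for illustration only:

    // Hypothetical type used only to demonstrate the concept.
    class ExampleLock {
    public:
        void Lock() {}
        void Unlock() {}
    };
    static_assert(Kernel::KLockable<ExampleLock>);

    void Guarded(ExampleLock& l) {
        Kernel::KScopedLock<ExampleLock> guard{l};  // Lock() now, Unlock() at scope exit
    }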
diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
new file mode 100644
index 000000000..2bb3817fa
--- /dev/null
+++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
@@ -0,0 +1,50 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphere, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
7
8#pragma once
9
10#include "common/common_types.h"
11#include "core/hle/kernel/handle_table.h"
12#include "core/hle/kernel/kernel.h"
13#include "core/hle/kernel/thread.h"
14#include "core/hle/kernel/time_manager.h"
15
16namespace Kernel {
17
18class KScopedSchedulerLockAndSleep {
19public:
20 explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* t,
21 s64 timeout)
22 : kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) {
23 event_handle = InvalidHandle;
24
25 // Lock the scheduler.
26 kernel.GlobalSchedulerContext().scheduler_lock.Lock();
27 }
28
29 ~KScopedSchedulerLockAndSleep() {
30 // Register the sleep.
31 if (this->timeout_tick > 0) {
32 kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick);
33 }
34
35 // Unlock the scheduler.
36 kernel.GlobalSchedulerContext().scheduler_lock.Unlock();
37 }
38
39 void CancelSleep() {
40 this->timeout_tick = 0;
41 }
42
43private:
44 KernelCore& kernel;
45 Handle& event_handle;
46 Thread* thread{};
47 s64 timeout_tick{};
48};
49
50} // namespace Kernel
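The RAII helper above pairs the scheduler lock with an optional timed wakeup: the timeout event is registered only in the destructor, and only if CancelSleep() was not called. A hedged sketch of the intended wait pattern, written as it would read inside namespace Kernel; `kernel`, `current_thread`, `timeout_ticks` and the condition are assumed to exist:

    // Illustrative wait-with-timeout, not an actual call site.
    Handle event_handle = InvalidHandle;
    {
        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout_ticks);
        if (condition_already_met) {
            lock.CancelSleep();  // the destructor will not schedule a wakeup event
        } else {
            current_thread->SetStatus(ThreadStatus::WaitSleep);  // illustrative wait state
        }
    }   // scheduler lock released here; a wakeup event is scheduled only if the sleep is still pending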
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 929db696d..04cae3a43 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -27,6 +27,7 @@
27#include "core/hle/kernel/client_port.h" 27#include "core/hle/kernel/client_port.h"
28#include "core/hle/kernel/errors.h" 28#include "core/hle/kernel/errors.h"
29#include "core/hle/kernel/handle_table.h" 29#include "core/hle/kernel/handle_table.h"
30#include "core/hle/kernel/k_scheduler.h"
30#include "core/hle/kernel/kernel.h" 31#include "core/hle/kernel/kernel.h"
31#include "core/hle/kernel/memory/memory_layout.h" 32#include "core/hle/kernel/memory/memory_layout.h"
32#include "core/hle/kernel/memory/memory_manager.h" 33#include "core/hle/kernel/memory/memory_manager.h"
@@ -34,7 +35,6 @@
34#include "core/hle/kernel/physical_core.h" 35#include "core/hle/kernel/physical_core.h"
35#include "core/hle/kernel/process.h" 36#include "core/hle/kernel/process.h"
36#include "core/hle/kernel/resource_limit.h" 37#include "core/hle/kernel/resource_limit.h"
37#include "core/hle/kernel/scheduler.h"
38#include "core/hle/kernel/shared_memory.h" 38#include "core/hle/kernel/shared_memory.h"
39#include "core/hle/kernel/synchronization.h" 39#include "core/hle/kernel/synchronization.h"
40#include "core/hle/kernel/thread.h" 40#include "core/hle/kernel/thread.h"
@@ -49,17 +49,18 @@ namespace Kernel {
49 49
50struct KernelCore::Impl { 50struct KernelCore::Impl {
51 explicit Impl(Core::System& system, KernelCore& kernel) 51 explicit Impl(Core::System& system, KernelCore& kernel)
52 : global_scheduler{kernel}, synchronization{system}, time_manager{system}, 52 : synchronization{system}, time_manager{system}, global_handle_table{kernel}, system{
53 global_handle_table{kernel}, system{system} {} 53 system} {}
54 54
55 void SetMulticore(bool is_multicore) { 55 void SetMulticore(bool is_multicore) {
56 this->is_multicore = is_multicore; 56 this->is_multicore = is_multicore;
57 } 57 }
58 58
59 void Initialize(KernelCore& kernel) { 59 void Initialize(KernelCore& kernel) {
60 Shutdown();
61 RegisterHostThread(); 60 RegisterHostThread();
62 61
62 global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
63
63 InitializePhysicalCores(); 64 InitializePhysicalCores();
64 InitializeSystemResourceLimit(kernel); 65 InitializeSystemResourceLimit(kernel);
65 InitializeMemoryLayout(); 66 InitializeMemoryLayout();
@@ -86,29 +87,20 @@ struct KernelCore::Impl {
86 } 87 }
87 } 88 }
88 89
89 for (std::size_t i = 0; i < cores.size(); i++) {
90 cores[i].Shutdown();
91 schedulers[i].reset();
92 }
93 cores.clear(); 90 cores.clear();
94 91
95 process_list.clear(); 92 process_list.clear();
93
96 current_process = nullptr; 94 current_process = nullptr;
97 95
98 system_resource_limit = nullptr; 96 system_resource_limit = nullptr;
99 97
100 global_handle_table.Clear(); 98 global_handle_table.Clear();
101 preemption_event = nullptr;
102 99
103 global_scheduler.Shutdown(); 100 preemption_event = nullptr;
104 101
105 named_ports.clear(); 102 named_ports.clear();
106 103
107 for (auto& core : cores) {
108 core.Shutdown();
109 }
110 cores.clear();
111
112 exclusive_monitor.reset(); 104 exclusive_monitor.reset();
113 105
114 num_host_threads = 0; 106 num_host_threads = 0;
@@ -121,7 +113,7 @@ struct KernelCore::Impl {
121 exclusive_monitor = 113 exclusive_monitor =
122 Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES); 114 Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
123 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 115 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
124 schedulers[i] = std::make_unique<Kernel::Scheduler>(system, i); 116 schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i);
125 cores.emplace_back(i, system, *schedulers[i], interrupts); 117 cores.emplace_back(i, system, *schedulers[i], interrupts);
126 } 118 }
127 } 119 }
@@ -154,8 +146,8 @@ struct KernelCore::Impl {
154 preemption_event = Core::Timing::CreateEvent( 146 preemption_event = Core::Timing::CreateEvent(
155 "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) { 147 "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
156 { 148 {
157 SchedulerLock lock(kernel); 149 KScopedSchedulerLock lock(kernel);
158 global_scheduler.PreemptThreads(); 150 global_scheduler_context->PreemptThreads();
159 } 151 }
160 const auto time_interval = std::chrono::nanoseconds{ 152 const auto time_interval = std::chrono::nanoseconds{
161 Core::Timing::msToCycles(std::chrono::milliseconds(10))}; 153 Core::Timing::msToCycles(std::chrono::milliseconds(10))};
@@ -245,7 +237,7 @@ struct KernelCore::Impl {
245 if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) { 237 if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) {
246 return result; 238 return result;
247 } 239 }
248 const Kernel::Scheduler& sched = cores[result.host_handle].Scheduler(); 240 const Kernel::KScheduler& sched = cores[result.host_handle].Scheduler();
249 const Kernel::Thread* current = sched.GetCurrentThread(); 241 const Kernel::Thread* current = sched.GetCurrentThread();
250 if (current != nullptr && !current->IsPhantomMode()) { 242 if (current != nullptr && !current->IsPhantomMode()) {
251 result.guest_handle = current->GetGlobalHandle(); 243 result.guest_handle = current->GetGlobalHandle();
@@ -314,7 +306,7 @@ struct KernelCore::Impl {
314 // Lists all processes that exist in the current session. 306 // Lists all processes that exist in the current session.
315 std::vector<std::shared_ptr<Process>> process_list; 307 std::vector<std::shared_ptr<Process>> process_list;
316 Process* current_process = nullptr; 308 Process* current_process = nullptr;
317 Kernel::GlobalScheduler global_scheduler; 309 std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
318 Kernel::Synchronization synchronization; 310 Kernel::Synchronization synchronization;
319 Kernel::TimeManager time_manager; 311 Kernel::TimeManager time_manager;
320 312
@@ -355,7 +347,7 @@ struct KernelCore::Impl {
355 347
356 std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{}; 348 std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
357 std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{}; 349 std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
358 std::array<std::unique_ptr<Kernel::Scheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{}; 350 std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
359 351
360 bool is_multicore{}; 352 bool is_multicore{};
361 std::thread::id single_core_thread_id{}; 353 std::thread::id single_core_thread_id{};
@@ -415,19 +407,19 @@ const std::vector<std::shared_ptr<Process>>& KernelCore::GetProcessList() const
415 return impl->process_list; 407 return impl->process_list;
416} 408}
417 409
418Kernel::GlobalScheduler& KernelCore::GlobalScheduler() { 410Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() {
419 return impl->global_scheduler; 411 return *impl->global_scheduler_context;
420} 412}
421 413
422const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const { 414const Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() const {
423 return impl->global_scheduler; 415 return *impl->global_scheduler_context;
424} 416}
425 417
426Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) { 418Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) {
427 return *impl->schedulers[id]; 419 return *impl->schedulers[id];
428} 420}
429 421
430const Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) const { 422const Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) const {
431 return *impl->schedulers[id]; 423 return *impl->schedulers[id];
432} 424}
433 425
@@ -451,16 +443,13 @@ const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
451 return impl->cores[core_id]; 443 return impl->cores[core_id];
452} 444}
453 445
454Kernel::Scheduler& KernelCore::CurrentScheduler() { 446Kernel::KScheduler* KernelCore::CurrentScheduler() {
455 u32 core_id = impl->GetCurrentHostThreadID(); 447 u32 core_id = impl->GetCurrentHostThreadID();
456 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); 448 if (core_id >= Core::Hardware::NUM_CPU_CORES) {
457 return *impl->schedulers[core_id]; 449 // This is expected when not called from a guest thread
458} 450 return {};
459 451 }
460const Kernel::Scheduler& KernelCore::CurrentScheduler() const { 452 return impl->schedulers[core_id].get();
461 u32 core_id = impl->GetCurrentHostThreadID();
462 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
463 return *impl->schedulers[core_id];
464} 453}
465 454
466std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() { 455std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
@@ -623,7 +612,7 @@ const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const {
623void KernelCore::Suspend(bool in_suspention) { 612void KernelCore::Suspend(bool in_suspention) {
624 const bool should_suspend = exception_exited || in_suspention; 613 const bool should_suspend = exception_exited || in_suspention;
625 { 614 {
626 SchedulerLock lock(*this); 615 KScopedSchedulerLock lock(*this);
627 ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep; 616 ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep;
628 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 617 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
629 impl->suspend_threads[i]->SetStatus(status); 618 impl->suspend_threads[i]->SetStatus(status);
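KernelCore::CurrentScheduler() now returns a pointer rather than a reference and yields nullptr when the calling host thread is not an emulated CPU core; the asserting const overload is dropped. A small sketch of the caller-side pattern this implies; GetCurrentGuestThread is an illustrative helper and not part of this commit.

#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/thread.h"

// Hedged sketch: callers must now tolerate a null scheduler when running on a
// host thread that is not emulating a guest core.
Kernel::Thread* GetCurrentGuestThread(Kernel::KernelCore& kernel) {
    Kernel::KScheduler* scheduler = kernel.CurrentScheduler();
    if (scheduler == nullptr) {
        return nullptr; // e.g. called from a service or UI host thread
    }
    return scheduler->GetCurrentThread();
}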
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index a73a93039..5846c3f39 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -35,12 +35,12 @@ class SlabHeap;
35 35
36class AddressArbiter; 36class AddressArbiter;
37class ClientPort; 37class ClientPort;
38class GlobalScheduler; 38class GlobalSchedulerContext;
39class HandleTable; 39class HandleTable;
40class PhysicalCore; 40class PhysicalCore;
41class Process; 41class Process;
42class ResourceLimit; 42class ResourceLimit;
43class Scheduler; 43class KScheduler;
44class SharedMemory; 44class SharedMemory;
45class Synchronization; 45class Synchronization;
46class Thread; 46class Thread;
@@ -102,16 +102,16 @@ public:
102 const std::vector<std::shared_ptr<Process>>& GetProcessList() const; 102 const std::vector<std::shared_ptr<Process>>& GetProcessList() const;
103 103
104 /// Gets the sole instance of the global scheduler 104 /// Gets the sole instance of the global scheduler
105 Kernel::GlobalScheduler& GlobalScheduler(); 105 Kernel::GlobalSchedulerContext& GlobalSchedulerContext();
106 106
107 /// Gets the sole instance of the global scheduler 107 /// Gets the sole instance of the global scheduler
108 const Kernel::GlobalScheduler& GlobalScheduler() const; 108 const Kernel::GlobalSchedulerContext& GlobalSchedulerContext() const;
109 109
110 /// Gets the sole instance of the Scheduler associated with cpu core 'id' 110 /// Gets the sole instance of the Scheduler associated with cpu core 'id'
111 Kernel::Scheduler& Scheduler(std::size_t id); 111 Kernel::KScheduler& Scheduler(std::size_t id);
112 112
113 /// Gets the sole instance of the Scheduler associated with cpu core 'id' 113 /// Gets the sole instance of the Scheduler associated with cpu core 'id'
114 const Kernel::Scheduler& Scheduler(std::size_t id) const; 114 const Kernel::KScheduler& Scheduler(std::size_t id) const;
115 115
116 /// Gets an instance of the respective physical CPU core. 116 /// Gets an instance of the respective physical CPU core.
117 Kernel::PhysicalCore& PhysicalCore(std::size_t id); 117 Kernel::PhysicalCore& PhysicalCore(std::size_t id);
@@ -120,10 +120,7 @@ public:
120 const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const; 120 const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
121 121
122 /// Gets the sole instance of the Scheduler at the current running core. 122 /// Gets the sole instance of the Scheduler at the current running core.
123 Kernel::Scheduler& CurrentScheduler(); 123 Kernel::KScheduler* CurrentScheduler();
124
125 /// Gets the sole instance of the Scheduler at the current running core.
126 const Kernel::Scheduler& CurrentScheduler() const;
127 124
128 /// Gets an instance of the current physical CPU core. 125 /// Gets an instance of the current physical CPU core.
129 Kernel::PhysicalCore& CurrentPhysicalCore(); 126 Kernel::PhysicalCore& CurrentPhysicalCore();
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index 8f6c944d1..4f8075e0e 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -11,11 +11,11 @@
11#include "core/core.h" 11#include "core/core.h"
12#include "core/hle/kernel/errors.h" 12#include "core/hle/kernel/errors.h"
13#include "core/hle/kernel/handle_table.h" 13#include "core/hle/kernel/handle_table.h"
14#include "core/hle/kernel/k_scheduler.h"
14#include "core/hle/kernel/kernel.h" 15#include "core/hle/kernel/kernel.h"
15#include "core/hle/kernel/mutex.h" 16#include "core/hle/kernel/mutex.h"
16#include "core/hle/kernel/object.h" 17#include "core/hle/kernel/object.h"
17#include "core/hle/kernel/process.h" 18#include "core/hle/kernel/process.h"
18#include "core/hle/kernel/scheduler.h"
19#include "core/hle/kernel/thread.h" 19#include "core/hle/kernel/thread.h"
20#include "core/hle/result.h" 20#include "core/hle/result.h"
21#include "core/memory.h" 21#include "core/memory.h"
@@ -73,9 +73,9 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
73 73
74 auto& kernel = system.Kernel(); 74 auto& kernel = system.Kernel();
75 std::shared_ptr<Thread> current_thread = 75 std::shared_ptr<Thread> current_thread =
76 SharedFrom(kernel.CurrentScheduler().GetCurrentThread()); 76 SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
77 { 77 {
78 SchedulerLock lock(kernel); 78 KScopedSchedulerLock lock(kernel);
79 // The mutex address must be 4-byte aligned 79 // The mutex address must be 4-byte aligned
80 if ((address % sizeof(u32)) != 0) { 80 if ((address % sizeof(u32)) != 0) {
81 return ERR_INVALID_ADDRESS; 81 return ERR_INVALID_ADDRESS;
@@ -114,7 +114,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
114 } 114 }
115 115
116 { 116 {
117 SchedulerLock lock(kernel); 117 KScopedSchedulerLock lock(kernel);
118 auto* owner = current_thread->GetLockOwner(); 118 auto* owner = current_thread->GetLockOwner();
119 if (owner != nullptr) { 119 if (owner != nullptr) {
120 owner->RemoveMutexWaiter(current_thread); 120 owner->RemoveMutexWaiter(current_thread);
@@ -153,10 +153,10 @@ std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thr
153 153
154ResultCode Mutex::Release(VAddr address) { 154ResultCode Mutex::Release(VAddr address) {
155 auto& kernel = system.Kernel(); 155 auto& kernel = system.Kernel();
156 SchedulerLock lock(kernel); 156 KScopedSchedulerLock lock(kernel);
157 157
158 std::shared_ptr<Thread> current_thread = 158 std::shared_ptr<Thread> current_thread =
159 SharedFrom(kernel.CurrentScheduler().GetCurrentThread()); 159 SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
160 160
161 auto [result, new_owner] = Unlock(current_thread, address); 161 auto [result, new_owner] = Unlock(current_thread, address);
162 162
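Every call site in this file now uses KScopedSchedulerLock in place of SchedulerLock: the guard is constructed on the stack and releases the global scheduler lock when the scope closes. A minimal usage sketch under that assumption; DoUnderSchedulerLock is an illustrative wrapper, not part of the commit.

#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/thread.h"

// Hedged sketch of the RAII locking discipline used throughout this commit.
void DoUnderSchedulerLock(Kernel::KernelCore& kernel, Kernel::Thread& thread) {
    {
        Kernel::KScopedSchedulerLock lock(kernel);     // acquires in the constructor
        thread.SetStatus(Kernel::ThreadStatus::Ready); // scheduler state changes stay inside
    } // destructor releases the lock (and, as with the old SchedulerLock, triggers rescheduling)
}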
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index 50aca5752..7fea45f96 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -7,14 +7,14 @@
7#include "core/arm/dynarmic/arm_dynarmic_32.h" 7#include "core/arm/dynarmic/arm_dynarmic_32.h"
8#include "core/arm/dynarmic/arm_dynarmic_64.h" 8#include "core/arm/dynarmic/arm_dynarmic_64.h"
9#include "core/core.h" 9#include "core/core.h"
10#include "core/hle/kernel/k_scheduler.h"
10#include "core/hle/kernel/kernel.h" 11#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/physical_core.h" 12#include "core/hle/kernel/physical_core.h"
12#include "core/hle/kernel/scheduler.h"
13 13
14namespace Kernel { 14namespace Kernel {
15 15
16PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system, 16PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system,
17 Kernel::Scheduler& scheduler, Core::CPUInterrupts& interrupts) 17 Kernel::KScheduler& scheduler, Core::CPUInterrupts& interrupts)
18 : core_index{core_index}, system{system}, scheduler{scheduler}, 18 : core_index{core_index}, system{system}, scheduler{scheduler},
19 interrupts{interrupts}, guard{std::make_unique<Common::SpinLock>()} {} 19 interrupts{interrupts}, guard{std::make_unique<Common::SpinLock>()} {}
20 20
@@ -43,10 +43,6 @@ void PhysicalCore::Idle() {
43 interrupts[core_index].AwaitInterrupt(); 43 interrupts[core_index].AwaitInterrupt();
44} 44}
45 45
46void PhysicalCore::Shutdown() {
47 scheduler.Shutdown();
48}
49
50bool PhysicalCore::IsInterrupted() const { 46bool PhysicalCore::IsInterrupted() const {
51 return interrupts[core_index].IsInterrupted(); 47 return interrupts[core_index].IsInterrupted();
52} 48}
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index 801d24c28..f2b0911aa 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -15,7 +15,7 @@ class SpinLock;
15} 15}
16 16
17namespace Kernel { 17namespace Kernel {
18class Scheduler; 18class KScheduler;
19} // namespace Kernel 19} // namespace Kernel
20 20
21namespace Core { 21namespace Core {
@@ -28,7 +28,7 @@ namespace Kernel {
28 28
29class PhysicalCore { 29class PhysicalCore {
30public: 30public:
31 PhysicalCore(std::size_t core_index, Core::System& system, Kernel::Scheduler& scheduler, 31 PhysicalCore(std::size_t core_index, Core::System& system, Kernel::KScheduler& scheduler,
32 Core::CPUInterrupts& interrupts); 32 Core::CPUInterrupts& interrupts);
33 ~PhysicalCore(); 33 ~PhysicalCore();
34 34
@@ -55,9 +55,6 @@ public:
55 /// Check if this core is interrupted 55 /// Check if this core is interrupted
56 bool IsInterrupted() const; 56 bool IsInterrupted() const;
57 57
58 // Shutdown this physical core.
59 void Shutdown();
60
61 bool IsInitialized() const { 58 bool IsInitialized() const {
62 return arm_interface != nullptr; 59 return arm_interface != nullptr;
63 } 60 }
@@ -82,18 +79,18 @@ public:
82 return core_index; 79 return core_index;
83 } 80 }
84 81
85 Kernel::Scheduler& Scheduler() { 82 Kernel::KScheduler& Scheduler() {
86 return scheduler; 83 return scheduler;
87 } 84 }
88 85
89 const Kernel::Scheduler& Scheduler() const { 86 const Kernel::KScheduler& Scheduler() const {
90 return scheduler; 87 return scheduler;
91 } 88 }
92 89
93private: 90private:
94 const std::size_t core_index; 91 const std::size_t core_index;
95 Core::System& system; 92 Core::System& system;
96 Kernel::Scheduler& scheduler; 93 Kernel::KScheduler& scheduler;
97 Core::CPUInterrupts& interrupts; 94 Core::CPUInterrupts& interrupts;
98 std::unique_ptr<Common::SpinLock> guard; 95 std::unique_ptr<Common::SpinLock> guard;
99 std::unique_ptr<Core::ARM_Interface> arm_interface; 96 std::unique_ptr<Core::ARM_Interface> arm_interface;
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index b17529dee..b905b486a 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -15,13 +15,13 @@
15#include "core/file_sys/program_metadata.h" 15#include "core/file_sys/program_metadata.h"
16#include "core/hle/kernel/code_set.h" 16#include "core/hle/kernel/code_set.h"
17#include "core/hle/kernel/errors.h" 17#include "core/hle/kernel/errors.h"
18#include "core/hle/kernel/k_scheduler.h"
18#include "core/hle/kernel/kernel.h" 19#include "core/hle/kernel/kernel.h"
19#include "core/hle/kernel/memory/memory_block_manager.h" 20#include "core/hle/kernel/memory/memory_block_manager.h"
20#include "core/hle/kernel/memory/page_table.h" 21#include "core/hle/kernel/memory/page_table.h"
21#include "core/hle/kernel/memory/slab_heap.h" 22#include "core/hle/kernel/memory/slab_heap.h"
22#include "core/hle/kernel/process.h" 23#include "core/hle/kernel/process.h"
23#include "core/hle/kernel/resource_limit.h" 24#include "core/hle/kernel/resource_limit.h"
24#include "core/hle/kernel/scheduler.h"
25#include "core/hle/kernel/thread.h" 25#include "core/hle/kernel/thread.h"
26#include "core/hle/lock.h" 26#include "core/hle/lock.h"
27#include "core/memory.h" 27#include "core/memory.h"
@@ -54,7 +54,7 @@ void SetupMainThread(Core::System& system, Process& owner_process, u32 priority,
54 auto& kernel = system.Kernel(); 54 auto& kernel = system.Kernel();
55 // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires 55 // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
56 { 56 {
57 SchedulerLock lock{kernel}; 57 KScopedSchedulerLock lock{kernel};
58 thread->SetStatus(ThreadStatus::Ready); 58 thread->SetStatus(ThreadStatus::Ready);
59 } 59 }
60} 60}
@@ -213,7 +213,7 @@ void Process::UnregisterThread(const Thread* thread) {
213} 213}
214 214
215ResultCode Process::ClearSignalState() { 215ResultCode Process::ClearSignalState() {
216 SchedulerLock lock(system.Kernel()); 216 KScopedSchedulerLock lock(system.Kernel());
217 if (status == ProcessStatus::Exited) { 217 if (status == ProcessStatus::Exited) {
218 LOG_ERROR(Kernel, "called on a terminated process instance."); 218 LOG_ERROR(Kernel, "called on a terminated process instance.");
219 return ERR_INVALID_STATE; 219 return ERR_INVALID_STATE;
@@ -314,7 +314,7 @@ void Process::PrepareForTermination() {
314 if (thread->GetOwnerProcess() != this) 314 if (thread->GetOwnerProcess() != this)
315 continue; 315 continue;
316 316
317 if (thread.get() == system.CurrentScheduler().GetCurrentThread()) 317 if (thread.get() == kernel.CurrentScheduler()->GetCurrentThread())
318 continue; 318 continue;
319 319
320 // TODO(Subv): When are the other running/ready threads terminated? 320 // TODO(Subv): When are the other running/ready threads terminated?
@@ -325,7 +325,7 @@ void Process::PrepareForTermination() {
325 } 325 }
326 }; 326 };
327 327
328 stop_threads(system.GlobalScheduler().GetThreadList()); 328 stop_threads(system.GlobalSchedulerContext().GetThreadList());
329 329
330 FreeTLSRegion(tls_region_address); 330 FreeTLSRegion(tls_region_address);
331 tls_region_address = 0; 331 tls_region_address = 0;
@@ -347,7 +347,7 @@ static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
347} 347}
348 348
349VAddr Process::CreateTLSRegion() { 349VAddr Process::CreateTLSRegion() {
350 SchedulerLock lock(system.Kernel()); 350 KScopedSchedulerLock lock(system.Kernel());
351 if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)}; 351 if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
352 tls_page_iter != tls_pages.cend()) { 352 tls_page_iter != tls_pages.cend()) {
353 return *tls_page_iter->ReserveSlot(); 353 return *tls_page_iter->ReserveSlot();
@@ -378,7 +378,7 @@ VAddr Process::CreateTLSRegion() {
378} 378}
379 379
380void Process::FreeTLSRegion(VAddr tls_address) { 380void Process::FreeTLSRegion(VAddr tls_address) {
381 SchedulerLock lock(system.Kernel()); 381 KScopedSchedulerLock lock(system.Kernel());
382 const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE); 382 const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
383 auto iter = 383 auto iter =
384 std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) { 384 std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index f45cb5674..e412e58aa 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -216,6 +216,16 @@ public:
216 total_process_running_time_ticks += ticks; 216 total_process_running_time_ticks += ticks;
217 } 217 }
218 218
219 /// Gets the process schedule count, used for thread yielding.
220 s64 GetScheduledCount() const {
221 return schedule_count;
222 }
223
224 /// Increments the process schedule count, used for thread yielding.
225 void IncrementScheduledCount() {
226 ++schedule_count;
227 }
228
219 /// Gets 8 bytes of random data for svcGetInfo RandomEntropy 229 /// Gets 8 bytes of random data for svcGetInfo RandomEntropy
220 u64 GetRandomEntropy(std::size_t index) const { 230 u64 GetRandomEntropy(std::size_t index) const {
221 return random_entropy.at(index); 231 return random_entropy.at(index);
@@ -397,6 +407,9 @@ private:
397 /// Name of this process 407 /// Name of this process
398 std::string name; 408 std::string name;
399 409
410 /// Schedule count of this process
411 s64 schedule_count{};
412
400 /// System context 413 /// System context
401 Core::System& system; 414 Core::System& system;
402}; 415};
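Process gains a schedule_count exposed through GetScheduledCount()/IncrementScheduledCount(), documented as being for thread yielding. Its real consumer is the new KScheduler, which is not part of this excerpt, so the following is only a hypothetical illustration of how such a counter can be used: snapshot it before a yield and treat an unchanged value as "nothing was rescheduled".

#include "common/common_types.h"
#include "core/hle/kernel/process.h"

// Hypothetical sketch only: detecting whether any schedule happened since a
// snapshot of the per-process schedule count was taken.
bool ScheduledSince(const Kernel::Process& process, s64 snapshot) {
    return process.GetScheduledCount() != snapshot;
}

A scheduler would call IncrementScheduledCount() on every context switch, so an unchanged count tells the yield path that its work was redundant.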
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index 6e286419e..cea262ce0 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -6,10 +6,10 @@
6#include "common/assert.h" 6#include "common/assert.h"
7#include "common/logging/log.h" 7#include "common/logging/log.h"
8#include "core/hle/kernel/errors.h" 8#include "core/hle/kernel/errors.h"
9#include "core/hle/kernel/k_scheduler.h"
9#include "core/hle/kernel/kernel.h" 10#include "core/hle/kernel/kernel.h"
10#include "core/hle/kernel/object.h" 11#include "core/hle/kernel/object.h"
11#include "core/hle/kernel/readable_event.h" 12#include "core/hle/kernel/readable_event.h"
12#include "core/hle/kernel/scheduler.h"
13#include "core/hle/kernel/thread.h" 13#include "core/hle/kernel/thread.h"
14 14
15namespace Kernel { 15namespace Kernel {
@@ -39,7 +39,7 @@ void ReadableEvent::Clear() {
39} 39}
40 40
41ResultCode ReadableEvent::Reset() { 41ResultCode ReadableEvent::Reset() {
42 SchedulerLock lock(kernel); 42 KScopedSchedulerLock lock(kernel);
43 if (!is_signaled) { 43 if (!is_signaled) {
44 LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}", 44 LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
45 GetObjectId(), GetTypeName(), GetName()); 45 GetObjectId(), GetTypeName(), GetName());
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
deleted file mode 100644
index 5c63b0b4a..000000000
--- a/src/core/hle/kernel/scheduler.cpp
+++ /dev/null
@@ -1,819 +0,0 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4//
5// SelectThreads, Yield functions originally by TuxSH.
6// licensed under GPLv2 or later under exception provided by the author.
7
8#include <algorithm>
9#include <mutex>
10#include <set>
11#include <unordered_set>
12#include <utility>
13
14#include "common/assert.h"
15#include "common/bit_util.h"
16#include "common/fiber.h"
17#include "common/logging/log.h"
18#include "core/arm/arm_interface.h"
19#include "core/core.h"
20#include "core/core_timing.h"
21#include "core/cpu_manager.h"
22#include "core/hle/kernel/kernel.h"
23#include "core/hle/kernel/physical_core.h"
24#include "core/hle/kernel/process.h"
25#include "core/hle/kernel/scheduler.h"
26#include "core/hle/kernel/time_manager.h"
27
28namespace Kernel {
29
30GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {}
31
32GlobalScheduler::~GlobalScheduler() = default;
33
34void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) {
35 std::scoped_lock lock{global_list_guard};
36 thread_list.push_back(std::move(thread));
37}
38
39void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
40 std::scoped_lock lock{global_list_guard};
41 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
42 thread_list.end());
43}
44
45u32 GlobalScheduler::SelectThreads() {
46 ASSERT(is_locked);
47 const auto update_thread = [](Thread* thread, Scheduler& sched) {
48 std::scoped_lock lock{sched.guard};
49 if (thread != sched.selected_thread_set.get()) {
50 if (thread == nullptr) {
51 ++sched.idle_selection_count;
52 }
53 sched.selected_thread_set = SharedFrom(thread);
54 }
55 const bool reschedule_pending =
56 sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread);
57 sched.is_context_switch_pending = reschedule_pending;
58 std::atomic_thread_fence(std::memory_order_seq_cst);
59 return reschedule_pending;
60 };
61 if (!is_reselection_pending.load()) {
62 return 0;
63 }
64 std::array<Thread*, Core::Hardware::NUM_CPU_CORES> top_threads{};
65
66 u32 idle_cores{};
67
68 // Step 1: Get top thread in schedule queue.
69 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
70 Thread* top_thread =
71 scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
72 if (top_thread != nullptr) {
73 // TODO(Blinkhawk): Implement Thread Pinning
74 } else {
75 idle_cores |= (1U << core);
76 }
77 top_threads[core] = top_thread;
78 }
79
80 while (idle_cores != 0) {
81 u32 core_id = Common::CountTrailingZeroes32(idle_cores);
82
83 if (!suggested_queue[core_id].empty()) {
84 std::array<s32, Core::Hardware::NUM_CPU_CORES> migration_candidates{};
85 std::size_t num_candidates = 0;
86 auto iter = suggested_queue[core_id].begin();
87 Thread* suggested = nullptr;
88 // Step 2: Try selecting a suggested thread.
89 while (iter != suggested_queue[core_id].end()) {
90 suggested = *iter;
91 iter++;
92 s32 suggested_core_id = suggested->GetProcessorID();
93 Thread* top_thread =
94 suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr;
95 if (top_thread != suggested) {
96 if (top_thread != nullptr &&
97 top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
98 suggested = nullptr;
99 break;
100 // A higher-priority thread is on that core, so cancel the migration
101 }
102 TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested);
103 break;
104 }
105 suggested = nullptr;
106 migration_candidates[num_candidates++] = suggested_core_id;
107 }
108 // Step 3: Select a suggested thread from another core
109 if (suggested == nullptr) {
110 for (std::size_t i = 0; i < num_candidates; i++) {
111 s32 candidate_core = migration_candidates[i];
112 suggested = top_threads[candidate_core];
113 auto it = scheduled_queue[candidate_core].begin();
114 it++;
115 Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
116 if (next != nullptr) {
117 TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
118 suggested);
119 top_threads[candidate_core] = next;
120 break;
121 } else {
122 suggested = nullptr;
123 }
124 }
125 }
126 top_threads[core_id] = suggested;
127 }
128
129 idle_cores &= ~(1U << core_id);
130 }
131 u32 cores_needing_context_switch{};
132 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
133 Scheduler& sched = kernel.Scheduler(core);
134 ASSERT(top_threads[core] == nullptr ||
135 static_cast<u32>(top_threads[core]->GetProcessorID()) == core);
136 if (update_thread(top_threads[core], sched)) {
137 cores_needing_context_switch |= (1U << core);
138 }
139 }
140 return cores_needing_context_switch;
141}
142
143bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
144 ASSERT(is_locked);
145 // Note: caller should use critical section, etc.
146 if (!yielding_thread->IsRunnable()) {
147 // Normally this case shouldn't happen except for SetThreadActivity.
148 is_reselection_pending.store(true, std::memory_order_release);
149 return false;
150 }
151 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
152 const u32 priority = yielding_thread->GetPriority();
153
154 // Yield the thread
155 Reschedule(priority, core_id, yielding_thread);
156 const Thread* const winner = scheduled_queue[core_id].front();
157 if (kernel.GetCurrentHostThreadID() != core_id) {
158 is_reselection_pending.store(true, std::memory_order_release);
159 }
160
161 return AskForReselectionOrMarkRedundant(yielding_thread, winner);
162}
163
164bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
165 ASSERT(is_locked);
166 // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
167 // etc.
168 if (!yielding_thread->IsRunnable()) {
169 // Normally this case shouldn't happen except for SetThreadActivity.
170 is_reselection_pending.store(true, std::memory_order_release);
171 return false;
172 }
173 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
174 const u32 priority = yielding_thread->GetPriority();
175
176 // Yield the thread
177 Reschedule(priority, core_id, yielding_thread);
178
179 std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
180 for (std::size_t i = 0; i < current_threads.size(); i++) {
181 current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
182 }
183
184 Thread* next_thread = scheduled_queue[core_id].front(priority);
185 Thread* winner = nullptr;
186 for (auto& thread : suggested_queue[core_id]) {
187 const s32 source_core = thread->GetProcessorID();
188 if (source_core >= 0) {
189 if (current_threads[source_core] != nullptr) {
190 if (thread == current_threads[source_core] ||
191 current_threads[source_core]->GetPriority() < min_regular_priority) {
192 continue;
193 }
194 }
195 }
196 if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
197 next_thread->GetPriority() < thread->GetPriority()) {
198 if (thread->GetPriority() <= priority) {
199 winner = thread;
200 break;
201 }
202 }
203 }
204
205 if (winner != nullptr) {
206 if (winner != yielding_thread) {
207 TransferToCore(winner->GetPriority(), s32(core_id), winner);
208 }
209 } else {
210 winner = next_thread;
211 }
212
213 if (kernel.GetCurrentHostThreadID() != core_id) {
214 is_reselection_pending.store(true, std::memory_order_release);
215 }
216
217 return AskForReselectionOrMarkRedundant(yielding_thread, winner);
218}
219
220bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
221 ASSERT(is_locked);
222 // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
223 // etc.
224 if (!yielding_thread->IsRunnable()) {
225 // Normally this case shouldn't happen except for SetThreadActivity.
226 is_reselection_pending.store(true, std::memory_order_release);
227 return false;
228 }
229 Thread* winner = nullptr;
230 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
231
232 // Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one instead
233 TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread);
234
235 // If the core is idle, perform load balancing, excluding the threads that have just used this
236 // function...
237 if (scheduled_queue[core_id].empty()) {
238 // Here, "current_threads" is calculated after the "yield", unlike yield -1
239 std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
240 for (std::size_t i = 0; i < current_threads.size(); i++) {
241 current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
242 }
243 for (auto& thread : suggested_queue[core_id]) {
244 const s32 source_core = thread->GetProcessorID();
245 if (source_core < 0 || thread == current_threads[source_core]) {
246 continue;
247 }
248 if (current_threads[source_core] == nullptr ||
249 current_threads[source_core]->GetPriority() >= min_regular_priority) {
250 winner = thread;
251 }
252 break;
253 }
254 if (winner != nullptr) {
255 if (winner != yielding_thread) {
256 TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
257 }
258 } else {
259 winner = yielding_thread;
260 }
261 } else {
262 winner = scheduled_queue[core_id].front();
263 }
264
265 if (kernel.GetCurrentHostThreadID() != core_id) {
266 is_reselection_pending.store(true, std::memory_order_release);
267 }
268
269 return AskForReselectionOrMarkRedundant(yielding_thread, winner);
270}
271
272void GlobalScheduler::PreemptThreads() {
273 ASSERT(is_locked);
274 for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
275 const u32 priority = preemption_priorities[core_id];
276
277 if (scheduled_queue[core_id].size(priority) > 0) {
278 if (scheduled_queue[core_id].size(priority) > 1) {
279 scheduled_queue[core_id].front(priority)->IncrementYieldCount();
280 }
281 scheduled_queue[core_id].yield(priority);
282 if (scheduled_queue[core_id].size(priority) > 1) {
283 scheduled_queue[core_id].front(priority)->IncrementYieldCount();
284 }
285 }
286
287 Thread* current_thread =
288 scheduled_queue[core_id].empty() ? nullptr : scheduled_queue[core_id].front();
289 Thread* winner = nullptr;
290 for (auto& thread : suggested_queue[core_id]) {
291 const s32 source_core = thread->GetProcessorID();
292 if (thread->GetPriority() != priority) {
293 continue;
294 }
295 if (source_core >= 0) {
296 Thread* next_thread = scheduled_queue[source_core].empty()
297 ? nullptr
298 : scheduled_queue[source_core].front();
299 if (next_thread != nullptr && next_thread->GetPriority() < 2) {
300 break;
301 }
302 if (next_thread == thread) {
303 continue;
304 }
305 }
306 if (current_thread != nullptr &&
307 current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
308 winner = thread;
309 break;
310 }
311 }
312
313 if (winner != nullptr) {
314 TransferToCore(winner->GetPriority(), s32(core_id), winner);
315 current_thread =
316 winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
317 }
318
319 if (current_thread != nullptr && current_thread->GetPriority() > priority) {
320 for (auto& thread : suggested_queue[core_id]) {
321 const s32 source_core = thread->GetProcessorID();
322 if (thread->GetPriority() < priority) {
323 continue;
324 }
325 if (source_core >= 0) {
326 Thread* next_thread = scheduled_queue[source_core].empty()
327 ? nullptr
328 : scheduled_queue[source_core].front();
329 if (next_thread != nullptr && next_thread->GetPriority() < 2) {
330 break;
331 }
332 if (next_thread == thread) {
333 continue;
334 }
335 }
336 if (current_thread != nullptr &&
337 current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
338 winner = thread;
339 break;
340 }
341 }
342
343 if (winner != nullptr) {
344 TransferToCore(winner->GetPriority(), s32(core_id), winner);
345 current_thread = winner;
346 }
347 }
348
349 is_reselection_pending.store(true, std::memory_order_release);
350 }
351}
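PreemptThreads() above rotates the queue at each core's preemption priority with scheduled_queue[core_id].yield(priority): the front thread of that priority level moves to the back so the next one gets picked. A stripped-down sketch of that rotate operation on a per-priority deque follows; MiniQueue is illustrative and far simpler than the real Common::MultiLevelQueue.

#include <array>
#include <cstddef>
#include <deque>

// Hedged sketch of the per-priority "yield" (rotate front to back) used above.
template <typename T, std::size_t NumPriorities>
class MiniQueue {
public:
    void yield(std::size_t priority) {
        auto& level = levels[priority];
        if (level.size() > 1) {
            level.push_back(level.front()); // old front goes to the back...
            level.pop_front();              // ...so the next thread is selected
        }
    }
    T front(std::size_t priority) const {
        return levels[priority].front();
    }
private:
    std::array<std::deque<T>, NumPriorities> levels{};
};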
352
353void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
354 Core::EmuThreadHandle global_thread) {
355 u32 current_core = global_thread.host_handle;
356 bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
357 (current_core < Core::Hardware::NUM_CPU_CORES);
358 while (cores_pending_reschedule != 0) {
359 u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
360 ASSERT(core < Core::Hardware::NUM_CPU_CORES);
361 if (!must_context_switch || core != current_core) {
362 auto& phys_core = kernel.PhysicalCore(core);
363 phys_core.Interrupt();
364 } else {
365 must_context_switch = true;
366 }
367 cores_pending_reschedule &= ~(1U << core);
368 }
369 if (must_context_switch) {
370 auto& core_scheduler = kernel.CurrentScheduler();
371 kernel.ExitSVCProfile();
372 core_scheduler.TryDoContextSwitch();
373 kernel.EnterSVCProfile();
374 }
375}
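EnableInterruptAndSchedule() walks the bitmask of cores that need rescheduling by repeatedly taking the lowest set bit with Common::CountTrailingZeroes32 and clearing it. A compact standalone version of that idiom; the helper names are illustrative and a portable fallback stands in for the Common:: function.

#include <cstdint>

// Hedged sketch of the "visit every set bit" loop used above.
inline std::uint32_t LowestSetBitIndex(std::uint32_t value) {
    std::uint32_t index = 0;
    while ((value & 1) == 0) { // caller guarantees value != 0
        value >>= 1;
        ++index;
    }
    return index;
}

template <typename Func>
void ForEachSetBit(std::uint32_t mask, Func&& func) {
    while (mask != 0) {
        const std::uint32_t bit = LowestSetBitIndex(mask);
        func(bit);            // e.g. interrupt physical core 'bit'
        mask &= ~(1U << bit); // clear it and continue with the next core
    }
}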
376
377void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) {
378 ASSERT(is_locked);
379 suggested_queue[core].add(thread, priority);
380}
381
382void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) {
383 ASSERT(is_locked);
384 suggested_queue[core].remove(thread, priority);
385}
386
387void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) {
388 ASSERT(is_locked);
389 ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
390 scheduled_queue[core].add(thread, priority);
391}
392
393void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) {
394 ASSERT(is_locked);
395 ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
396 scheduled_queue[core].add(thread, priority, false);
397}
398
399void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) {
400 ASSERT(is_locked);
401 scheduled_queue[core].remove(thread, priority);
402 scheduled_queue[core].add(thread, priority);
403}
404
405void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) {
406 ASSERT(is_locked);
407 scheduled_queue[core].remove(thread, priority);
408}
409
410void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
411 ASSERT(is_locked);
412 const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
413 const s32 source_core = thread->GetProcessorID();
414 if (source_core == destination_core || !schedulable) {
415 return;
416 }
417 thread->SetProcessorID(destination_core);
418 if (source_core >= 0) {
419 Unschedule(priority, static_cast<u32>(source_core), thread);
420 }
421 if (destination_core >= 0) {
422 Unsuggest(priority, static_cast<u32>(destination_core), thread);
423 Schedule(priority, static_cast<u32>(destination_core), thread);
424 }
425 if (source_core >= 0) {
426 Suggest(priority, static_cast<u32>(source_core), thread);
427 }
428}
429
430bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread,
431 const Thread* winner) {
432 if (current_thread == winner) {
433 current_thread->IncrementYieldCount();
434 return true;
435 } else {
436 is_reselection_pending.store(true, std::memory_order_release);
437 return false;
438 }
439}
440
441void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
442 if (old_flags == thread->scheduling_state) {
443 return;
444 }
445 ASSERT(is_locked);
446
447 if (old_flags == static_cast<u32>(ThreadSchedStatus::Runnable)) {
448 // In this case the thread was running, now it's pausing/exiting
449 if (thread->processor_id >= 0) {
450 Unschedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
451 }
452
453 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
454 if (core != static_cast<u32>(thread->processor_id) &&
455 ((thread->affinity_mask >> core) & 1) != 0) {
456 Unsuggest(thread->current_priority, core, thread);
457 }
458 }
459 } else if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
460 // The thread is now set to running from being stopped
461 if (thread->processor_id >= 0) {
462 Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
463 }
464
465 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
466 if (core != static_cast<u32>(thread->processor_id) &&
467 ((thread->affinity_mask >> core) & 1) != 0) {
468 Suggest(thread->current_priority, core, thread);
469 }
470 }
471 }
472
473 SetReselectionPending();
474}
475
476void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priority) {
477 if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable)) {
478 return;
479 }
480 ASSERT(is_locked);
481 if (thread->processor_id >= 0) {
482 Unschedule(old_priority, static_cast<u32>(thread->processor_id), thread);
483 }
484
485 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
486 if (core != static_cast<u32>(thread->processor_id) &&
487 ((thread->affinity_mask >> core) & 1) != 0) {
488 Unsuggest(old_priority, core, thread);
489 }
490 }
491
492 if (thread->processor_id >= 0) {
493 if (thread == kernel.CurrentScheduler().GetCurrentThread()) {
494 SchedulePrepend(thread->current_priority, static_cast<u32>(thread->processor_id),
495 thread);
496 } else {
497 Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
498 }
499 }
500
501 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
502 if (core != static_cast<u32>(thread->processor_id) &&
503 ((thread->affinity_mask >> core) & 1) != 0) {
504 Suggest(thread->current_priority, core, thread);
505 }
506 }
507 thread->IncrementYieldCount();
508 SetReselectionPending();
509}
510
511void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask,
512 s32 old_core) {
513 if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable) ||
514 thread->current_priority >= THREADPRIO_COUNT) {
515 return;
516 }
517 ASSERT(is_locked);
518
519 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
520 if (((old_affinity_mask >> core) & 1) != 0) {
521 if (core == static_cast<u32>(old_core)) {
522 Unschedule(thread->current_priority, core, thread);
523 } else {
524 Unsuggest(thread->current_priority, core, thread);
525 }
526 }
527 }
528
529 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
530 if (((thread->affinity_mask >> core) & 1) != 0) {
531 if (core == static_cast<u32>(thread->processor_id)) {
532 Schedule(thread->current_priority, core, thread);
533 } else {
534 Suggest(thread->current_priority, core, thread);
535 }
536 }
537 }
538
539 thread->IncrementYieldCount();
540 SetReselectionPending();
541}
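The AdjustSchedulingOn* helpers above all iterate every core and test ((affinity_mask >> core) & 1) to decide whether a thread should be scheduled on a core or merely suggested there. A minimal extraction of that bit-walk into a standalone helper; ForEachAffinityCore is an illustrative name.

#include <cstdint>

// Hedged sketch: visit each core enabled in an affinity bitmask, mirroring the
// loops in AdjustSchedulingOnStatus/Priority/Affinity above.
template <typename Func>
void ForEachAffinityCore(std::uint64_t affinity_mask, std::uint32_t num_cores, Func&& func) {
    for (std::uint32_t core = 0; core < num_cores; ++core) {
        if (((affinity_mask >> core) & 1) != 0) {
            func(core);
        }
    }
}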
542
543void GlobalScheduler::Shutdown() {
544 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
545 scheduled_queue[core].clear();
546 suggested_queue[core].clear();
547 }
548 thread_list.clear();
549}
550
551void GlobalScheduler::Lock() {
552 Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID();
553 ASSERT(!current_thread.IsInvalid());
554 if (current_thread == current_owner) {
555 ++scope_lock;
556 } else {
557 inner_lock.lock();
558 is_locked = true;
559 current_owner = current_thread;
560 ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle());
561 scope_lock = 1;
562 }
563}
564
565void GlobalScheduler::Unlock() {
566 if (--scope_lock != 0) {
567 ASSERT(scope_lock > 0);
568 return;
569 }
570 u32 cores_pending_reschedule = SelectThreads();
571 Core::EmuThreadHandle leaving_thread = current_owner;
572 current_owner = Core::EmuThreadHandle::InvalidHandle();
573 scope_lock = 1;
574 is_locked = false;
575 inner_lock.unlock();
576 EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread);
577}
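Lock()/Unlock() above form a reentrant lock keyed on the emulated thread: relocking by the current owner only bumps a depth counter, and only the outermost Unlock() releases the mutex and kicks off thread reselection. A stripped-down sketch of that ownership-plus-depth scheme; ReentrantGuard and OwnerId are illustrative stand-ins for the EmuThreadHandle-based bookkeeping.

#include <mutex>

// Hedged sketch of the reentrant scheduler-lock pattern shown above.
using OwnerId = int; // stand-in for Core::EmuThreadHandle
constexpr OwnerId kInvalidOwner = -1;

class ReentrantGuard {
public:
    void Lock(OwnerId current) {
        if (current == owner) {
            ++depth; // same owner: just nest deeper
            return;
        }
        inner.lock();
        owner = current;
        depth = 1;
    }
    bool Unlock() {
        if (--depth != 0) {
            return false; // still nested, keep holding the lock
        }
        owner = kInvalidOwner;
        inner.unlock();
        return true; // outermost release: this is where rescheduling happens
    }
private:
    std::mutex inner;
    OwnerId owner = kInvalidOwner;
    int depth = 0;
};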
578
579Scheduler::Scheduler(Core::System& system, std::size_t core_id) : system(system), core_id(core_id) {
580 switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this);
581}
582
583Scheduler::~Scheduler() = default;
584
585bool Scheduler::HaveReadyThreads() const {
586 return system.GlobalScheduler().HaveReadyThreads(core_id);
587}
588
589Thread* Scheduler::GetCurrentThread() const {
590 if (current_thread) {
591 return current_thread.get();
592 }
593 return idle_thread.get();
594}
595
596Thread* Scheduler::GetSelectedThread() const {
597 return selected_thread.get();
598}
599
600u64 Scheduler::GetLastContextSwitchTicks() const {
601 return last_context_switch_time;
602}
603
604void Scheduler::TryDoContextSwitch() {
605 auto& phys_core = system.Kernel().CurrentPhysicalCore();
606 if (phys_core.IsInterrupted()) {
607 phys_core.ClearInterrupt();
608 }
609 guard.lock();
610 if (is_context_switch_pending) {
611 SwitchContext();
612 } else {
613 guard.unlock();
614 }
615}
616
617void Scheduler::OnThreadStart() {
618 SwitchContextStep2();
619}
620
621void Scheduler::Unload(Thread* thread) {
622 if (thread) {
623 thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
624 thread->SetIsRunning(false);
625 if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) {
626 system.ArmInterface(core_id).ExceptionalExit();
627 thread->SetContinuousOnSVC(false);
628 }
629 if (!thread->IsHLEThread() && !thread->HasExited()) {
630 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
631 cpu_core.SaveContext(thread->GetContext32());
632 cpu_core.SaveContext(thread->GetContext64());
633 // Save the TPIDR_EL0 system register in case it was modified.
634 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
635 cpu_core.ClearExclusiveState();
636 }
637 thread->context_guard.unlock();
638 }
639}
640
641void Scheduler::Unload() {
642 Unload(current_thread.get());
643}
644
645void Scheduler::Reload(Thread* thread) {
646 if (thread) {
647 ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
648 "Thread must be runnable.");
649
650 // Cancel any outstanding wakeup events for this thread
651 thread->SetIsRunning(true);
652 thread->SetWasRunning(false);
653 thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
654
655 auto* const thread_owner_process = thread->GetOwnerProcess();
656 if (thread_owner_process != nullptr) {
657 system.Kernel().MakeCurrentProcess(thread_owner_process);
658 }
659 if (!thread->IsHLEThread()) {
660 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
661 cpu_core.LoadContext(thread->GetContext32());
662 cpu_core.LoadContext(thread->GetContext64());
663 cpu_core.SetTlsAddress(thread->GetTLSAddress());
664 cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
665 cpu_core.ClearExclusiveState();
666 }
667 }
668}
669
670void Scheduler::Reload() {
671 Reload(current_thread.get());
672}
673
674void Scheduler::SwitchContextStep2() {
675 // Load context of new thread
676 Reload(selected_thread.get());
677
678 TryDoContextSwitch();
679}
680
681void Scheduler::SwitchContext() {
682 current_thread_prev = current_thread;
683 selected_thread = selected_thread_set;
684 Thread* previous_thread = current_thread_prev.get();
685 Thread* new_thread = selected_thread.get();
686 current_thread = selected_thread;
687
688 is_context_switch_pending = false;
689
690 if (new_thread == previous_thread) {
691 guard.unlock();
692 return;
693 }
694
695 Process* const previous_process = system.Kernel().CurrentProcess();
696
697 UpdateLastContextSwitchTime(previous_thread, previous_process);
698
699 // Save context for previous thread
700 Unload(previous_thread);
701
702 std::shared_ptr<Common::Fiber>* old_context;
703 if (previous_thread != nullptr) {
704 old_context = &previous_thread->GetHostContext();
705 } else {
706 old_context = &idle_thread->GetHostContext();
707 }
708 guard.unlock();
709
710 Common::Fiber::YieldTo(*old_context, switch_fiber);
711 /// When a thread wakes up, the scheduler it runs on may have changed to another core's.
712 auto& next_scheduler = system.Kernel().CurrentScheduler();
713 next_scheduler.SwitchContextStep2();
714}
715
716void Scheduler::OnSwitch(void* this_scheduler) {
717 Scheduler* sched = static_cast<Scheduler*>(this_scheduler);
718 sched->SwitchToCurrent();
719}
720
721void Scheduler::SwitchToCurrent() {
722 while (true) {
723 {
724 std::scoped_lock lock{guard};
725 selected_thread = selected_thread_set;
726 current_thread = selected_thread;
727 is_context_switch_pending = false;
728 }
729 const auto is_switch_pending = [this] {
730 std::scoped_lock lock{guard};
731 return is_context_switch_pending;
732 };
733 do {
734 if (current_thread != nullptr && !current_thread->IsHLEThread()) {
735 current_thread->context_guard.lock();
736 if (!current_thread->IsRunnable()) {
737 current_thread->context_guard.unlock();
738 break;
739 }
740 if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
741 current_thread->context_guard.unlock();
742 break;
743 }
744 }
745 std::shared_ptr<Common::Fiber>* next_context;
746 if (current_thread != nullptr) {
747 next_context = &current_thread->GetHostContext();
748 } else {
749 next_context = &idle_thread->GetHostContext();
750 }
751 Common::Fiber::YieldTo(switch_fiber, *next_context);
752 } while (!is_switch_pending());
753 }
754}
755
756void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
757 const u64 prev_switch_ticks = last_context_switch_time;
758 const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
759 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
760
761 if (thread != nullptr) {
762 thread->UpdateCPUTimeTicks(update_ticks);
763 }
764
765 if (process != nullptr) {
766 process->UpdateCPUTimeTicks(update_ticks);
767 }
768
769 last_context_switch_time = most_recent_switch_ticks;
770}
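UpdateLastContextSwitchTime() charges the ticks elapsed since the previous switch to both the outgoing thread and its process, then stores the new timestamp. A compact restatement of that accounting; TickAccount is an illustrative name.

#include <cstdint>

// Hedged sketch of the context-switch tick accounting above.
struct TickAccount {
    std::uint64_t thread_ticks = 0;
    std::uint64_t process_ticks = 0;
    std::uint64_t last_switch = 0;

    void OnContextSwitch(std::uint64_t now) {
        const std::uint64_t delta = now - last_switch; // time the outgoing thread ran
        thread_ticks += delta;
        process_ticks += delta;
        last_switch = now; // no other arithmetic is applied to the timestamp
    }
};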
771
772void Scheduler::Initialize() {
773 std::string name = "Idle Thread Id:" + std::to_string(core_id);
774 std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
775 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
776 ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
777 auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
778 nullptr, std::move(init_func), init_func_parameter);
779 idle_thread = std::move(thread_res).Unwrap();
780}
781
782void Scheduler::Shutdown() {
783 current_thread = nullptr;
784 selected_thread = nullptr;
785}
786
787SchedulerLock::SchedulerLock(KernelCore& kernel) : kernel{kernel} {
788 kernel.GlobalScheduler().Lock();
789}
790
791SchedulerLock::~SchedulerLock() {
792 kernel.GlobalScheduler().Unlock();
793}
794
795SchedulerLockAndSleep::SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle,
796 Thread* time_task, s64 nanoseconds)
797 : SchedulerLock{kernel}, event_handle{event_handle}, time_task{time_task}, nanoseconds{
798 nanoseconds} {
799 event_handle = InvalidHandle;
800}
801
802SchedulerLockAndSleep::~SchedulerLockAndSleep() {
803 if (sleep_cancelled) {
804 return;
805 }
806 auto& time_manager = kernel.TimeManager();
807 time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
808}
809
810void SchedulerLockAndSleep::Release() {
811 if (sleep_cancelled) {
812 return;
813 }
814 auto& time_manager = kernel.TimeManager();
815 time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
816 sleep_cancelled = true;
817}
818
819} // namespace Kernel
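SchedulerLockAndSleep, removed here in favour of KScopedSchedulerLockAndSleep (included from svc.cpp below), pairs the scheduler lock with a deferred wakeup: unless the sleep is cancelled, its destructor schedules a time event for the sleeping thread. A small sketch of that arm-on-scope-exit idiom; TimeoutArmer is an illustrative name.

#include <functional>
#include <utility>

// Hedged sketch of the cancel-or-arm-on-destruction pattern used by
// SchedulerLockAndSleep above.
class TimeoutArmer {
public:
    explicit TimeoutArmer(std::function<void()> arm) : arm_timeout{std::move(arm)} {}
    ~TimeoutArmer() {
        if (!cancelled) {
            arm_timeout(); // e.g. TimeManager::ScheduleTimeEvent(handle, thread, ns)
        }
    }
    void CancelSleep() {
        cancelled = true; // the wait completed early; no timeout is needed
    }
private:
    std::function<void()> arm_timeout;
    bool cancelled = false;
};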
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
deleted file mode 100644
index 68db4a5ef..000000000
--- a/src/core/hle/kernel/scheduler.h
+++ /dev/null
@@ -1,320 +0,0 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <atomic>
8#include <memory>
9#include <mutex>
10#include <vector>
11
12#include "common/common_types.h"
13#include "common/multi_level_queue.h"
14#include "common/spin_lock.h"
15#include "core/hardware_properties.h"
16#include "core/hle/kernel/thread.h"
17
18namespace Common {
19class Fiber;
20}
21
22namespace Core {
23class ARM_Interface;
24class System;
25} // namespace Core
26
27namespace Kernel {
28
29class KernelCore;
30class Process;
31class SchedulerLock;
32
33class GlobalScheduler final {
34public:
35 explicit GlobalScheduler(KernelCore& kernel);
36 ~GlobalScheduler();
37
38 /// Adds a new thread to the scheduler
39 void AddThread(std::shared_ptr<Thread> thread);
40
41 /// Removes a thread from the scheduler
42 void RemoveThread(std::shared_ptr<Thread> thread);
43
44 /// Returns a list of all threads managed by the scheduler
45 const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
46 return thread_list;
47 }
48
49 /// Notify the scheduler a thread's status has changed.
50 void AdjustSchedulingOnStatus(Thread* thread, u32 old_flags);
51
52 /// Notify the scheduler a thread's priority has changed.
53 void AdjustSchedulingOnPriority(Thread* thread, u32 old_priority);
54
55 /// Notify the scheduler a thread's core and/or affinity mask has changed.
56 void AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask, s32 old_core);
57
58 /**
59 * Takes care of selecting the new scheduled threads in three steps:
60 *
61 * 1. First a thread is selected from the top of the priority queue. If no thread
62 * is obtained then we move to step two, else we are done.
63 *
64 * 2. Second we try to get a suggested thread that's not assigned to any core or
65 * that is not the top thread in that core.
66 *
67 * 3. Third, if no suggested thread is found, we do a second pass and pick a running
68 * thread in another core and swap it with its current thread.
69 *
70 * returns the cores needing scheduling.
71 */
72 u32 SelectThreads();
73
74 bool HaveReadyThreads(std::size_t core_id) const {
75 return !scheduled_queue[core_id].empty();
76 }
77
78 /**
79 * Takes a thread and moves it to the back of its priority list.
80 *
81 * @note This operation can be redundant and no scheduling is changed if marked as so.
82 */
83 bool YieldThread(Thread* thread);
84
85 /**
86 * Takes a thread and moves it to the back of its priority list.
87 * Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or
88 * a better priority than the next thread in the core.
89 *
90 * @note This operation can be redundant and no scheduling is changed if marked as so.
91 */
92 bool YieldThreadAndBalanceLoad(Thread* thread);
93
94 /**
95 * Takes a thread and moves it out of the scheduling queue
96 * and into the suggested queue. If no thread can be scheduled afterwards in that core,
97 * a suggested thread is obtained instead.
98 *
99 * @note This operation can be redundant and no scheduling is changed if marked as so.
100 */
101 bool YieldThreadAndWaitForLoadBalancing(Thread* thread);
102
103 /**
104 * Rotates the scheduling queues of threads at a preemption priority and then does
105 * some core rebalancing. Preemption priorities can be found in the array
106 * 'preemption_priorities'.
107 *
108 * @note This operation happens every 10ms.
109 */
110 void PreemptThreads();
111
112 u32 CpuCoresCount() const {
113 return Core::Hardware::NUM_CPU_CORES;
114 }
115
116 void SetReselectionPending() {
117 is_reselection_pending.store(true, std::memory_order_release);
118 }
119
120 bool IsReselectionPending() const {
121 return is_reselection_pending.load(std::memory_order_acquire);
122 }
123
124 void Shutdown();
125
126private:
127 friend class SchedulerLock;
128
129 /// Lock the scheduler to the current thread.
130 void Lock();
131
132 /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling
133 /// and reschedules current core if needed.
134 void Unlock();
135
136 void EnableInterruptAndSchedule(u32 cores_pending_reschedule,
137 Core::EmuThreadHandle global_thread);
138
139 /**
140 * Add a thread to the suggested queue of a cpu core. Suggested threads may be
141 * picked if no thread is scheduled to run on the core.
142 */
143 void Suggest(u32 priority, std::size_t core, Thread* thread);
144
145 /**
146 * Remove a thread from the suggested queue of a cpu core. Suggested threads may be
147 * picked if no thread is scheduled to run on the core.
148 */
149 void Unsuggest(u32 priority, std::size_t core, Thread* thread);
150
151 /**
152 * Add a thread to the scheduling queue of a cpu core. The thread is added at the
153 * back of the queue in its priority level.
154 */
155 void Schedule(u32 priority, std::size_t core, Thread* thread);
156
157 /**
158 * Add a thread to the scheduling queue of a cpu core. The thread is added at the
159 * front of the queue in its priority level.
160 */
161 void SchedulePrepend(u32 priority, std::size_t core, Thread* thread);
162
163 /// Reschedule an already scheduled thread based on a new priority
164 void Reschedule(u32 priority, std::size_t core, Thread* thread);
165
166 /// Unschedules a thread.
167 void Unschedule(u32 priority, std::size_t core, Thread* thread);
168
169 /**
170 * Transfers a thread into a specific core. If the destination_core is -1
171 * it will be unscheduled from its source core and added into its suggested
172 * queue.
173 */
174 void TransferToCore(u32 priority, s32 destination_core, Thread* thread);
175
176 bool AskForReselectionOrMarkRedundant(Thread* current_thread, const Thread* winner);
177
178 static constexpr u32 min_regular_priority = 2;
179 std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
180 scheduled_queue;
181 std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
182 suggested_queue;
183 std::atomic<bool> is_reselection_pending{false};
184
185 // The priority levels at which the global scheduler preempts threads every 10 ms. They are
186 // ordered from Core 0 to Core 3.
187 std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
188
189 /// Scheduler lock mechanisms.
190 bool is_locked{};
191 std::mutex inner_lock;
192 std::atomic<s64> scope_lock{};
193 Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()};
194
195 Common::SpinLock global_list_guard{};
196
197 /// Lists all thread ids that aren't deleted/etc.
198 std::vector<std::shared_ptr<Thread>> thread_list;
199 KernelCore& kernel;
200};
201
202class Scheduler final {
203public:
204 explicit Scheduler(Core::System& system, std::size_t core_id);
205 ~Scheduler();
206
207 /// Returns whether there are any threads that are ready to run.
208 bool HaveReadyThreads() const;
209
210 /// Reschedules to the next available thread (call after current thread is suspended)
211 void TryDoContextSwitch();
212
213 /// The next two operations (Unload/Reload) are for single-core mode only.
214 /// Unload current thread before preempting core.
215 void Unload(Thread* thread);
216 void Unload();
217 /// Reload current thread after core preemption.
218 void Reload(Thread* thread);
219 void Reload();
220
221 /// Gets the current running thread
222 Thread* GetCurrentThread() const;
223
224 /// Gets the currently selected thread from the top of the multilevel queue
225 Thread* GetSelectedThread() const;
226
227 /// Gets the timestamp for the last context switch in ticks.
228 u64 GetLastContextSwitchTicks() const;
229
230 bool ContextSwitchPending() const {
231 return is_context_switch_pending;
232 }
233
234 void Initialize();
235
236 /// Shuts down the scheduler.
237 void Shutdown();
238
239 void OnThreadStart();
240
241 std::shared_ptr<Common::Fiber>& ControlContext() {
242 return switch_fiber;
243 }
244
245 const std::shared_ptr<Common::Fiber>& ControlContext() const {
246 return switch_fiber;
247 }
248
249private:
250 friend class GlobalScheduler;
251
252 /// Switches the CPU's active thread context to that of the specified thread
253 void SwitchContext();
254
255 /// When a thread wakes up, it must run this through its new scheduler
256 void SwitchContextStep2();
257
258 /**
259 * Called on every context switch to update the internal timestamp
260 * This also updates the running time ticks for the given thread and
261 * process using the following difference:
262 *
263 * ticks += most_recent_ticks - last_context_switch_ticks
264 *
265 * The internal tick timestamp for the scheduler is simply the
266 * most recent tick count retrieved. No special arithmetic is
267 * applied to it.
268 */
269 void UpdateLastContextSwitchTime(Thread* thread, Process* process);
270
271 static void OnSwitch(void* this_scheduler);
272 void SwitchToCurrent();
273
274 std::shared_ptr<Thread> current_thread = nullptr;
275 std::shared_ptr<Thread> selected_thread = nullptr;
276 std::shared_ptr<Thread> current_thread_prev = nullptr;
277 std::shared_ptr<Thread> selected_thread_set = nullptr;
278 std::shared_ptr<Thread> idle_thread = nullptr;
279
280 std::shared_ptr<Common::Fiber> switch_fiber = nullptr;
281
282 Core::System& system;
283 u64 last_context_switch_time = 0;
284 u64 idle_selection_count = 0;
285 const std::size_t core_id;
286
287 Common::SpinLock guard{};
288
289 bool is_context_switch_pending = false;
290};
291
292class SchedulerLock {
293public:
294 [[nodiscard]] explicit SchedulerLock(KernelCore& kernel);
295 ~SchedulerLock();
296
297protected:
298 KernelCore& kernel;
299};
300
301class SchedulerLockAndSleep : public SchedulerLock {
302public:
303 explicit SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* time_task,
304 s64 nanoseconds);
305 ~SchedulerLockAndSleep();
306
307 void CancelSleep() {
308 sleep_cancelled = true;
309 }
310
311 void Release();
312
313private:
314 Handle& event_handle;
315 Thread* time_task;
316 s64 nanoseconds;
317 bool sleep_cancelled{};
318};
319
320} // namespace Kernel
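Throughout this change, the SchedulerLock/SchedulerLockAndSleep helpers above are replaced by KScopedSchedulerLock and KScopedSchedulerLockAndSleep. For orientation, the RAII shape they share can be sketched as follows; KernelCore and the recursive mutex member here are stand-ins for illustration, not yuzu's actual lock implementation.

#include <mutex>

namespace Example {

// Stand-in for the kernel object that owns the global scheduler lock (assumed).
struct KernelCore {
    std::recursive_mutex scheduler_lock;
};

// Minimal scoped lock in the spirit of KScopedSchedulerLock: the lock is held
// exactly for the lifetime of the object, so early returns in SVC handlers
// cannot leave the scheduler locked.
class KScopedSchedulerLock {
public:
    explicit KScopedSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {
        kernel.scheduler_lock.lock();
    }
    ~KScopedSchedulerLock() {
        kernel.scheduler_lock.unlock();
    }
    KScopedSchedulerLock(const KScopedSchedulerLock&) = delete;
    KScopedSchedulerLock& operator=(const KScopedSchedulerLock&) = delete;

private:
    KernelCore& kernel;
};

} // namespace Example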
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index ae088cf41..a35c8aa4b 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -14,9 +14,9 @@
14#include "core/hle/kernel/client_session.h" 14#include "core/hle/kernel/client_session.h"
15#include "core/hle/kernel/handle_table.h" 15#include "core/hle/kernel/handle_table.h"
16#include "core/hle/kernel/hle_ipc.h" 16#include "core/hle/kernel/hle_ipc.h"
17#include "core/hle/kernel/k_scheduler.h"
17#include "core/hle/kernel/kernel.h" 18#include "core/hle/kernel/kernel.h"
18#include "core/hle/kernel/process.h" 19#include "core/hle/kernel/process.h"
19#include "core/hle/kernel/scheduler.h"
20#include "core/hle/kernel/server_session.h" 20#include "core/hle/kernel/server_session.h"
21#include "core/hle/kernel/session.h" 21#include "core/hle/kernel/session.h"
22#include "core/hle/kernel/thread.h" 22#include "core/hle/kernel/thread.h"
@@ -170,7 +170,7 @@ ResultCode ServerSession::CompleteSyncRequest() {
170 170
171 // Some service requests require the thread to block 171 // Some service requests require the thread to block
172 { 172 {
173 SchedulerLock lock(kernel); 173 KScopedSchedulerLock lock(kernel);
174 if (!context.IsThreadWaiting()) { 174 if (!context.IsThreadWaiting()) {
175 context.GetThread().ResumeFromWait(); 175 context.GetThread().ResumeFromWait();
176 context.GetThread().SetSynchronizationResults(nullptr, result); 176 context.GetThread().SetSynchronizationResults(nullptr, result);
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index c8060f179..2d225392f 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -24,6 +24,8 @@
24#include "core/hle/kernel/client_session.h" 24#include "core/hle/kernel/client_session.h"
25#include "core/hle/kernel/errors.h" 25#include "core/hle/kernel/errors.h"
26#include "core/hle/kernel/handle_table.h" 26#include "core/hle/kernel/handle_table.h"
27#include "core/hle/kernel/k_scheduler.h"
28#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
27#include "core/hle/kernel/kernel.h" 29#include "core/hle/kernel/kernel.h"
28#include "core/hle/kernel/memory/memory_block.h" 30#include "core/hle/kernel/memory/memory_block.h"
29#include "core/hle/kernel/memory/page_table.h" 31#include "core/hle/kernel/memory/page_table.h"
@@ -32,7 +34,6 @@
32#include "core/hle/kernel/process.h" 34#include "core/hle/kernel/process.h"
33#include "core/hle/kernel/readable_event.h" 35#include "core/hle/kernel/readable_event.h"
34#include "core/hle/kernel/resource_limit.h" 36#include "core/hle/kernel/resource_limit.h"
35#include "core/hle/kernel/scheduler.h"
36#include "core/hle/kernel/shared_memory.h" 37#include "core/hle/kernel/shared_memory.h"
37#include "core/hle/kernel/svc.h" 38#include "core/hle/kernel/svc.h"
38#include "core/hle/kernel/svc_types.h" 39#include "core/hle/kernel/svc_types.h"
@@ -329,7 +330,8 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
329 330
330/// Makes a blocking IPC call to an OS service. 331/// Makes a blocking IPC call to an OS service.
331static ResultCode SendSyncRequest(Core::System& system, Handle handle) { 332static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
332 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 333 auto& kernel = system.Kernel();
334 const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
333 std::shared_ptr<ClientSession> session = handle_table.Get<ClientSession>(handle); 335 std::shared_ptr<ClientSession> session = handle_table.Get<ClientSession>(handle);
334 if (!session) { 336 if (!session) {
335 LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle); 337 LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle);
@@ -338,9 +340,9 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
338 340
339 LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName()); 341 LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
340 342
341 auto thread = system.CurrentScheduler().GetCurrentThread(); 343 auto thread = kernel.CurrentScheduler()->GetCurrentThread();
342 { 344 {
343 SchedulerLock lock(system.Kernel()); 345 KScopedSchedulerLock lock(kernel);
344 thread->InvalidateHLECallback(); 346 thread->InvalidateHLECallback();
345 thread->SetStatus(ThreadStatus::WaitIPC); 347 thread->SetStatus(ThreadStatus::WaitIPC);
346 session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming()); 348 session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
@@ -349,12 +351,12 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
349 if (thread->HasHLECallback()) { 351 if (thread->HasHLECallback()) {
350 Handle event_handle = thread->GetHLETimeEvent(); 352 Handle event_handle = thread->GetHLETimeEvent();
351 if (event_handle != InvalidHandle) { 353 if (event_handle != InvalidHandle) {
352 auto& time_manager = system.Kernel().TimeManager(); 354 auto& time_manager = kernel.TimeManager();
353 time_manager.UnscheduleTimeEvent(event_handle); 355 time_manager.UnscheduleTimeEvent(event_handle);
354 } 356 }
355 357
356 { 358 {
357 SchedulerLock lock(system.Kernel()); 359 KScopedSchedulerLock lock(kernel);
358 auto* sync_object = thread->GetHLESyncObject(); 360 auto* sync_object = thread->GetHLESyncObject();
359 sync_object->RemoveWaitingThread(SharedFrom(thread)); 361 sync_object->RemoveWaitingThread(SharedFrom(thread));
360 } 362 }
@@ -654,7 +656,6 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
654 info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt); 656 info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt);
655 657
656 if (!break_reason.signal_debugger) { 658 if (!break_reason.signal_debugger) {
657 SchedulerLock lock(system.Kernel());
658 LOG_CRITICAL( 659 LOG_CRITICAL(
659 Debug_Emulated, 660 Debug_Emulated,
660 "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}", 661 "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
@@ -662,13 +663,9 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
662 663
663 handle_debug_buffer(info1, info2); 664 handle_debug_buffer(info1, info2);
664 665
665 auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); 666 auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
666 const auto thread_processor_id = current_thread->GetProcessorID(); 667 const auto thread_processor_id = current_thread->GetProcessorID();
667 system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace(); 668 system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
668
669 // Kill the current thread
670 system.Kernel().ExceptionalExit();
671 current_thread->Stop();
672 } 669 }
673} 670}
674 671
@@ -918,7 +915,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
918 } 915 }
919 916
920 const auto& core_timing = system.CoreTiming(); 917 const auto& core_timing = system.CoreTiming();
921 const auto& scheduler = system.CurrentScheduler(); 918 const auto& scheduler = *system.Kernel().CurrentScheduler();
922 const auto* const current_thread = scheduler.GetCurrentThread(); 919 const auto* const current_thread = scheduler.GetCurrentThread();
923 const bool same_thread = current_thread == thread.get(); 920 const bool same_thread = current_thread == thread.get();
924 921
@@ -1086,7 +1083,7 @@ static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act
1086 return ERR_INVALID_HANDLE; 1083 return ERR_INVALID_HANDLE;
1087 } 1084 }
1088 1085
1089 if (thread.get() == system.CurrentScheduler().GetCurrentThread()) { 1086 if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) {
1090 LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread"); 1087 LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
1091 return ERR_BUSY; 1088 return ERR_BUSY;
1092 } 1089 }
@@ -1119,7 +1116,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H
1119 return ERR_INVALID_HANDLE; 1116 return ERR_INVALID_HANDLE;
1120 } 1117 }
1121 1118
1122 if (thread.get() == system.CurrentScheduler().GetCurrentThread()) { 1119 if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) {
1123 LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread"); 1120 LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
1124 return ERR_BUSY; 1121 return ERR_BUSY;
1125 } 1122 }
@@ -1475,7 +1472,7 @@ static void ExitProcess(Core::System& system) {
1475 current_process->PrepareForTermination(); 1472 current_process->PrepareForTermination();
1476 1473
1477 // Kill the current thread 1474 // Kill the current thread
1478 system.CurrentScheduler().GetCurrentThread()->Stop(); 1475 system.Kernel().CurrentScheduler()->GetCurrentThread()->Stop();
1479} 1476}
1480 1477
1481static void ExitProcess32(Core::System& system) { 1478static void ExitProcess32(Core::System& system) {
@@ -1575,8 +1572,8 @@ static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
1575static void ExitThread(Core::System& system) { 1572static void ExitThread(Core::System& system) {
1576 LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC()); 1573 LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
1577 1574
1578 auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); 1575 auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
1579 system.GlobalScheduler().RemoveThread(SharedFrom(current_thread)); 1576 system.GlobalSchedulerContext().RemoveThread(SharedFrom(current_thread));
1580 current_thread->Stop(); 1577 current_thread->Stop();
1581} 1578}
1582 1579
@@ -1589,44 +1586,31 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
1589 LOG_DEBUG(Kernel_SVC, "called nanoseconds={}", nanoseconds); 1586 LOG_DEBUG(Kernel_SVC, "called nanoseconds={}", nanoseconds);
1590 1587
1591 enum class SleepType : s64 { 1588 enum class SleepType : s64 {
1592 YieldWithoutLoadBalancing = 0, 1589 YieldWithoutCoreMigration = 0,
1593 YieldWithLoadBalancing = -1, 1590 YieldWithCoreMigration = -1,
1594 YieldAndWaitForLoadBalancing = -2, 1591 YieldAndWaitForLoadBalancing = -2,
1595 }; 1592 };
1596 1593
1597 auto& scheduler = system.CurrentScheduler(); 1594 auto& scheduler = *system.Kernel().CurrentScheduler();
1598 auto* const current_thread = scheduler.GetCurrentThread();
1599 bool is_redundant = false;
1600
1601 if (nanoseconds <= 0) { 1595 if (nanoseconds <= 0) {
1602 switch (static_cast<SleepType>(nanoseconds)) { 1596 switch (static_cast<SleepType>(nanoseconds)) {
1603 case SleepType::YieldWithoutLoadBalancing: { 1597 case SleepType::YieldWithoutCoreMigration: {
1604 auto pair = current_thread->YieldSimple(); 1598 scheduler.YieldWithoutCoreMigration();
1605 is_redundant = pair.second;
1606 break; 1599 break;
1607 } 1600 }
1608 case SleepType::YieldWithLoadBalancing: { 1601 case SleepType::YieldWithCoreMigration: {
1609 auto pair = current_thread->YieldAndBalanceLoad(); 1602 scheduler.YieldWithCoreMigration();
1610 is_redundant = pair.second;
1611 break; 1603 break;
1612 } 1604 }
1613 case SleepType::YieldAndWaitForLoadBalancing: { 1605 case SleepType::YieldAndWaitForLoadBalancing: {
1614 auto pair = current_thread->YieldAndWaitForLoadBalancing(); 1606 scheduler.YieldToAnyThread();
1615 is_redundant = pair.second;
1616 break; 1607 break;
1617 } 1608 }
1618 default: 1609 default:
1619 UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds); 1610 UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
1620 } 1611 }
1621 } else { 1612 } else {
1622 current_thread->Sleep(nanoseconds); 1613 scheduler.GetCurrentThread()->Sleep(nanoseconds);
1623 }
1624
1625 if (is_redundant && !system.Kernel().IsMulticore()) {
1626 system.Kernel().ExitSVCProfile();
1627 system.CoreTiming().AddTicks(1000U);
1628 system.GetCpuManager().PreemptSingleCore();
1629 system.Kernel().EnterSVCProfile();
1630 } 1614 }
1631} 1615}
1632 1616
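For context, the negative values accepted by svcSleepThread select a yield flavour rather than a timed sleep, and the rewritten handler forwards them to the new scheduler entry points. A hedged sketch of that mapping follows; the scheduler type is templated because this is illustrative, not the real KScheduler interface.

#include <cstdint>

enum class SleepType : std::int64_t {
    YieldWithoutCoreMigration = 0,
    YieldWithCoreMigration = -1,
    YieldAndWaitForLoadBalancing = -2,
};

// Hypothetical dispatcher mirroring the structure of the new SleepThread body.
template <typename Scheduler>
void DispatchSleep(Scheduler& scheduler, std::int64_t nanoseconds) {
    if (nanoseconds > 0) {
        scheduler.GetCurrentThread()->Sleep(nanoseconds); // ordinary timed sleep
        return;
    }
    switch (static_cast<SleepType>(nanoseconds)) {
    case SleepType::YieldWithoutCoreMigration:
        scheduler.YieldWithoutCoreMigration(); // reschedule on the current core only
        break;
    case SleepType::YieldWithCoreMigration:
        scheduler.YieldWithCoreMigration(); // allow the thread to migrate cores
        break;
    case SleepType::YieldAndWaitForLoadBalancing:
        scheduler.YieldToAnyThread(); // give the core away until rebalanced
        break;
    default:
        break; // other negative values hit UNREACHABLE_MSG in the real handler
    }
}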
@@ -1661,10 +1645,10 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
1661 ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); 1645 ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
1662 auto& kernel = system.Kernel(); 1646 auto& kernel = system.Kernel();
1663 Handle event_handle; 1647 Handle event_handle;
1664 Thread* current_thread = system.CurrentScheduler().GetCurrentThread(); 1648 Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
1665 auto* const current_process = system.Kernel().CurrentProcess(); 1649 auto* const current_process = kernel.CurrentProcess();
1666 { 1650 {
1667 SchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds); 1651 KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
1668 const auto& handle_table = current_process->GetHandleTable(); 1652 const auto& handle_table = current_process->GetHandleTable();
1669 std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle); 1653 std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
1670 ASSERT(thread); 1654 ASSERT(thread);
@@ -1700,7 +1684,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
1700 } 1684 }
1701 1685
1702 { 1686 {
1703 SchedulerLock lock(kernel); 1687 KScopedSchedulerLock lock(kernel);
1704 1688
1705 auto* owner = current_thread->GetLockOwner(); 1689 auto* owner = current_thread->GetLockOwner();
1706 if (owner != nullptr) { 1690 if (owner != nullptr) {
@@ -1731,7 +1715,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
1731 1715
1732 // Retrieve a list of all threads that are waiting for this condition variable. 1716 // Retrieve a list of all threads that are waiting for this condition variable.
1733 auto& kernel = system.Kernel(); 1717 auto& kernel = system.Kernel();
1734 SchedulerLock lock(kernel); 1718 KScopedSchedulerLock lock(kernel);
1735 auto* const current_process = kernel.CurrentProcess(); 1719 auto* const current_process = kernel.CurrentProcess();
1736 std::vector<std::shared_ptr<Thread>> waiting_threads = 1720 std::vector<std::shared_ptr<Thread>> waiting_threads =
1737 current_process->GetConditionVariableThreads(condition_variable_addr); 1721 current_process->GetConditionVariableThreads(condition_variable_addr);
@@ -1993,7 +1977,7 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
1993 } 1977 }
1994 1978
1995 *core = thread->GetIdealCore(); 1979 *core = thread->GetIdealCore();
1996 *mask = thread->GetAffinityMask(); 1980 *mask = thread->GetAffinityMask().GetAffinityMask();
1997 1981
1998 return RESULT_SUCCESS; 1982 return RESULT_SUCCESS;
1999} 1983}
@@ -2629,7 +2613,7 @@ void Call(Core::System& system, u32 immediate) {
2629 auto& kernel = system.Kernel(); 2613 auto& kernel = system.Kernel();
2630 kernel.EnterSVCProfile(); 2614 kernel.EnterSVCProfile();
2631 2615
2632 auto* thread = system.CurrentScheduler().GetCurrentThread(); 2616 auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
2633 thread->SetContinuousOnSVC(true); 2617 thread->SetContinuousOnSVC(true);
2634 2618
2635 const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate) 2619 const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)
diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp
index 8b875d853..d3f520ea2 100644
--- a/src/core/hle/kernel/synchronization.cpp
+++ b/src/core/hle/kernel/synchronization.cpp
@@ -5,8 +5,9 @@
5#include "core/core.h" 5#include "core/core.h"
6#include "core/hle/kernel/errors.h" 6#include "core/hle/kernel/errors.h"
7#include "core/hle/kernel/handle_table.h" 7#include "core/hle/kernel/handle_table.h"
8#include "core/hle/kernel/k_scheduler.h"
9#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
8#include "core/hle/kernel/kernel.h" 10#include "core/hle/kernel/kernel.h"
9#include "core/hle/kernel/scheduler.h"
10#include "core/hle/kernel/synchronization.h" 11#include "core/hle/kernel/synchronization.h"
11#include "core/hle/kernel/synchronization_object.h" 12#include "core/hle/kernel/synchronization_object.h"
12#include "core/hle/kernel/thread.h" 13#include "core/hle/kernel/thread.h"
@@ -18,7 +19,7 @@ Synchronization::Synchronization(Core::System& system) : system{system} {}
18 19
19void Synchronization::SignalObject(SynchronizationObject& obj) const { 20void Synchronization::SignalObject(SynchronizationObject& obj) const {
20 auto& kernel = system.Kernel(); 21 auto& kernel = system.Kernel();
21 SchedulerLock lock(kernel); 22 KScopedSchedulerLock lock(kernel);
22 if (obj.IsSignaled()) { 23 if (obj.IsSignaled()) {
23 for (auto thread : obj.GetWaitingThreads()) { 24 for (auto thread : obj.GetWaitingThreads()) {
24 if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) { 25 if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) {
@@ -37,10 +38,10 @@ void Synchronization::SignalObject(SynchronizationObject& obj) const {
37std::pair<ResultCode, Handle> Synchronization::WaitFor( 38std::pair<ResultCode, Handle> Synchronization::WaitFor(
38 std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) { 39 std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) {
39 auto& kernel = system.Kernel(); 40 auto& kernel = system.Kernel();
40 auto* const thread = system.CurrentScheduler().GetCurrentThread(); 41 auto* const thread = kernel.CurrentScheduler()->GetCurrentThread();
41 Handle event_handle = InvalidHandle; 42 Handle event_handle = InvalidHandle;
42 { 43 {
43 SchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds); 44 KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
44 const auto itr = 45 const auto itr =
45 std::find_if(sync_objects.begin(), sync_objects.end(), 46 std::find_if(sync_objects.begin(), sync_objects.end(),
46 [thread](const std::shared_ptr<SynchronizationObject>& object) { 47 [thread](const std::shared_ptr<SynchronizationObject>& object) {
@@ -89,7 +90,7 @@ std::pair<ResultCode, Handle> Synchronization::WaitFor(
89 } 90 }
90 91
91 { 92 {
92 SchedulerLock lock(kernel); 93 KScopedSchedulerLock lock(kernel);
93 ResultCode signaling_result = thread->GetSignalingResult(); 94 ResultCode signaling_result = thread->GetSignalingResult();
94 SynchronizationObject* signaling_object = thread->GetSignalingObject(); 95 SynchronizationObject* signaling_object = thread->GetSignalingObject();
95 thread->SetSynchronizationObjects(nullptr); 96 thread->SetSynchronizationObjects(nullptr);
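The KScopedSchedulerLockAndSleep used in WaitFor combines taking the scheduler lock with arming a wakeup timeout for the blocked thread; CancelSleep() suppresses the timer when the wait completes immediately. A rough, illustrative-only sketch of that shape, using std::function stand-ins instead of the real kernel types:

#include <cstdint>
#include <functional>
#include <utility>

class ScopedLockAndSleep {
public:
    ScopedLockAndSleep(std::function<void()> lock, std::function<void()> unlock,
                       std::function<void(std::int64_t)> schedule_wakeup,
                       std::int64_t nanoseconds)
        : unlock_{std::move(unlock)}, schedule_wakeup_{std::move(schedule_wakeup)},
          nanoseconds_{nanoseconds} {
        lock(); // the lock is held for the whole scope, like the kernel helper
    }

    ~ScopedLockAndSleep() {
        if (!cancelled_ && nanoseconds_ > 0) {
            schedule_wakeup_(nanoseconds_); // arm the timeout before releasing
        }
        unlock_();
    }

    void CancelSleep() {
        cancelled_ = true; // e.g. an object was already signaled, no timer needed
    }

private:
    std::function<void()> unlock_;
    std::function<void(std::int64_t)> schedule_wakeup_;
    std::int64_t nanoseconds_{};
    bool cancelled_{false};
};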
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 7d1eb2c6e..a4f9e0d97 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -17,10 +17,11 @@
17#include "core/hardware_properties.h" 17#include "core/hardware_properties.h"
18#include "core/hle/kernel/errors.h" 18#include "core/hle/kernel/errors.h"
19#include "core/hle/kernel/handle_table.h" 19#include "core/hle/kernel/handle_table.h"
20#include "core/hle/kernel/k_scheduler.h"
21#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
20#include "core/hle/kernel/kernel.h" 22#include "core/hle/kernel/kernel.h"
21#include "core/hle/kernel/object.h" 23#include "core/hle/kernel/object.h"
22#include "core/hle/kernel/process.h" 24#include "core/hle/kernel/process.h"
23#include "core/hle/kernel/scheduler.h"
24#include "core/hle/kernel/thread.h" 25#include "core/hle/kernel/thread.h"
25#include "core/hle/kernel/time_manager.h" 26#include "core/hle/kernel/time_manager.h"
26#include "core/hle/result.h" 27#include "core/hle/result.h"
@@ -50,7 +51,7 @@ Thread::~Thread() = default;
50 51
51void Thread::Stop() { 52void Thread::Stop() {
52 { 53 {
53 SchedulerLock lock(kernel); 54 KScopedSchedulerLock lock(kernel);
54 SetStatus(ThreadStatus::Dead); 55 SetStatus(ThreadStatus::Dead);
55 Signal(); 56 Signal();
56 kernel.GlobalHandleTable().Close(global_handle); 57 kernel.GlobalHandleTable().Close(global_handle);
@@ -67,7 +68,7 @@ void Thread::Stop() {
67} 68}
68 69
69void Thread::ResumeFromWait() { 70void Thread::ResumeFromWait() {
70 SchedulerLock lock(kernel); 71 KScopedSchedulerLock lock(kernel);
71 switch (status) { 72 switch (status) {
72 case ThreadStatus::Paused: 73 case ThreadStatus::Paused:
73 case ThreadStatus::WaitSynch: 74 case ThreadStatus::WaitSynch:
@@ -99,19 +100,18 @@ void Thread::ResumeFromWait() {
99} 100}
100 101
101void Thread::OnWakeUp() { 102void Thread::OnWakeUp() {
102 SchedulerLock lock(kernel); 103 KScopedSchedulerLock lock(kernel);
103
104 SetStatus(ThreadStatus::Ready); 104 SetStatus(ThreadStatus::Ready);
105} 105}
106 106
107ResultCode Thread::Start() { 107ResultCode Thread::Start() {
108 SchedulerLock lock(kernel); 108 KScopedSchedulerLock lock(kernel);
109 SetStatus(ThreadStatus::Ready); 109 SetStatus(ThreadStatus::Ready);
110 return RESULT_SUCCESS; 110 return RESULT_SUCCESS;
111} 111}
112 112
113void Thread::CancelWait() { 113void Thread::CancelWait() {
114 SchedulerLock lock(kernel); 114 KScopedSchedulerLock lock(kernel);
115 if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) { 115 if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) {
116 is_sync_cancelled = true; 116 is_sync_cancelled = true;
117 return; 117 return;
@@ -186,12 +186,14 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
186 thread->status = ThreadStatus::Dormant; 186 thread->status = ThreadStatus::Dormant;
187 thread->entry_point = entry_point; 187 thread->entry_point = entry_point;
188 thread->stack_top = stack_top; 188 thread->stack_top = stack_top;
189 thread->disable_count = 1;
189 thread->tpidr_el0 = 0; 190 thread->tpidr_el0 = 0;
190 thread->nominal_priority = thread->current_priority = priority; 191 thread->nominal_priority = thread->current_priority = priority;
191 thread->last_running_ticks = 0; 192 thread->schedule_count = -1;
193 thread->last_scheduled_tick = 0;
192 thread->processor_id = processor_id; 194 thread->processor_id = processor_id;
193 thread->ideal_core = processor_id; 195 thread->ideal_core = processor_id;
194 thread->affinity_mask = 1ULL << processor_id; 196 thread->affinity_mask.SetAffinity(processor_id, true);
195 thread->wait_objects = nullptr; 197 thread->wait_objects = nullptr;
196 thread->mutex_wait_address = 0; 198 thread->mutex_wait_address = 0;
197 thread->condvar_wait_address = 0; 199 thread->condvar_wait_address = 0;
@@ -201,7 +203,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
201 thread->owner_process = owner_process; 203 thread->owner_process = owner_process;
202 thread->type = type_flags; 204 thread->type = type_flags;
203 if ((type_flags & THREADTYPE_IDLE) == 0) { 205 if ((type_flags & THREADTYPE_IDLE) == 0) {
204 auto& scheduler = kernel.GlobalScheduler(); 206 auto& scheduler = kernel.GlobalSchedulerContext();
205 scheduler.AddThread(thread); 207 scheduler.AddThread(thread);
206 } 208 }
207 if (owner_process) { 209 if (owner_process) {
@@ -225,7 +227,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
225} 227}
226 228
227void Thread::SetPriority(u32 priority) { 229void Thread::SetPriority(u32 priority) {
228 SchedulerLock lock(kernel); 230 KScopedSchedulerLock lock(kernel);
229 ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST, 231 ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
230 "Invalid priority value."); 232 "Invalid priority value.");
231 nominal_priority = priority; 233 nominal_priority = priority;
@@ -362,7 +364,7 @@ bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
362} 364}
363 365
364ResultCode Thread::SetActivity(ThreadActivity value) { 366ResultCode Thread::SetActivity(ThreadActivity value) {
365 SchedulerLock lock(kernel); 367 KScopedSchedulerLock lock(kernel);
366 368
367 auto sched_status = GetSchedulingStatus(); 369 auto sched_status = GetSchedulingStatus();
368 370
@@ -391,7 +393,7 @@ ResultCode Thread::SetActivity(ThreadActivity value) {
391ResultCode Thread::Sleep(s64 nanoseconds) { 393ResultCode Thread::Sleep(s64 nanoseconds) {
392 Handle event_handle{}; 394 Handle event_handle{};
393 { 395 {
394 SchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds); 396 KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
395 SetStatus(ThreadStatus::WaitSleep); 397 SetStatus(ThreadStatus::WaitSleep);
396 } 398 }
397 399
@@ -402,39 +404,12 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
402 return RESULT_SUCCESS; 404 return RESULT_SUCCESS;
403} 405}
404 406
405std::pair<ResultCode, bool> Thread::YieldSimple() {
406 bool is_redundant = false;
407 {
408 SchedulerLock lock(kernel);
409 is_redundant = kernel.GlobalScheduler().YieldThread(this);
410 }
411 return {RESULT_SUCCESS, is_redundant};
412}
413
414std::pair<ResultCode, bool> Thread::YieldAndBalanceLoad() {
415 bool is_redundant = false;
416 {
417 SchedulerLock lock(kernel);
418 is_redundant = kernel.GlobalScheduler().YieldThreadAndBalanceLoad(this);
419 }
420 return {RESULT_SUCCESS, is_redundant};
421}
422
423std::pair<ResultCode, bool> Thread::YieldAndWaitForLoadBalancing() {
424 bool is_redundant = false;
425 {
426 SchedulerLock lock(kernel);
427 is_redundant = kernel.GlobalScheduler().YieldThreadAndWaitForLoadBalancing(this);
428 }
429 return {RESULT_SUCCESS, is_redundant};
430}
431
432void Thread::AddSchedulingFlag(ThreadSchedFlags flag) { 407void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
433 const u32 old_state = scheduling_state; 408 const u32 old_state = scheduling_state;
434 pausing_state |= static_cast<u32>(flag); 409 pausing_state |= static_cast<u32>(flag);
435 const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus()); 410 const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
436 scheduling_state = base_scheduling | pausing_state; 411 scheduling_state = base_scheduling | pausing_state;
437 kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state); 412 KScheduler::OnThreadStateChanged(kernel, this, old_state);
438} 413}
439 414
440void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) { 415void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
@@ -442,23 +417,24 @@ void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
442 pausing_state &= ~static_cast<u32>(flag); 417 pausing_state &= ~static_cast<u32>(flag);
443 const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus()); 418 const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
444 scheduling_state = base_scheduling | pausing_state; 419 scheduling_state = base_scheduling | pausing_state;
445 kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state); 420 KScheduler::OnThreadStateChanged(kernel, this, old_state);
446} 421}
447 422
448void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) { 423void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
449 const u32 old_state = scheduling_state; 424 const u32 old_state = scheduling_state;
450 scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) | 425 scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
451 static_cast<u32>(new_status); 426 static_cast<u32>(new_status);
452 kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state); 427 KScheduler::OnThreadStateChanged(kernel, this, old_state);
453} 428}
454 429
455void Thread::SetCurrentPriority(u32 new_priority) { 430void Thread::SetCurrentPriority(u32 new_priority) {
456 const u32 old_priority = std::exchange(current_priority, new_priority); 431 const u32 old_priority = std::exchange(current_priority, new_priority);
457 kernel.GlobalScheduler().AdjustSchedulingOnPriority(this, old_priority); 432 KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(),
433 old_priority);
458} 434}
459 435
460ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { 436ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
461 SchedulerLock lock(kernel); 437 KScopedSchedulerLock lock(kernel);
462 const auto HighestSetCore = [](u64 mask, u32 max_cores) { 438 const auto HighestSetCore = [](u64 mask, u32 max_cores) {
463 for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) { 439 for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
464 if (((mask >> core) & 1) != 0) { 440 if (((mask >> core) & 1) != 0) {
@@ -479,20 +455,21 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
479 } 455 }
480 if (use_override) { 456 if (use_override) {
481 ideal_core_override = new_core; 457 ideal_core_override = new_core;
482 affinity_mask_override = new_affinity_mask;
483 } else { 458 } else {
484 const u64 old_affinity_mask = std::exchange(affinity_mask, new_affinity_mask); 459 const auto old_affinity_mask = affinity_mask;
460 affinity_mask.SetAffinityMask(new_affinity_mask);
485 ideal_core = new_core; 461 ideal_core = new_core;
486 if (old_affinity_mask != new_affinity_mask) { 462 if (old_affinity_mask.GetAffinityMask() != new_affinity_mask) {
487 const s32 old_core = processor_id; 463 const s32 old_core = processor_id;
488 if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) { 464 if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) {
489 if (static_cast<s32>(ideal_core) < 0) { 465 if (static_cast<s32>(ideal_core) < 0) {
490 processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES); 466 processor_id = HighestSetCore(affinity_mask.GetAffinityMask(),
467 Core::Hardware::NUM_CPU_CORES);
491 } else { 468 } else {
492 processor_id = ideal_core; 469 processor_id = ideal_core;
493 } 470 }
494 } 471 }
495 kernel.GlobalScheduler().AdjustSchedulingOnAffinity(this, old_affinity_mask, old_core); 472 KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_affinity_mask, old_core);
496 } 473 }
497 } 474 }
498 return RESULT_SUCCESS; 475 return RESULT_SUCCESS;
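Thread's raw u64 affinity_mask becomes a KAffinityMask object with SetAffinity/SetAffinityMask/GetAffinity/GetAffinityMask, as used above. A minimal sketch of such a wrapper, assuming it is a plain bitmask over core indices (illustrative, not the actual Atmosphère-derived class):

#include <cstdint>

class AffinityMask {
public:
    constexpr std::uint64_t GetAffinityMask() const {
        return mask;
    }

    constexpr void SetAffinityMask(std::uint64_t new_mask) {
        mask = new_mask;
    }

    // Whether the given core is allowed by the mask.
    constexpr bool GetAffinity(std::int32_t core) const {
        return ((mask >> core) & 1ULL) != 0;
    }

    // Enable or disable a single core in the mask.
    constexpr void SetAffinity(std::int32_t core, bool allowed) {
        if (allowed) {
            mask |= (1ULL << core);
        } else {
            mask &= ~(1ULL << core);
        }
    }

private:
    std::uint64_t mask{};
};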
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index a75071e9b..11ef29888 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -4,6 +4,7 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <array>
7#include <functional> 8#include <functional>
8#include <string> 9#include <string>
9#include <utility> 10#include <utility>
@@ -12,6 +13,7 @@
12#include "common/common_types.h" 13#include "common/common_types.h"
13#include "common/spin_lock.h" 14#include "common/spin_lock.h"
14#include "core/arm/arm_interface.h" 15#include "core/arm/arm_interface.h"
16#include "core/hle/kernel/k_affinity_mask.h"
15#include "core/hle/kernel/object.h" 17#include "core/hle/kernel/object.h"
16#include "core/hle/kernel/synchronization_object.h" 18#include "core/hle/kernel/synchronization_object.h"
17#include "core/hle/result.h" 19#include "core/hle/result.h"
@@ -27,10 +29,10 @@ class System;
27 29
28namespace Kernel { 30namespace Kernel {
29 31
30class GlobalScheduler; 32class GlobalSchedulerContext;
31class KernelCore; 33class KernelCore;
32class Process; 34class Process;
33class Scheduler; 35class KScheduler;
34 36
35enum ThreadPriority : u32 { 37enum ThreadPriority : u32 {
36 THREADPRIO_HIGHEST = 0, ///< Highest thread priority 38 THREADPRIO_HIGHEST = 0, ///< Highest thread priority
@@ -345,8 +347,12 @@ public:
345 347
346 void SetStatus(ThreadStatus new_status); 348 void SetStatus(ThreadStatus new_status);
347 349
348 u64 GetLastRunningTicks() const { 350 s64 GetLastScheduledTick() const {
349 return last_running_ticks; 351 return this->last_scheduled_tick;
352 }
353
354 void SetLastScheduledTick(s64 tick) {
355 this->last_scheduled_tick = tick;
350 } 356 }
351 357
352 u64 GetTotalCPUTimeTicks() const { 358 u64 GetTotalCPUTimeTicks() const {
@@ -361,10 +367,18 @@ public:
361 return processor_id; 367 return processor_id;
362 } 368 }
363 369
370 s32 GetActiveCore() const {
371 return GetProcessorID();
372 }
373
364 void SetProcessorID(s32 new_core) { 374 void SetProcessorID(s32 new_core) {
365 processor_id = new_core; 375 processor_id = new_core;
366 } 376 }
367 377
378 void SetActiveCore(s32 new_core) {
379 processor_id = new_core;
380 }
381
368 Process* GetOwnerProcess() { 382 Process* GetOwnerProcess() {
369 return owner_process; 383 return owner_process;
370 } 384 }
@@ -469,7 +483,7 @@ public:
469 return ideal_core; 483 return ideal_core;
470 } 484 }
471 485
472 u64 GetAffinityMask() const { 486 const KAffinityMask& GetAffinityMask() const {
473 return affinity_mask; 487 return affinity_mask;
474 } 488 }
475 489
@@ -478,21 +492,12 @@ public:
478 /// Sleeps this thread for the given amount of nanoseconds. 492 /// Sleeps this thread for the given amount of nanoseconds.
479 ResultCode Sleep(s64 nanoseconds); 493 ResultCode Sleep(s64 nanoseconds);
480 494
481 /// Yields this thread without rebalancing loads. 495 s64 GetYieldScheduleCount() const {
482 std::pair<ResultCode, bool> YieldSimple(); 496 return this->schedule_count;
483
484 /// Yields this thread and does a load rebalancing.
485 std::pair<ResultCode, bool> YieldAndBalanceLoad();
486
487 /// Yields this thread and if the core is left idle, loads are rebalanced
488 std::pair<ResultCode, bool> YieldAndWaitForLoadBalancing();
489
490 void IncrementYieldCount() {
491 yield_count++;
492 } 497 }
493 498
494 u64 GetYieldCount() const { 499 void SetYieldScheduleCount(s64 count) {
495 return yield_count; 500 this->schedule_count = count;
496 } 501 }
497 502
498 ThreadSchedStatus GetSchedulingStatus() const { 503 ThreadSchedStatus GetSchedulingStatus() const {
@@ -568,9 +573,59 @@ public:
568 return has_exited; 573 return has_exited;
569 } 574 }
570 575
576 class QueueEntry {
577 public:
578 constexpr QueueEntry() = default;
579
580 constexpr void Initialize() {
581 this->prev = nullptr;
582 this->next = nullptr;
583 }
584
585 constexpr Thread* GetPrev() const {
586 return this->prev;
587 }
588 constexpr Thread* GetNext() const {
589 return this->next;
590 }
591 constexpr void SetPrev(Thread* thread) {
592 this->prev = thread;
593 }
594 constexpr void SetNext(Thread* thread) {
595 this->next = thread;
596 }
597
598 private:
599 Thread* prev{};
600 Thread* next{};
601 };
602
603 QueueEntry& GetPriorityQueueEntry(s32 core) {
604 return this->per_core_priority_queue_entry[core];
605 }
606
607 const QueueEntry& GetPriorityQueueEntry(s32 core) const {
608 return this->per_core_priority_queue_entry[core];
609 }
610
611 s32 GetDisableDispatchCount() const {
612 return disable_count;
613 }
614
615 void DisableDispatch() {
616 ASSERT(GetDisableDispatchCount() >= 0);
617 disable_count++;
618 }
619
620 void EnableDispatch() {
621 ASSERT(GetDisableDispatchCount() > 0);
622 disable_count--;
623 }
624
571private: 625private:
572 friend class GlobalScheduler; 626 friend class GlobalSchedulerContext;
573 friend class Scheduler; 627 friend class KScheduler;
628 friend class Process;
574 629
575 void SetSchedulingStatus(ThreadSchedStatus new_status); 630 void SetSchedulingStatus(ThreadSchedStatus new_status);
576 void AddSchedulingFlag(ThreadSchedFlags flag); 631 void AddSchedulingFlag(ThreadSchedFlags flag);
@@ -583,12 +638,14 @@ private:
583 ThreadContext64 context_64{}; 638 ThreadContext64 context_64{};
584 std::shared_ptr<Common::Fiber> host_context{}; 639 std::shared_ptr<Common::Fiber> host_context{};
585 640
586 u64 thread_id = 0;
587
588 ThreadStatus status = ThreadStatus::Dormant; 641 ThreadStatus status = ThreadStatus::Dormant;
642 u32 scheduling_state = 0;
643
644 u64 thread_id = 0;
589 645
590 VAddr entry_point = 0; 646 VAddr entry_point = 0;
591 VAddr stack_top = 0; 647 VAddr stack_top = 0;
648 std::atomic_int disable_count = 0;
592 649
593 ThreadType type; 650 ThreadType type;
594 651
@@ -602,9 +659,8 @@ private:
602 u32 current_priority = 0; 659 u32 current_priority = 0;
603 660
604 u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks. 661 u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
605 u64 last_running_ticks = 0; ///< CPU tick when thread was last running 662 s64 schedule_count{};
606 u64 yield_count = 0; ///< Number of redundant yields carried by this thread. 663 s64 last_scheduled_tick{};
607 ///< a redundant yield is one where no scheduling is changed
608 664
609 s32 processor_id = 0; 665 s32 processor_id = 0;
610 666
@@ -646,16 +702,16 @@ private:
646 Handle hle_time_event; 702 Handle hle_time_event;
647 SynchronizationObject* hle_object; 703 SynchronizationObject* hle_object;
648 704
649 Scheduler* scheduler = nullptr; 705 KScheduler* scheduler = nullptr;
706
707 std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
650 708
651 u32 ideal_core{0xFFFFFFFF}; 709 u32 ideal_core{0xFFFFFFFF};
652 u64 affinity_mask{0x1}; 710 KAffinityMask affinity_mask{};
653 711
654 s32 ideal_core_override = -1; 712 s32 ideal_core_override = -1;
655 u64 affinity_mask_override = 0x1;
656 u32 affinity_override_count = 0; 713 u32 affinity_override_count = 0;
657 714
658 u32 scheduling_state = 0;
659 u32 pausing_state = 0; 715 u32 pausing_state = 0;
660 bool is_running = false; 716 bool is_running = false;
661 bool is_waiting_on_sync = false; 717 bool is_waiting_on_sync = false;
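The new Thread::QueueEntry members give every thread one embedded list node per CPU core, so the priority queue can link and unlink threads without heap allocation. A small sketch of that intrusive-list idea (names and the 4-core constant are assumptions for illustration):

#include <array>
#include <cstddef>

constexpr std::size_t NumCores = 4;

struct Thread;

// One node per core, stored inside the thread itself (mirrors Thread::QueueEntry).
struct QueueEntry {
    Thread* prev{};
    Thread* next{};
};

struct Thread {
    std::array<QueueEntry, NumCores> per_core_entry{};
};

// Minimal per-core FIFO built on the embedded nodes: enqueueing a thread only
// rewires pointers, it never allocates.
struct PerCoreQueue {
    std::size_t core{};
    Thread* head{};
    Thread* tail{};

    void PushBack(Thread* t) {
        QueueEntry& entry = t->per_core_entry[core];
        entry.prev = tail;
        entry.next = nullptr;
        if (tail != nullptr) {
            tail->per_core_entry[core].next = t;
        } else {
            head = t;
        }
        tail = t;
    }
};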
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index caf329bfb..79628e2b4 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -7,8 +7,8 @@
7#include "core/core_timing.h" 7#include "core/core_timing.h"
8#include "core/core_timing_util.h" 8#include "core/core_timing_util.h"
9#include "core/hle/kernel/handle_table.h" 9#include "core/hle/kernel/handle_table.h"
10#include "core/hle/kernel/k_scheduler.h"
10#include "core/hle/kernel/kernel.h" 11#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/scheduler.h"
12#include "core/hle/kernel/thread.h" 12#include "core/hle/kernel/thread.h"
13#include "core/hle/kernel/time_manager.h" 13#include "core/hle/kernel/time_manager.h"
14 14
@@ -18,12 +18,18 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
18 time_manager_event_type = Core::Timing::CreateEvent( 18 time_manager_event_type = Core::Timing::CreateEvent(
19 "Kernel::TimeManagerCallback", 19 "Kernel::TimeManagerCallback",
20 [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { 20 [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
21 const SchedulerLock lock(system.Kernel()); 21 const KScopedSchedulerLock lock(system.Kernel());
22 const auto proper_handle = static_cast<Handle>(thread_handle); 22 const auto proper_handle = static_cast<Handle>(thread_handle);
23 if (cancelled_events[proper_handle]) { 23
24 return; 24 std::shared_ptr<Thread> thread;
25 {
26 std::lock_guard lock{mutex};
27 if (cancelled_events[proper_handle]) {
28 return;
29 }
30 thread = system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
25 } 31 }
26 auto thread = this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle); 32
27 if (thread) { 33 if (thread) {
28 // Thread can be null if process has exited 34 // Thread can be null if process has exited
29 thread->OnWakeUp(); 35 thread->OnWakeUp();
@@ -56,6 +62,7 @@ void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
56} 62}
57 63
58void TimeManager::CancelTimeEvent(Thread* time_task) { 64void TimeManager::CancelTimeEvent(Thread* time_task) {
65 std::lock_guard lock{mutex};
59 const Handle event_handle = time_task->GetGlobalHandle(); 66 const Handle event_handle = time_task->GetGlobalHandle();
60 UnscheduleTimeEvent(event_handle); 67 UnscheduleTimeEvent(event_handle);
61} 68}
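The added mutex closes a race between the timer callback, which reads cancelled_events on the core-timing thread, and CancelTimeEvent, which can run from another host thread. The pattern is the usual guarded-map one; a hedged sketch, with a plain int handle standing in for Kernel::Handle:

#include <mutex>
#include <unordered_map>

class TimeEventFlags {
public:
    void Cancel(int handle) {
        std::lock_guard lock{mutex};
        cancelled[handle] = true;
    }

    bool IsCancelled(int handle) {
        std::lock_guard lock{mutex};
        return cancelled[handle]; // default-constructed entries read as false
    }

private:
    std::mutex mutex;
    std::unordered_map<int, bool> cancelled;
};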
diff --git a/src/core/hle/service/nvflinger/buffer_queue.cpp b/src/core/hle/service/nvflinger/buffer_queue.cpp
index 191286ce9..377f47e8e 100644
--- a/src/core/hle/service/nvflinger/buffer_queue.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue.cpp
@@ -22,10 +22,11 @@ BufferQueue::BufferQueue(Kernel::KernelCore& kernel, u32 id, u64 layer_id)
22BufferQueue::~BufferQueue() = default; 22BufferQueue::~BufferQueue() = default;
23 23
24void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer) { 24void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer) {
25 ASSERT(slot < buffer_slots);
25 LOG_WARNING(Service, "Adding graphics buffer {}", slot); 26 LOG_WARNING(Service, "Adding graphics buffer {}", slot);
26 27
27 free_buffers.push_back(slot); 28 free_buffers.push_back(slot);
28 queue.push_back({ 29 buffers[slot] = {
29 .slot = slot, 30 .slot = slot,
30 .status = Buffer::Status::Free, 31 .status = Buffer::Status::Free,
31 .igbp_buffer = igbp_buffer, 32 .igbp_buffer = igbp_buffer,
@@ -33,7 +34,7 @@ void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer)
33 .crop_rect = {}, 34 .crop_rect = {},
34 .swap_interval = 0, 35 .swap_interval = 0,
35 .multi_fence = {}, 36 .multi_fence = {},
36 }); 37 };
37 38
38 buffer_wait_event.writable->Signal(); 39 buffer_wait_event.writable->Signal();
39} 40}
@@ -44,73 +45,57 @@ std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> BufferQueue::Dequeue
44 if (free_buffers.empty()) { 45 if (free_buffers.empty()) {
45 return std::nullopt; 46 return std::nullopt;
46 } 47 }
47
48 auto f_itr = free_buffers.begin(); 48 auto f_itr = free_buffers.begin();
49 auto itr = queue.end(); 49 auto slot = buffers.size();
50 50
51 while (f_itr != free_buffers.end()) { 51 while (f_itr != free_buffers.end()) {
52 auto slot = *f_itr; 52 const Buffer& buffer = buffers[*f_itr];
53 itr = std::find_if(queue.begin(), queue.end(), [&](const Buffer& buffer) { 53 if (buffer.status == Buffer::Status::Free && buffer.igbp_buffer.width == width &&
54 // Only consider free buffers. Buffers become free once again after they've been 54 buffer.igbp_buffer.height == height) {
55 // Acquired and Released by the compositor, see the NVFlinger::Compose method. 55 slot = *f_itr;
56 if (buffer.status != Buffer::Status::Free) {
57 return false;
58 }
59
60 if (buffer.slot != slot) {
61 return false;
62 }
63
64 // Make sure that the parameters match.
65 return buffer.igbp_buffer.width == width && buffer.igbp_buffer.height == height;
66 });
67
68 if (itr != queue.end()) {
69 free_buffers.erase(f_itr); 56 free_buffers.erase(f_itr);
70 break; 57 break;
71 } 58 }
72 ++f_itr; 59 ++f_itr;
73 } 60 }
74 61 if (slot == buffers.size()) {
75 if (itr == queue.end()) {
76 return std::nullopt; 62 return std::nullopt;
77 } 63 }
78 64 buffers[slot].status = Buffer::Status::Dequeued;
79 itr->status = Buffer::Status::Dequeued; 65 return {{buffers[slot].slot, &buffers[slot].multi_fence}};
80 return {{itr->slot, &itr->multi_fence}};
81} 66}
82 67
83const IGBPBuffer& BufferQueue::RequestBuffer(u32 slot) const { 68const IGBPBuffer& BufferQueue::RequestBuffer(u32 slot) const {
84 auto itr = std::find_if(queue.begin(), queue.end(), 69 ASSERT(slot < buffers.size());
85 [&](const Buffer& buffer) { return buffer.slot == slot; }); 70 ASSERT(buffers[slot].status == Buffer::Status::Dequeued);
86 ASSERT(itr != queue.end()); 71 ASSERT(buffers[slot].slot == slot);
87 ASSERT(itr->status == Buffer::Status::Dequeued); 72
88 return itr->igbp_buffer; 73 return buffers[slot].igbp_buffer;
89} 74}
90 75
91void BufferQueue::QueueBuffer(u32 slot, BufferTransformFlags transform, 76void BufferQueue::QueueBuffer(u32 slot, BufferTransformFlags transform,
92 const Common::Rectangle<int>& crop_rect, u32 swap_interval, 77 const Common::Rectangle<int>& crop_rect, u32 swap_interval,
93 Service::Nvidia::MultiFence& multi_fence) { 78 Service::Nvidia::MultiFence& multi_fence) {
94 auto itr = std::find_if(queue.begin(), queue.end(), 79 ASSERT(slot < buffers.size());
95 [&](const Buffer& buffer) { return buffer.slot == slot; }); 80 ASSERT(buffers[slot].status == Buffer::Status::Dequeued);
96 ASSERT(itr != queue.end()); 81 ASSERT(buffers[slot].slot == slot);
97 ASSERT(itr->status == Buffer::Status::Dequeued); 82
98 itr->status = Buffer::Status::Queued; 83 buffers[slot].status = Buffer::Status::Queued;
99 itr->transform = transform; 84 buffers[slot].transform = transform;
100 itr->crop_rect = crop_rect; 85 buffers[slot].crop_rect = crop_rect;
101 itr->swap_interval = swap_interval; 86 buffers[slot].swap_interval = swap_interval;
102 itr->multi_fence = multi_fence; 87 buffers[slot].multi_fence = multi_fence;
103 queue_sequence.push_back(slot); 88 queue_sequence.push_back(slot);
104} 89}
105 90
106void BufferQueue::CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& multi_fence) { 91void BufferQueue::CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& multi_fence) {
107 const auto itr = std::find_if(queue.begin(), queue.end(), 92 ASSERT(slot < buffers.size());
108 [slot](const Buffer& buffer) { return buffer.slot == slot; }); 93 ASSERT(buffers[slot].status != Buffer::Status::Free);
109 ASSERT(itr != queue.end()); 94 ASSERT(buffers[slot].slot == slot);
110 ASSERT(itr->status != Buffer::Status::Free); 95
111 itr->status = Buffer::Status::Free; 96 buffers[slot].status = Buffer::Status::Free;
112 itr->multi_fence = multi_fence; 97 buffers[slot].multi_fence = multi_fence;
113 itr->swap_interval = 0; 98 buffers[slot].swap_interval = 0;
114 99
115 free_buffers.push_back(slot); 100 free_buffers.push_back(slot);
116 101
@@ -118,38 +103,39 @@ void BufferQueue::CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& mult
118} 103}
119 104
120std::optional<std::reference_wrapper<const BufferQueue::Buffer>> BufferQueue::AcquireBuffer() { 105std::optional<std::reference_wrapper<const BufferQueue::Buffer>> BufferQueue::AcquireBuffer() {
121 auto itr = queue.end(); 106 std::size_t buffer_slot = buffers.size();
122 // Iterate to find a queued buffer matching the requested slot. 107 // Iterate to find a queued buffer matching the requested slot.
123 while (itr == queue.end() && !queue_sequence.empty()) { 108 while (buffer_slot == buffers.size() && !queue_sequence.empty()) {
124 const u32 slot = queue_sequence.front(); 109 const auto slot = static_cast<std::size_t>(queue_sequence.front());
125 itr = std::find_if(queue.begin(), queue.end(), [&slot](const Buffer& buffer) { 110 ASSERT(slot < buffers.size());
126 return buffer.status == Buffer::Status::Queued && buffer.slot == slot; 111 if (buffers[slot].status == Buffer::Status::Queued) {
127 }); 112 ASSERT(buffers[slot].slot == slot);
113 buffer_slot = slot;
114 }
128 queue_sequence.pop_front(); 115 queue_sequence.pop_front();
129 } 116 }
130 if (itr == queue.end()) { 117 if (buffer_slot == buffers.size()) {
131 return std::nullopt; 118 return std::nullopt;
132 } 119 }
133 itr->status = Buffer::Status::Acquired; 120 buffers[buffer_slot].status = Buffer::Status::Acquired;
134 return *itr; 121 return {{buffers[buffer_slot]}};
135} 122}
136 123
137void BufferQueue::ReleaseBuffer(u32 slot) { 124void BufferQueue::ReleaseBuffer(u32 slot) {
138 auto itr = std::find_if(queue.begin(), queue.end(), 125 ASSERT(slot < buffers.size());
139 [&](const Buffer& buffer) { return buffer.slot == slot; }); 126 ASSERT(buffers[slot].status == Buffer::Status::Acquired);
140 ASSERT(itr != queue.end()); 127 ASSERT(buffers[slot].slot == slot);
141 ASSERT(itr->status == Buffer::Status::Acquired); 128
142 itr->status = Buffer::Status::Free; 129 buffers[slot].status = Buffer::Status::Free;
143 free_buffers.push_back(slot); 130 free_buffers.push_back(slot);
144 131
145 buffer_wait_event.writable->Signal(); 132 buffer_wait_event.writable->Signal();
146} 133}
147 134
148void BufferQueue::Disconnect() { 135void BufferQueue::Disconnect() {
149 queue.clear(); 136 buffers.fill({});
150 queue_sequence.clear(); 137 queue_sequence.clear();
151 id = 1; 138 buffer_wait_event.writable->Signal();
152 layer_id = 1;
153} 139}
154 140
155u32 BufferQueue::Query(QueryType type) { 141u32 BufferQueue::Query(QueryType type) {
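BufferQueue's std::vector of buffers plus find_if lookups becomes a fixed 0x40-slot array indexed directly by slot, with free_buffers keeping only slot indices. The dequeue path then reduces to a scan of the free list; a simplified sketch of that logic (fence handling omitted, types reduced for illustration):

#include <array>
#include <cstdint>
#include <list>
#include <optional>

constexpr std::size_t BufferSlots = 0x40;

struct Buffer {
    enum class Status { Free, Dequeued, Queued, Acquired };
    Status status{Status::Free};
    std::uint32_t width{};
    std::uint32_t height{};
};

// Pick the first free slot whose dimensions match, mark it Dequeued, and hand
// the slot index back to the caller.
std::optional<std::uint32_t> Dequeue(std::array<Buffer, BufferSlots>& buffers,
                                     std::list<std::uint32_t>& free_buffers,
                                     std::uint32_t width, std::uint32_t height) {
    for (auto it = free_buffers.begin(); it != free_buffers.end(); ++it) {
        Buffer& buffer = buffers[*it];
        if (buffer.status == Buffer::Status::Free && buffer.width == width &&
            buffer.height == height) {
            const std::uint32_t slot = *it;
            free_buffers.erase(it);
            buffers[slot].status = Buffer::Status::Dequeued;
            return slot;
        }
    }
    return std::nullopt; // no compatible free buffer right now
}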
diff --git a/src/core/hle/service/nvflinger/buffer_queue.h b/src/core/hle/service/nvflinger/buffer_queue.h
index e7517c7e1..e610923cb 100644
--- a/src/core/hle/service/nvflinger/buffer_queue.h
+++ b/src/core/hle/service/nvflinger/buffer_queue.h
@@ -21,6 +21,7 @@ class KernelCore;
21 21
22namespace Service::NVFlinger { 22namespace Service::NVFlinger {
23 23
24constexpr u32 buffer_slots = 0x40;
24struct IGBPBuffer { 25struct IGBPBuffer {
25 u32_le magic; 26 u32_le magic;
26 u32_le width; 27 u32_le width;
@@ -114,7 +115,7 @@ private:
114 u64 layer_id; 115 u64 layer_id;
115 116
116 std::list<u32> free_buffers; 117 std::list<u32> free_buffers;
117 std::vector<Buffer> queue; 118 std::array<Buffer, buffer_slots> buffers;
118 std::list<u32> queue_sequence; 119 std::list<u32> queue_sequence;
119 Kernel::EventPair buffer_wait_event; 120 Kernel::EventPair buffer_wait_event;
120}; 121};
diff --git a/src/core/hle/service/time/time.cpp b/src/core/hle/service/time/time.cpp
index 7b7ac282d..abc753d5d 100644
--- a/src/core/hle/service/time/time.cpp
+++ b/src/core/hle/service/time/time.cpp
@@ -10,8 +10,8 @@
10#include "core/hle/ipc_helpers.h" 10#include "core/hle/ipc_helpers.h"
11#include "core/hle/kernel/client_port.h" 11#include "core/hle/kernel/client_port.h"
12#include "core/hle/kernel/client_session.h" 12#include "core/hle/kernel/client_session.h"
13#include "core/hle/kernel/k_scheduler.h"
13#include "core/hle/kernel/kernel.h" 14#include "core/hle/kernel/kernel.h"
14#include "core/hle/kernel/scheduler.h"
15#include "core/hle/service/time/interface.h" 15#include "core/hle/service/time/interface.h"
16#include "core/hle/service/time/time.h" 16#include "core/hle/service/time/time.h"
17#include "core/hle/service/time/time_sharedmemory.h" 17#include "core/hle/service/time/time_sharedmemory.h"
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index 5d8841ae8..45cfffe06 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -282,18 +282,24 @@ public:
282 void DeserializeData() override { 282 void DeserializeData() override {
283 [[maybe_unused]] const std::u16string token = ReadInterfaceToken(); 283 [[maybe_unused]] const std::u16string token = ReadInterfaceToken();
284 data = Read<Data>(); 284 data = Read<Data>();
285 buffer = Read<NVFlinger::IGBPBuffer>(); 285 if (data.contains_object != 0) {
286 buffer_container = Read<BufferContainer>();
287 }
286 } 288 }
287 289
288 struct Data { 290 struct Data {
289 u32_le slot; 291 u32_le slot;
290 INSERT_PADDING_WORDS(1); 292 u32_le contains_object;
293 };
294
295 struct BufferContainer {
291 u32_le graphic_buffer_length; 296 u32_le graphic_buffer_length;
292 INSERT_PADDING_WORDS(1); 297 INSERT_PADDING_WORDS(1);
298 NVFlinger::IGBPBuffer buffer{};
293 }; 299 };
294 300
295 Data data; 301 Data data{};
296 NVFlinger::IGBPBuffer buffer; 302 BufferContainer buffer_container{};
297}; 303};
298 304
299class IGBPSetPreallocatedBufferResponseParcel : public Parcel { 305class IGBPSetPreallocatedBufferResponseParcel : public Parcel {
@@ -547,7 +553,7 @@ private:
547 case TransactionId::SetPreallocatedBuffer: { 553 case TransactionId::SetPreallocatedBuffer: {
548 IGBPSetPreallocatedBufferRequestParcel request{ctx.ReadBuffer()}; 554 IGBPSetPreallocatedBufferRequestParcel request{ctx.ReadBuffer()};
549 555
550 buffer_queue.SetPreallocatedBuffer(request.data.slot, request.buffer); 556 buffer_queue.SetPreallocatedBuffer(request.data.slot, request.buffer_container.buffer);
551 557
552 IGBPSetPreallocatedBufferResponseParcel response{}; 558 IGBPSetPreallocatedBufferResponseParcel response{};
553 ctx.WriteBuffer(response.Serialize()); 559 ctx.WriteBuffer(response.Serialize());
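The parcel fix above reads the IGBPBuffer payload only when the contains_object flag in the header is non-zero, instead of unconditionally reading past the header. A generic sketch of that optional-payload read (hypothetical helper, not the vi.cpp API):

#include <cstdint>
#include <cstring>
#include <vector>

struct ParcelHeader {
    std::uint32_t slot;
    std::uint32_t contains_object;
};

// Copy the header, then the payload only if the header says one is present.
// Returns false if the input is too short for what it claims to contain.
template <typename Payload>
bool ReadOptionalPayload(const std::vector<std::uint8_t>& data, ParcelHeader& header,
                         Payload& payload) {
    if (data.size() < sizeof(ParcelHeader)) {
        return false;
    }
    std::memcpy(&header, data.data(), sizeof(ParcelHeader));
    if (header.contains_object == 0) {
        return true; // nothing further to read
    }
    if (data.size() < sizeof(ParcelHeader) + sizeof(Payload)) {
        return false;
    }
    std::memcpy(&payload, data.data() + sizeof(ParcelHeader), sizeof(Payload));
    return true;
}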
diff --git a/src/core/settings.cpp b/src/core/settings.cpp
index e9997a263..47d9ecf9a 100644
--- a/src/core/settings.cpp
+++ b/src/core/settings.cpp
@@ -72,8 +72,6 @@ void LogSettings() {
72 log_setting("DataStorage_UseVirtualSd", values.use_virtual_sd); 72 log_setting("DataStorage_UseVirtualSd", values.use_virtual_sd);
73 log_setting("DataStorage_NandDir", Common::FS::GetUserPath(Common::FS::UserPath::NANDDir)); 73 log_setting("DataStorage_NandDir", Common::FS::GetUserPath(Common::FS::UserPath::NANDDir));
74 log_setting("DataStorage_SdmcDir", Common::FS::GetUserPath(Common::FS::UserPath::SDMCDir)); 74 log_setting("DataStorage_SdmcDir", Common::FS::GetUserPath(Common::FS::UserPath::SDMCDir));
75 log_setting("Debugging_UseGdbstub", values.use_gdbstub);
76 log_setting("Debugging_GdbstubPort", values.gdbstub_port);
77 log_setting("Debugging_ProgramArgs", values.program_args); 75 log_setting("Debugging_ProgramArgs", values.program_args);
78 log_setting("Services_BCATBackend", values.bcat_backend); 76 log_setting("Services_BCATBackend", values.bcat_backend);
79 log_setting("Services_BCATBoxcatLocal", values.bcat_boxcat_local); 77 log_setting("Services_BCATBoxcatLocal", values.bcat_boxcat_local);
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt
index 47ef30aa9..d80b0b688 100644
--- a/src/tests/CMakeLists.txt
+++ b/src/tests/CMakeLists.txt
@@ -2,7 +2,6 @@ add_executable(tests
2 common/bit_field.cpp 2 common/bit_field.cpp
3 common/bit_utils.cpp 3 common/bit_utils.cpp
4 common/fibers.cpp 4 common/fibers.cpp
5 common/multi_level_queue.cpp
6 common/param_package.cpp 5 common/param_package.cpp
7 common/ring_buffer.cpp 6 common/ring_buffer.cpp
8 core/arm/arm_test_common.cpp 7 core/arm/arm_test_common.cpp
diff --git a/src/tests/common/multi_level_queue.cpp b/src/tests/common/multi_level_queue.cpp
deleted file mode 100644
index cca7ec7da..000000000
--- a/src/tests/common/multi_level_queue.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
1// Copyright 2019 Yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <catch2/catch.hpp>
6#include <math.h>
7#include "common/common_types.h"
8#include "common/multi_level_queue.h"
9
10namespace Common {
11
12TEST_CASE("MultiLevelQueue", "[common]") {
13 std::array<f32, 8> values = {0.0, 5.0, 1.0, 9.0, 8.0, 2.0, 6.0, 7.0};
14 Common::MultiLevelQueue<f32, 64> mlq;
15 REQUIRE(mlq.empty());
16 mlq.add(values[2], 2);
17 mlq.add(values[7], 7);
18 mlq.add(values[3], 3);
19 mlq.add(values[4], 4);
20 mlq.add(values[0], 0);
21 mlq.add(values[5], 5);
22 mlq.add(values[6], 6);
23 mlq.add(values[1], 1);
24 u32 index = 0;
25 bool all_set = true;
26 for (auto& f : mlq) {
27 all_set &= (f == values[index]);
28 index++;
29 }
30 REQUIRE(all_set);
31 REQUIRE(!mlq.empty());
32 f32 v = 8.0;
33 mlq.add(v, 2);
34 v = -7.0;
35 mlq.add(v, 2, false);
36 REQUIRE(mlq.front(2) == -7.0);
37 mlq.yield(2);
38 REQUIRE(mlq.front(2) == values[2]);
39 REQUIRE(mlq.back(2) == -7.0);
40 REQUIRE(mlq.empty(8));
41 v = 10.0;
42 mlq.add(v, 8);
43 mlq.adjust(v, 8, 9);
44 REQUIRE(mlq.front(9) == v);
45 REQUIRE(mlq.empty(8));
46 REQUIRE(!mlq.empty(9));
47 mlq.adjust(values[0], 0, 9);
48 REQUIRE(mlq.highest_priority_set() == 1);
49 REQUIRE(mlq.lowest_priority_set() == 9);
50 mlq.remove(values[1], 1);
51 REQUIRE(mlq.highest_priority_set() == 2);
52 REQUIRE(mlq.empty(1));
53}
54
55} // namespace Common
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp
index 0ec5b861a..9fb254986 100644
--- a/src/yuzu/configuration/config.cpp
+++ b/src/yuzu/configuration/config.cpp
@@ -637,8 +637,6 @@ void Config::ReadDebuggingValues() {
637 // Intentionally not using the QT default setting as this is intended to be changed in the ini 637 // Intentionally not using the QT default setting as this is intended to be changed in the ini
638 Settings::values.record_frame_times = 638 Settings::values.record_frame_times =
639 qt_config->value(QStringLiteral("record_frame_times"), false).toBool(); 639 qt_config->value(QStringLiteral("record_frame_times"), false).toBool();
640 Settings::values.use_gdbstub = ReadSetting(QStringLiteral("use_gdbstub"), false).toBool();
641 Settings::values.gdbstub_port = ReadSetting(QStringLiteral("gdbstub_port"), 24689).toInt();
642 Settings::values.program_args = 640 Settings::values.program_args =
643 ReadSetting(QStringLiteral("program_args"), QString{}).toString().toStdString(); 641 ReadSetting(QStringLiteral("program_args"), QString{}).toString().toStdString();
644 Settings::values.dump_exefs = ReadSetting(QStringLiteral("dump_exefs"), false).toBool(); 642 Settings::values.dump_exefs = ReadSetting(QStringLiteral("dump_exefs"), false).toBool();
@@ -1236,8 +1234,6 @@ void Config::SaveDebuggingValues() {
1236 1234
1237 // Intentionally not using the QT default setting as this is intended to be changed in the ini 1235 // Intentionally not using the QT default setting as this is intended to be changed in the ini
1238 qt_config->setValue(QStringLiteral("record_frame_times"), Settings::values.record_frame_times); 1236 qt_config->setValue(QStringLiteral("record_frame_times"), Settings::values.record_frame_times);
1239 WriteSetting(QStringLiteral("use_gdbstub"), Settings::values.use_gdbstub, false);
1240 WriteSetting(QStringLiteral("gdbstub_port"), Settings::values.gdbstub_port, 24689);
1241 WriteSetting(QStringLiteral("program_args"), 1237 WriteSetting(QStringLiteral("program_args"),
1242 QString::fromStdString(Settings::values.program_args), QString{}); 1238 QString::fromStdString(Settings::values.program_args), QString{});
1243 WriteSetting(QStringLiteral("dump_exefs"), Settings::values.dump_exefs, false); 1239 WriteSetting(QStringLiteral("dump_exefs"), Settings::values.dump_exefs, false);
diff --git a/src/yuzu/configuration/configure_debug.cpp b/src/yuzu/configuration/configure_debug.cpp
index 027099ab7..121873f95 100644
--- a/src/yuzu/configuration/configure_debug.cpp
+++ b/src/yuzu/configuration/configure_debug.cpp
@@ -28,9 +28,6 @@ ConfigureDebug::ConfigureDebug(QWidget* parent) : QWidget(parent), ui(new Ui::Co
28ConfigureDebug::~ConfigureDebug() = default; 28ConfigureDebug::~ConfigureDebug() = default;
29 29
30void ConfigureDebug::SetConfiguration() { 30void ConfigureDebug::SetConfiguration() {
31 ui->toggle_gdbstub->setChecked(Settings::values.use_gdbstub);
32 ui->gdbport_spinbox->setEnabled(Settings::values.use_gdbstub);
33 ui->gdbport_spinbox->setValue(Settings::values.gdbstub_port);
34 ui->toggle_console->setEnabled(!Core::System::GetInstance().IsPoweredOn()); 31 ui->toggle_console->setEnabled(!Core::System::GetInstance().IsPoweredOn());
35 ui->toggle_console->setChecked(UISettings::values.show_console); 32 ui->toggle_console->setChecked(UISettings::values.show_console);
36 ui->log_filter_edit->setText(QString::fromStdString(Settings::values.log_filter)); 33 ui->log_filter_edit->setText(QString::fromStdString(Settings::values.log_filter));
@@ -45,8 +42,6 @@ void ConfigureDebug::SetConfiguration() {
45} 42}
46 43
47void ConfigureDebug::ApplyConfiguration() { 44void ConfigureDebug::ApplyConfiguration() {
48 Settings::values.use_gdbstub = ui->toggle_gdbstub->isChecked();
49 Settings::values.gdbstub_port = ui->gdbport_spinbox->value();
50 UISettings::values.show_console = ui->toggle_console->isChecked(); 45 UISettings::values.show_console = ui->toggle_console->isChecked();
51 Settings::values.log_filter = ui->log_filter_edit->text().toStdString(); 46 Settings::values.log_filter = ui->log_filter_edit->text().toStdString();
52 Settings::values.program_args = ui->homebrew_args_edit->text().toStdString(); 47 Settings::values.program_args = ui->homebrew_args_edit->text().toStdString();
diff --git a/src/yuzu/configuration/configure_debug.ui b/src/yuzu/configuration/configure_debug.ui
index 6f94fe304..9186aa732 100644
--- a/src/yuzu/configuration/configure_debug.ui
+++ b/src/yuzu/configuration/configure_debug.ui
@@ -7,7 +7,7 @@
7 <x>0</x> 7 <x>0</x>
8 <y>0</y> 8 <y>0</y>
9 <width>400</width> 9 <width>400</width>
10 <height>467</height> 10 <height>486</height>
11 </rect> 11 </rect>
12 </property> 12 </property>
13 <property name="windowTitle"> 13 <property name="windowTitle">
@@ -15,57 +15,6 @@
15 </property> 15 </property>
16 <layout class="QVBoxLayout" name="verticalLayout_1"> 16 <layout class="QVBoxLayout" name="verticalLayout_1">
17 <item> 17 <item>
18 <layout class="QVBoxLayout" name="verticalLayout_2">
19 <item>
20 <widget class="QGroupBox" name="groupBox">
21 <property name="title">
22 <string>GDB</string>
23 </property>
24 <layout class="QVBoxLayout" name="verticalLayout_3">
25 <item>
26 <layout class="QHBoxLayout" name="horizontalLayout_1">
27 <item>
28 <widget class="QCheckBox" name="toggle_gdbstub">
29 <property name="text">
30 <string>Enable GDB Stub</string>
31 </property>
32 </widget>
33 </item>
34 <item>
35 <spacer name="horizontalSpacer">
36 <property name="orientation">
37 <enum>Qt::Horizontal</enum>
38 </property>
39 <property name="sizeHint" stdset="0">
40 <size>
41 <width>40</width>
42 <height>20</height>
43 </size>
44 </property>
45 </spacer>
46 </item>
47 <item>
48 <widget class="QLabel" name="label_1">
49 <property name="text">
50 <string>Port:</string>
51 </property>
52 </widget>
53 </item>
54 <item>
55 <widget class="QSpinBox" name="gdbport_spinbox">
56 <property name="maximum">
57 <number>65536</number>
58 </property>
59 </widget>
60 </item>
61 </layout>
62 </item>
63 </layout>
64 </widget>
65 </item>
66 </layout>
67 </item>
68 <item>
69 <widget class="QGroupBox" name="groupBox_2"> 18 <widget class="QGroupBox" name="groupBox_2">
70 <property name="title"> 19 <property name="title">
71 <string>Logging</string> 20 <string>Logging</string>
@@ -258,8 +207,6 @@
258 </layout> 207 </layout>
259 </widget> 208 </widget>
260 <tabstops> 209 <tabstops>
261 <tabstop>toggle_gdbstub</tabstop>
262 <tabstop>gdbport_spinbox</tabstop>
263 <tabstop>log_filter_edit</tabstop> 210 <tabstop>log_filter_edit</tabstop>
264 <tabstop>toggle_console</tabstop> 211 <tabstop>toggle_console</tabstop>
265 <tabstop>open_log_button</tabstop> 212 <tabstop>open_log_button</tabstop>
@@ -269,22 +216,5 @@
269 <tabstop>quest_flag</tabstop> 216 <tabstop>quest_flag</tabstop>
270 </tabstops> 217 </tabstops>
271 <resources/> 218 <resources/>
272 <connections> 219 <connections/>
273 <connection>
274 <sender>toggle_gdbstub</sender>
275 <signal>toggled(bool)</signal>
276 <receiver>gdbport_spinbox</receiver>
277 <slot>setEnabled(bool)</slot>
278 <hints>
279 <hint type="sourcelabel">
280 <x>84</x>
281 <y>157</y>
282 </hint>
283 <hint type="destinationlabel">
284 <x>342</x>
285 <y>158</y>
286 </hint>
287 </hints>
288 </connection>
289 </connections>
290</ui> 220</ui>
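
The .ui edit above also drops the Designer-level connection that kept the port spin box enabled only while the GDB stub check box was checked. For illustration only, the same coupling expressed in code, reusing the removed widget names; nothing in this sketch is part of the change itself.

// Hypothetical code equivalent of the removed toggled(bool) -> setEnabled(bool)
// <connection> block from configure_debug.ui.
#include <QApplication>
#include <QCheckBox>
#include <QSpinBox>
#include <QVBoxLayout>
#include <QWidget>

int main(int argc, char* argv[]) {
    QApplication app(argc, argv);

    QWidget window;
    auto* layout = new QVBoxLayout(&window);

    auto* toggle_gdbstub = new QCheckBox(QStringLiteral("Enable GDB Stub"), &window);
    auto* gdbport_spinbox = new QSpinBox(&window);
    gdbport_spinbox->setMaximum(65535);                       // valid TCP port range
    gdbport_spinbox->setEnabled(toggle_gdbstub->isChecked()); // initial state

    // Same effect as the removed Designer connection: the spin box follows
    // the check box's toggled(bool) signal.
    QObject::connect(toggle_gdbstub, &QCheckBox::toggled, gdbport_spinbox,
                     &QSpinBox::setEnabled);

    layout->addWidget(toggle_gdbstub);
    layout->addWidget(gdbport_spinbox);
    window.show();
    return app.exec();
}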
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index a20824719..546a2cd4d 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -13,10 +13,10 @@
13#include "core/arm/arm_interface.h" 13#include "core/arm/arm_interface.h"
14#include "core/core.h" 14#include "core/core.h"
15#include "core/hle/kernel/handle_table.h" 15#include "core/hle/kernel/handle_table.h"
16#include "core/hle/kernel/k_scheduler.h"
16#include "core/hle/kernel/mutex.h" 17#include "core/hle/kernel/mutex.h"
17#include "core/hle/kernel/process.h" 18#include "core/hle/kernel/process.h"
18#include "core/hle/kernel/readable_event.h" 19#include "core/hle/kernel/readable_event.h"
19#include "core/hle/kernel/scheduler.h"
20#include "core/hle/kernel/synchronization_object.h" 20#include "core/hle/kernel/synchronization_object.h"
21#include "core/hle/kernel/thread.h" 21#include "core/hle/kernel/thread.h"
22#include "core/memory.h" 22#include "core/memory.h"
@@ -101,7 +101,7 @@ std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList()
101 }; 101 };
102 102
103 const auto& system = Core::System::GetInstance(); 103 const auto& system = Core::System::GetInstance();
104 add_threads(system.GlobalScheduler().GetThreadList()); 104 add_threads(system.GlobalSchedulerContext().GetThreadList());
105 105
106 return item_list; 106 return item_list;
107} 107}
@@ -349,14 +349,14 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
349 list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor))); 349 list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor)));
350 list.push_back( 350 list.push_back(
351 std::make_unique<WaitTreeText>(tr("ideal core = %1").arg(thread.GetIdealCore()))); 351 std::make_unique<WaitTreeText>(tr("ideal core = %1").arg(thread.GetIdealCore())));
352 list.push_back( 352 list.push_back(std::make_unique<WaitTreeText>(
353 std::make_unique<WaitTreeText>(tr("affinity mask = %1").arg(thread.GetAffinityMask()))); 353 tr("affinity mask = %1").arg(thread.GetAffinityMask().GetAffinityMask())));
354 list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID()))); 354 list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID())));
355 list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)") 355 list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)")
356 .arg(thread.GetPriority()) 356 .arg(thread.GetPriority())
357 .arg(thread.GetNominalPriority()))); 357 .arg(thread.GetNominalPriority())));
358 list.push_back(std::make_unique<WaitTreeText>( 358 list.push_back(std::make_unique<WaitTreeText>(
359 tr("last running ticks = %1").arg(thread.GetLastRunningTicks()))); 359 tr("last running ticks = %1").arg(thread.GetLastScheduledTick())));
360 360
361 const VAddr mutex_wait_address = thread.GetMutexWaitAddress(); 361 const VAddr mutex_wait_address = thread.GetMutexWaitAddress();
362 if (mutex_wait_address != 0) { 362 if (mutex_wait_address != 0) {
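
The affinity-mask line changes because the thread now exposes the new KAffinityMask type (added as k_affinity_mask.h in this change) rather than a raw integer, so the debugger calls its GetAffinityMask() accessor to obtain a printable value. The header itself is not shown in this diff; the following is only a hedged sketch, under a hypothetical name, of what such a thin wrapper over a 64-bit core mask could look like.

// Sketch only: AffinityMaskSketch is not the actual KAffinityMask from
// core/hle/kernel/k_affinity_mask.h, which this hunk does not show.
#include <cstdint>

class AffinityMaskSketch {
public:
    constexpr std::uint64_t GetAffinityMask() const {
        return mask;
    }

    constexpr void SetAffinity(std::int32_t core, bool allowed) {
        const std::uint64_t bit = std::uint64_t{1} << core;
        if (allowed) {
            mask |= bit;
        } else {
            mask &= ~bit;
        }
    }

    constexpr bool GetAffinity(std::int32_t core) const {
        return (mask & (std::uint64_t{1} << core)) != 0;
    }

private:
    std::uint64_t mask{};
};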
diff --git a/src/yuzu_cmd/config.cpp b/src/yuzu_cmd/config.cpp
index b70f71a08..38075c345 100644
--- a/src/yuzu_cmd/config.cpp
+++ b/src/yuzu_cmd/config.cpp
@@ -429,9 +429,6 @@ void Config::ReadValues() {
429 // Debugging 429 // Debugging
430 Settings::values.record_frame_times = 430 Settings::values.record_frame_times =
431 sdl2_config->GetBoolean("Debugging", "record_frame_times", false); 431 sdl2_config->GetBoolean("Debugging", "record_frame_times", false);
432 Settings::values.use_gdbstub = sdl2_config->GetBoolean("Debugging", "use_gdbstub", false);
433 Settings::values.gdbstub_port =
434 static_cast<u16>(sdl2_config->GetInteger("Debugging", "gdbstub_port", 24689));
435 Settings::values.program_args = sdl2_config->Get("Debugging", "program_args", ""); 432 Settings::values.program_args = sdl2_config->Get("Debugging", "program_args", "");
436 Settings::values.dump_exefs = sdl2_config->GetBoolean("Debugging", "dump_exefs", false); 433 Settings::values.dump_exefs = sdl2_config->GetBoolean("Debugging", "dump_exefs", false);
437 Settings::values.dump_nso = sdl2_config->GetBoolean("Debugging", "dump_nso", false); 434 Settings::values.dump_nso = sdl2_config->GetBoolean("Debugging", "dump_nso", false);
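
The removed keys were read with the same pattern as the surviving Debugging settings: one lookup per key against the [Debugging] section, each with an inline default. A small self-contained sketch of that read pattern follows; INIReader is assumed here as a stand-in for whatever sdl2_config actually wraps.

// Hedged sketch of the INI read pattern visible in this hunk.
#include <iostream>
#include <string>
#include <INIReader.h>

int main() {
    INIReader reader("sdl2-config.ini");
    if (reader.ParseError() != 0) {
        std::cerr << "failed to parse sdl2-config.ini\n";
        return 1;
    }

    // Boolean and string lookups against the [Debugging] section, with the
    // same defaults the surviving calls use.
    const bool record_frame_times = reader.GetBoolean("Debugging", "record_frame_times", false);
    const std::string program_args = reader.Get("Debugging", "program_args", "");
    const bool dump_exefs = reader.GetBoolean("Debugging", "dump_exefs", false);

    std::cout << "record_frame_times = " << std::boolalpha << record_frame_times << '\n'
              << "program_args = " << program_args << '\n'
              << "dump_exefs = " << dump_exefs << '\n';
    return 0;
}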
diff --git a/src/yuzu_cmd/default_ini.h b/src/yuzu_cmd/default_ini.h
index bcbbcd4ca..2d4b98d9a 100644
--- a/src/yuzu_cmd/default_ini.h
+++ b/src/yuzu_cmd/default_ini.h
@@ -318,9 +318,6 @@ log_filter = *:Trace
318[Debugging] 318[Debugging]
319# Record frame time data, can be found in the log directory. Boolean value 319# Record frame time data, can be found in the log directory. Boolean value
320record_frame_times = 320record_frame_times =
321# Port for listening to GDB connections.
322use_gdbstub=false
323gdbstub_port=24689
324# Determines whether or not yuzu will dump the ExeFS of all games it attempts to load while loading them 321# Determines whether or not yuzu will dump the ExeFS of all games it attempts to load while loading them
325dump_exefs=false 322dump_exefs=false
326# Determines whether or not yuzu will dump all NSOs it attempts to load while loading them 323# Determines whether or not yuzu will dump all NSOs it attempts to load while loading them
diff --git a/src/yuzu_cmd/yuzu.cpp b/src/yuzu_cmd/yuzu.cpp
index c2efe1ee6..1ebc04af5 100644
--- a/src/yuzu_cmd/yuzu.cpp
+++ b/src/yuzu_cmd/yuzu.cpp
@@ -64,7 +64,6 @@ __declspec(dllexport) int AmdPowerXpressRequestHighPerformance = 1;
64static void PrintHelp(const char* argv0) { 64static void PrintHelp(const char* argv0) {
65 std::cout << "Usage: " << argv0 65 std::cout << "Usage: " << argv0
66 << " [options] <filename>\n" 66 << " [options] <filename>\n"
67 "-g, --gdbport=NUMBER Enable gdb stub on port NUMBER\n"
68 "-f, --fullscreen Start in fullscreen mode\n" 67 "-f, --fullscreen Start in fullscreen mode\n"
69 "-h, --help Display this help and exit\n" 68 "-h, --help Display this help and exit\n"
70 "-v, --version Output version information and exit\n" 69 "-v, --version Output version information and exit\n"
@@ -96,8 +95,6 @@ int main(int argc, char** argv) {
96 Config config; 95 Config config;
97 96
98 int option_index = 0; 97 int option_index = 0;
99 bool use_gdbstub = Settings::values.use_gdbstub;
100 u32 gdb_port = static_cast<u32>(Settings::values.gdbstub_port);
101 98
102 InitializeLogging(); 99 InitializeLogging();
103 100
@@ -116,26 +113,17 @@ int main(int argc, char** argv) {
116 bool fullscreen = false; 113 bool fullscreen = false;
117 114
118 static struct option long_options[] = { 115 static struct option long_options[] = {
119 {"gdbport", required_argument, 0, 'g'}, {"fullscreen", no_argument, 0, 'f'}, 116 {"fullscreen", no_argument, 0, 'f'},
120 {"help", no_argument, 0, 'h'}, {"version", no_argument, 0, 'v'}, 117 {"help", no_argument, 0, 'h'},
121 {"program", optional_argument, 0, 'p'}, {0, 0, 0, 0}, 118 {"version", no_argument, 0, 'v'},
119 {"program", optional_argument, 0, 'p'},
120 {0, 0, 0, 0},
122 }; 121 };
123 122
124 while (optind < argc) { 123 while (optind < argc) {
125 int arg = getopt_long(argc, argv, "g:fhvp::", long_options, &option_index); 124 int arg = getopt_long(argc, argv, "g:fhvp::", long_options, &option_index);
126 if (arg != -1) { 125 if (arg != -1) {
127 switch (static_cast<char>(arg)) { 126 switch (static_cast<char>(arg)) {
128 case 'g':
129 errno = 0;
130 gdb_port = strtoul(optarg, &endarg, 0);
131 use_gdbstub = true;
132 if (endarg == optarg)
133 errno = EINVAL;
134 if (errno != 0) {
135 perror("--gdbport");
136 exit(1);
137 }
138 break;
139 case 'f': 127 case 'f':
140 fullscreen = true; 128 fullscreen = true;
141 LOG_INFO(Frontend, "Starting in fullscreen mode..."); 129 LOG_INFO(Frontend, "Starting in fullscreen mode...");
@@ -177,8 +165,6 @@ int main(int argc, char** argv) {
177 InputCommon::InputSubsystem input_subsystem; 165 InputCommon::InputSubsystem input_subsystem;
178 166
179 // Apply the command line arguments 167 // Apply the command line arguments
180 Settings::values.gdbstub_port = gdb_port;
181 Settings::values.use_gdbstub = use_gdbstub;
182 Settings::Apply(system); 168 Settings::Apply(system);
183 169
184 std::unique_ptr<EmuWindow_SDL2> emu_window; 170 std::unique_ptr<EmuWindow_SDL2> emu_window;
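
With --gdbport gone, the SDL frontend's argument parsing reduces to the four remaining long options. A standalone sketch of the surviving loop, with the handlers trimmed to stubs; note that the sketch also drops the now-unused "g:" from the short-option string, which the committed code still passes to getopt_long.

// Illustrative only: a minimal option loop over the options that remain
// after this change.
#include <getopt.h>
#include <iostream>

int main(int argc, char** argv) {
    static struct option long_options[] = {
        {"fullscreen", no_argument, 0, 'f'},
        {"help", no_argument, 0, 'h'},
        {"version", no_argument, 0, 'v'},
        {"program", optional_argument, 0, 'p'},
        {0, 0, 0, 0},
    };

    bool fullscreen = false;
    int option_index = 0;
    int arg;
    while ((arg = getopt_long(argc, argv, "fhvp::", long_options, &option_index)) != -1) {
        switch (arg) {
        case 'f':
            fullscreen = true;
            break;
        case 'h':
            std::cout << "usage: " << argv[0] << " [options] <filename>\n";
            return 0;
        case 'v':
            std::cout << "version information\n";
            return 0;
        case 'p':
            // optarg may be null because the argument is optional
            break;
        default:
            break;
        }
    }
    std::cout << "fullscreen = " << std::boolalpha << fullscreen << '\n';
    return 0;
}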
diff --git a/src/yuzu_tester/config.cpp b/src/yuzu_tester/config.cpp
index b6cdc7c1c..91684e96e 100644
--- a/src/yuzu_tester/config.cpp
+++ b/src/yuzu_tester/config.cpp
@@ -158,7 +158,6 @@ void Config::ReadValues() {
158 Settings::values.use_dev_keys = sdl2_config->GetBoolean("Miscellaneous", "use_dev_keys", false); 158 Settings::values.use_dev_keys = sdl2_config->GetBoolean("Miscellaneous", "use_dev_keys", false);
159 159
160 // Debugging 160 // Debugging
161 Settings::values.use_gdbstub = false;
162 Settings::values.program_args = ""; 161 Settings::values.program_args = "";
163 Settings::values.dump_exefs = sdl2_config->GetBoolean("Debugging", "dump_exefs", false); 162 Settings::values.dump_exefs = sdl2_config->GetBoolean("Debugging", "dump_exefs", false);
164 Settings::values.dump_nso = sdl2_config->GetBoolean("Debugging", "dump_nso", false); 163 Settings::values.dump_nso = sdl2_config->GetBoolean("Debugging", "dump_nso", false);
diff --git a/src/yuzu_tester/yuzu.cpp b/src/yuzu_tester/yuzu.cpp
index 50bd7ae41..6435ffabb 100644
--- a/src/yuzu_tester/yuzu.cpp
+++ b/src/yuzu_tester/yuzu.cpp
@@ -162,7 +162,6 @@ int main(int argc, char** argv) {
162 162
163 Core::System& system{Core::System::GetInstance()}; 163 Core::System& system{Core::System::GetInstance()};
164 164
165 Settings::values.use_gdbstub = false;
166 Settings::Apply(system); 165 Settings::Apply(system);
167 166
168 const auto emu_window{std::make_unique<EmuWindow_SDL2_Hide>()}; 167 const auto emu_window{std::make_unique<EmuWindow_SDL2_Hide>()};