Diffstat (limited to 'src')
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.cpp          33
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.h            25
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.cpp                111
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.h                   45
-rw-r--r--  src/core/hle/kernel/k_scheduler.h                         12
-rw-r--r--  src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h   30
-rw-r--r--  src/core/hle/kernel/k_thread_queue.cpp                    12
-rw-r--r--  src/core/hle/kernel/k_thread_queue.h                       4
8 files changed, 130 insertions(+), 142 deletions(-)
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index fd911a3a5..7b090ccb5 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -12,20 +12,19 @@
 
 namespace Kernel {
 
-GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel_)
-    : kernel{kernel_}, scheduler_lock{kernel_} {}
+GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
+    : m_kernel{kernel}, m_scheduler_lock{kernel} {}
 
 GlobalSchedulerContext::~GlobalSchedulerContext() = default;
 
 void GlobalSchedulerContext::AddThread(KThread* thread) {
-    std::scoped_lock lock{global_list_guard};
-    thread_list.push_back(thread);
+    std::scoped_lock lock{m_global_list_guard};
+    m_thread_list.push_back(thread);
 }
 
 void GlobalSchedulerContext::RemoveThread(KThread* thread) {
-    std::scoped_lock lock{global_list_guard};
-    thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
-                      thread_list.end());
+    std::scoped_lock lock{m_global_list_guard};
+    std::erase(m_thread_list, thread);
 }
 
 void GlobalSchedulerContext::PreemptThreads() {
@@ -38,37 +37,37 @@ void GlobalSchedulerContext::PreemptThreads() {
         63,
     };
 
-    ASSERT(IsLocked());
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
     for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
         const u32 priority = preemption_priorities[core_id];
-        KScheduler::RotateScheduledQueue(kernel, core_id, priority);
+        KScheduler::RotateScheduledQueue(m_kernel, core_id, priority);
     }
 }
 
 bool GlobalSchedulerContext::IsLocked() const {
-    return scheduler_lock.IsLockedByCurrentThread();
+    return m_scheduler_lock.IsLockedByCurrentThread();
 }
 
 void GlobalSchedulerContext::RegisterDummyThreadForWakeup(KThread* thread) {
-    ASSERT(IsLocked());
+    ASSERT(this->IsLocked());
 
-    woken_dummy_threads.insert(thread);
+    m_woken_dummy_threads.insert(thread);
 }
 
 void GlobalSchedulerContext::UnregisterDummyThreadForWakeup(KThread* thread) {
-    ASSERT(IsLocked());
+    ASSERT(this->IsLocked());
 
-    woken_dummy_threads.erase(thread);
+    m_woken_dummy_threads.erase(thread);
 }
 
 void GlobalSchedulerContext::WakeupWaitingDummyThreads() {
-    ASSERT(IsLocked());
+    ASSERT(this->IsLocked());
 
-    for (auto* thread : woken_dummy_threads) {
+    for (auto* thread : m_woken_dummy_threads) {
         thread->DummyThreadEndWait();
     }
 
-    woken_dummy_threads.clear();
+    m_woken_dummy_threads.clear();
 }
 
 } // namespace Kernel
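
Note: the RemoveThread() change above swaps the pre-C++20 erase-remove idiom
for C++20 uniform container erasure. A minimal standalone sketch of the
equivalence (illustrative, not yuzu code):

    #include <algorithm>
    #include <vector>

    void remove_all(std::vector<int>& v, int value) {
        // Old shape, as in the previous RemoveThread():
        v.erase(std::remove(v.begin(), v.end(), value), v.end());
        // C++20 shape, as in the new RemoveThread():
        std::erase(v, value);
    }

Both remove every element equal to `value`; std::erase simply packages the
two-step dance into a single call.
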
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
index 220ed6192..b7fb8caec 100644
--- a/src/core/hle/kernel/global_scheduler_context.h
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -33,7 +33,7 @@ class GlobalSchedulerContext final {
 public:
     using LockType = KAbstractSchedulerLock<KScheduler>;
 
-    explicit GlobalSchedulerContext(KernelCore& kernel_);
+    explicit GlobalSchedulerContext(KernelCore& kernel);
     ~GlobalSchedulerContext();
 
     /// Adds a new thread to the scheduler
@@ -43,8 +43,9 @@ public:
     void RemoveThread(KThread* thread);
 
     /// Returns a list of all threads managed by the scheduler
+    /// This is only safe to iterate while holding the scheduler lock
     [[nodiscard]] const std::vector<KThread*>& GetThreadList() const {
-        return thread_list;
+        return m_thread_list;
     }
 
     /**
@@ -64,29 +65,25 @@ public:
     void WakeupWaitingDummyThreads();
 
     [[nodiscard]] LockType& SchedulerLock() {
-        return scheduler_lock;
-    }
-
-    [[nodiscard]] const LockType& SchedulerLock() const {
-        return scheduler_lock;
+        return m_scheduler_lock;
     }
 
 private:
     friend class KScopedSchedulerLock;
     friend class KScopedSchedulerLockAndSleep;
 
-    KernelCore& kernel;
+    KernelCore& m_kernel;
 
-    std::atomic_bool scheduler_update_needed{};
-    KSchedulerPriorityQueue priority_queue;
-    LockType scheduler_lock;
+    std::atomic_bool m_scheduler_update_needed{};
+    KSchedulerPriorityQueue m_priority_queue;
+    LockType m_scheduler_lock;
 
     /// Lists dummy threads pending wakeup on lock release
-    std::set<KThread*> woken_dummy_threads;
+    std::set<KThread*> m_woken_dummy_threads;
 
     /// Lists all thread ids that aren't deleted/etc.
-    std::vector<KThread*> thread_list;
-    std::mutex global_list_guard;
+    std::vector<KThread*> m_thread_list;
+    std::mutex m_global_list_guard;
 };
 
 } // namespace Kernel
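
Note: the new comment on GetThreadList() documents a real precondition. A
hypothetical caller sketch (the `kernel` reference and loop body are
assumptions; the types come from this diff):

    {
        KScopedSchedulerLock lk{kernel};
        for (KThread* thread : kernel.GlobalSchedulerContext().GetThreadList()) {
            // Inspect `thread` here; the list cannot be mutated while the
            // scheduler lock is held.
        }
    }
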
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index a4c16eca9..47637a729 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -14,8 +14,8 @@
 
 namespace Kernel {
 
-KAddressArbiter::KAddressArbiter(Core::System& system_)
-    : system{system_}, kernel{system.Kernel()} {}
+KAddressArbiter::KAddressArbiter(Core::System& system)
+    : m_system{system}, m_kernel{system.Kernel()} {}
 KAddressArbiter::~KAddressArbiter() = default;
 
 namespace {
@@ -90,8 +90,8 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32
 
 class ThreadQueueImplForKAddressArbiter final : public KThreadQueue {
 public:
-    explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t)
-        : KThreadQueue(kernel_), m_tree(t) {}
+    explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel, KAddressArbiter::ThreadTree* t)
+        : KThreadQueue(kernel), m_tree(t) {}
 
     void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
         // If the thread is waiting on an address arbiter, remove it from the tree.
@@ -105,7 +105,7 @@ public:
     }
 
 private:
-    KAddressArbiter::ThreadTree* m_tree;
+    KAddressArbiter::ThreadTree* m_tree{};
 };
 
 } // namespace
@@ -114,10 +114,10 @@ Result KAddressArbiter::Signal(VAddr addr, s32 count) {
     // Perform signaling.
     s32 num_waiters{};
     {
-        KScopedSchedulerLock sl(kernel);
+        KScopedSchedulerLock sl(m_kernel);
 
-        auto it = thread_tree.nfind_key({addr, -1});
-        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+        auto it = m_tree.nfind_key({addr, -1});
+        while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
                (it->GetAddressArbiterKey() == addr)) {
             // End the thread's wait.
             KThread* target_thread = std::addressof(*it);
@@ -126,31 +126,27 @@ Result KAddressArbiter::Signal(VAddr addr, s32 count) {
             ASSERT(target_thread->IsWaitingForAddressArbiter());
             target_thread->ClearAddressArbiter();
 
-            it = thread_tree.erase(it);
+            it = m_tree.erase(it);
             ++num_waiters;
         }
     }
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
     // Perform signaling.
     s32 num_waiters{};
     {
-        KScopedSchedulerLock sl(kernel);
+        KScopedSchedulerLock sl(m_kernel);
 
         // Check the userspace value.
         s32 user_value{};
-        if (!UpdateIfEqual(system, &user_value, addr, value, value + 1)) {
-            LOG_ERROR(Kernel, "Invalid current memory!");
-            return ResultInvalidCurrentMemory;
-        }
-        if (user_value != value) {
-            return ResultInvalidState;
-        }
+        R_UNLESS(UpdateIfEqual(m_system, &user_value, addr, value, value + 1),
+                 ResultInvalidCurrentMemory);
+        R_UNLESS(user_value == value, ResultInvalidState);
 
-        auto it = thread_tree.nfind_key({addr, -1});
-        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+        auto it = m_tree.nfind_key({addr, -1});
+        while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
                (it->GetAddressArbiterKey() == addr)) {
             // End the thread's wait.
             KThread* target_thread = std::addressof(*it);
@@ -159,33 +155,33 @@ Result KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 cou
             ASSERT(target_thread->IsWaitingForAddressArbiter());
             target_thread->ClearAddressArbiter();
 
-            it = thread_tree.erase(it);
+            it = m_tree.erase(it);
             ++num_waiters;
         }
     }
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
     // Perform signaling.
     s32 num_waiters{};
     {
-        [[maybe_unused]] const KScopedSchedulerLock sl(kernel);
+        KScopedSchedulerLock sl(m_kernel);
 
-        auto it = thread_tree.nfind_key({addr, -1});
+        auto it = m_tree.nfind_key({addr, -1});
         // Determine the updated value.
         s32 new_value{};
         if (count <= 0) {
-            if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) {
+            if (it != m_tree.end() && it->GetAddressArbiterKey() == addr) {
                 new_value = value - 2;
             } else {
                 new_value = value + 1;
             }
         } else {
-            if (it != thread_tree.end() && it->GetAddressArbiterKey() == addr) {
+            if (it != m_tree.end() && it->GetAddressArbiterKey() == addr) {
                 auto tmp_it = it;
                 s32 tmp_num_waiters{};
-                while (++tmp_it != thread_tree.end() && tmp_it->GetAddressArbiterKey() == addr) {
+                while (++tmp_it != m_tree.end() && tmp_it->GetAddressArbiterKey() == addr) {
                     if (tmp_num_waiters++ >= count) {
                         break;
                     }
@@ -205,20 +201,15 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 val
         s32 user_value{};
         bool succeeded{};
         if (value != new_value) {
-            succeeded = UpdateIfEqual(system, &user_value, addr, value, new_value);
+            succeeded = UpdateIfEqual(m_system, &user_value, addr, value, new_value);
         } else {
-            succeeded = ReadFromUser(system, &user_value, addr);
+            succeeded = ReadFromUser(m_system, &user_value, addr);
         }
 
-        if (!succeeded) {
-            LOG_ERROR(Kernel, "Invalid current memory!");
-            return ResultInvalidCurrentMemory;
-        }
-        if (user_value != value) {
-            return ResultInvalidState;
-        }
+        R_UNLESS(succeeded, ResultInvalidCurrentMemory);
+        R_UNLESS(user_value == value, ResultInvalidState);
 
-        while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
+        while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) &&
                (it->GetAddressArbiterKey() == addr)) {
             // End the thread's wait.
             KThread* target_thread = std::addressof(*it);
@@ -227,57 +218,57 @@ Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 val
             ASSERT(target_thread->IsWaitingForAddressArbiter());
             target_thread->ClearAddressArbiter();
 
-            it = thread_tree.erase(it);
+            it = m_tree.erase(it);
             ++num_waiters;
         }
     }
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
     // Prepare to wait.
-    KThread* cur_thread = GetCurrentThreadPointer(kernel);
+    KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
     KHardwareTimer* timer{};
-    ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
+    ThreadQueueImplForKAddressArbiter wait_queue(m_kernel, std::addressof(m_tree));
 
     {
-        KScopedSchedulerLockAndSleep slp{kernel, std::addressof(timer), cur_thread, timeout};
+        KScopedSchedulerLockAndSleep slp{m_kernel, std::addressof(timer), cur_thread, timeout};
 
         // Check that the thread isn't terminating.
         if (cur_thread->IsTerminationRequested()) {
             slp.CancelSleep();
-            return ResultTerminationRequested;
+            R_THROW(ResultTerminationRequested);
         }
 
         // Read the value from userspace.
         s32 user_value{};
        bool succeeded{};
         if (decrement) {
-            succeeded = DecrementIfLessThan(system, &user_value, addr, value);
+            succeeded = DecrementIfLessThan(m_system, &user_value, addr, value);
         } else {
-            succeeded = ReadFromUser(system, &user_value, addr);
+            succeeded = ReadFromUser(m_system, &user_value, addr);
         }
 
         if (!succeeded) {
             slp.CancelSleep();
-            return ResultInvalidCurrentMemory;
+            R_THROW(ResultInvalidCurrentMemory);
         }
 
         // Check that the value is less than the specified one.
         if (user_value >= value) {
             slp.CancelSleep();
-            return ResultInvalidState;
+            R_THROW(ResultInvalidState);
         }
 
         // Check that the timeout is non-zero.
         if (timeout == 0) {
             slp.CancelSleep();
-            return ResultTimedOut;
+            R_THROW(ResultTimedOut);
         }
 
         // Set the arbiter.
-        cur_thread->SetAddressArbiter(&thread_tree, addr);
-        thread_tree.insert(*cur_thread);
+        cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
+        m_tree.insert(*cur_thread);
 
         // Wait for the thread to finish.
         wait_queue.SetHardwareTimer(timer);
@@ -291,41 +282,41 @@ Result KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s6
 
 Result KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
     // Prepare to wait.
-    KThread* cur_thread = GetCurrentThreadPointer(kernel);
+    KThread* cur_thread = GetCurrentThreadPointer(m_kernel);
     KHardwareTimer* timer{};
-    ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
+    ThreadQueueImplForKAddressArbiter wait_queue(m_kernel, std::addressof(m_tree));
 
     {
-        KScopedSchedulerLockAndSleep slp{kernel, std::addressof(timer), cur_thread, timeout};
+        KScopedSchedulerLockAndSleep slp{m_kernel, std::addressof(timer), cur_thread, timeout};
 
         // Check that the thread isn't terminating.
         if (cur_thread->IsTerminationRequested()) {
             slp.CancelSleep();
-            return ResultTerminationRequested;
+            R_THROW(ResultTerminationRequested);
         }
 
         // Read the value from userspace.
         s32 user_value{};
-        if (!ReadFromUser(system, &user_value, addr)) {
+        if (!ReadFromUser(m_system, &user_value, addr)) {
             slp.CancelSleep();
-            return ResultInvalidCurrentMemory;
+            R_THROW(ResultInvalidCurrentMemory);
         }
 
         // Check that the value is equal.
         if (value != user_value) {
             slp.CancelSleep();
-            return ResultInvalidState;
+            R_THROW(ResultInvalidState);
         }
 
         // Check that the timeout is non-zero.
         if (timeout == 0) {
             slp.CancelSleep();
-            return ResultTimedOut;
+            R_THROW(ResultTimedOut);
         }
 
         // Set the arbiter.
-        cur_thread->SetAddressArbiter(&thread_tree, addr);
-        thread_tree.insert(*cur_thread);
+        cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
+        m_tree.insert(*cur_thread);
 
         // Wait for the thread to finish.
         wait_queue.SetHardwareTimer(timer);
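
Note: the error paths above move from explicit `return Result...;` to the
R_SUCCEED/R_THROW/R_UNLESS helpers. A rough sketch of their effect (an
assumption; the real macros are more involved and also cooperate with
result-guard scopes):

    #define R_SUCCEED() return ResultSuccess
    #define R_THROW(res) return (res)
    #define R_UNLESS(expr, res) \
        do {                    \
            if (!(expr)) {      \
                R_THROW(res);   \
            }                   \
        } while (false)

So `R_UNLESS(user_value == value, ResultInvalidState);` reads as "bail out
with ResultInvalidState unless the values match", replacing the old
four-line if block.
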
diff --git a/src/core/hle/kernel/k_address_arbiter.h b/src/core/hle/kernel/k_address_arbiter.h
index e4085ae22..9a8c1ae94 100644
--- a/src/core/hle/kernel/k_address_arbiter.h
+++ b/src/core/hle/kernel/k_address_arbiter.h
@@ -22,47 +22,46 @@ class KAddressArbiter {
 public:
     using ThreadTree = KConditionVariable::ThreadTree;
 
-    explicit KAddressArbiter(Core::System& system_);
+    explicit KAddressArbiter(Core::System& system);
     ~KAddressArbiter();
 
-    [[nodiscard]] Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) {
+    Result SignalToAddress(VAddr addr, Svc::SignalType type, s32 value, s32 count) {
         switch (type) {
         case Svc::SignalType::Signal:
-            return Signal(addr, count);
+            R_RETURN(this->Signal(addr, count));
         case Svc::SignalType::SignalAndIncrementIfEqual:
-            return SignalAndIncrementIfEqual(addr, value, count);
+            R_RETURN(this->SignalAndIncrementIfEqual(addr, value, count));
         case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
-            return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
+            R_RETURN(this->SignalAndModifyByWaitingCountIfEqual(addr, value, count));
+        default:
+            UNREACHABLE();
         }
-        ASSERT(false);
-        return ResultUnknown;
     }
 
-    [[nodiscard]] Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
-                                        s64 timeout) {
+    Result WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value, s64 timeout) {
         switch (type) {
         case Svc::ArbitrationType::WaitIfLessThan:
-            return WaitIfLessThan(addr, value, false, timeout);
+            R_RETURN(WaitIfLessThan(addr, value, false, timeout));
         case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
-            return WaitIfLessThan(addr, value, true, timeout);
+            R_RETURN(WaitIfLessThan(addr, value, true, timeout));
         case Svc::ArbitrationType::WaitIfEqual:
-            return WaitIfEqual(addr, value, timeout);
+            R_RETURN(WaitIfEqual(addr, value, timeout));
+        default:
+            UNREACHABLE();
         }
-        ASSERT(false);
-        return ResultUnknown;
     }
 
 private:
-    [[nodiscard]] Result Signal(VAddr addr, s32 count);
-    [[nodiscard]] Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
-    [[nodiscard]] Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
-    [[nodiscard]] Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
-    [[nodiscard]] Result WaitIfEqual(VAddr addr, s32 value, s64 timeout);
+    Result Signal(VAddr addr, s32 count);
+    Result SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
+    Result SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
+    Result WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
+    Result WaitIfEqual(VAddr addr, s32 value, s64 timeout);
 
-    ThreadTree thread_tree;
-
-    Core::System& system;
-    KernelCore& kernel;
+private:
+    ThreadTree m_tree;
+    Core::System& m_system;
+    KernelCore& m_kernel;
 };
 
 } // namespace Kernel
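
Note: both dispatchers now end their switch with `default: UNREACHABLE();`
instead of falling out to `ASSERT(false); return ResultUnknown;`. A minimal
standalone sketch of the pattern (assuming UNREACHABLE() is yuzu's
assert-and-halt macro):

    enum class Op { A, B };

    int Dispatch(Op op) {
        switch (op) {
        case Op::A:
            return 1;
        case Op::B:
            return 2;
        default:
            UNREACHABLE(); // a value outside the enum is a caller bug
        }
    }

This keeps every return on a handled case and avoids inventing a dummy
Result for a path that should never execute.
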
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 534321d8d..3f13b8193 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -80,17 +80,17 @@ public:
         return GetCurrentThread(kernel).GetDisableDispatchCount() == 0;
     }
     static bool IsSchedulerLockedByCurrentThread(KernelCore& kernel) {
-        return kernel.GlobalSchedulerContext().scheduler_lock.IsLockedByCurrentThread();
+        return kernel.GlobalSchedulerContext().m_scheduler_lock.IsLockedByCurrentThread();
     }
 
     static bool IsSchedulerUpdateNeeded(KernelCore& kernel) {
-        return kernel.GlobalSchedulerContext().scheduler_update_needed;
+        return kernel.GlobalSchedulerContext().m_scheduler_update_needed;
     }
     static void SetSchedulerUpdateNeeded(KernelCore& kernel) {
-        kernel.GlobalSchedulerContext().scheduler_update_needed = true;
+        kernel.GlobalSchedulerContext().m_scheduler_update_needed = true;
     }
     static void ClearSchedulerUpdateNeeded(KernelCore& kernel) {
-        kernel.GlobalSchedulerContext().scheduler_update_needed = false;
+        kernel.GlobalSchedulerContext().m_scheduler_update_needed = false;
     }
 
     static void DisableScheduling(KernelCore& kernel);
@@ -115,7 +115,7 @@ public:
 private:
     // Static private API.
     static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel) {
-        return kernel.GlobalSchedulerContext().priority_queue;
+        return kernel.GlobalSchedulerContext().m_priority_queue;
     }
     static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
 
@@ -166,7 +166,7 @@ private:
 class KScopedSchedulerLock : public KScopedLock<KScheduler::LockType> {
 public:
     explicit KScopedSchedulerLock(KernelCore& kernel)
-        : KScopedLock(kernel.GlobalSchedulerContext().scheduler_lock) {}
+        : KScopedLock(kernel.GlobalSchedulerContext().m_scheduler_lock) {}
     ~KScopedSchedulerLock() = default;
 };
 
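
Note: KScopedSchedulerLock above is KScopedLock instantiated with the global
scheduler lock. A sketch of the RAII shape it builds on (an approximation of
the template, not copied from yuzu):

    template <typename T>
    class KScopedLock {
    public:
        explicit KScopedLock(T& lock) : m_lock{lock} {
            m_lock.Lock();
        }
        ~KScopedLock() {
            m_lock.Unlock();
        }
        KScopedLock(const KScopedLock&) = delete;
        KScopedLock& operator=(const KScopedLock&) = delete;

    private:
        T& m_lock; // any type exposing Lock()/Unlock()
    };
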
diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
index 14b83a819..c485022f5 100644
--- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
+++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
@@ -11,39 +11,39 @@
 
 namespace Kernel {
 
-class [[nodiscard]] KScopedSchedulerLockAndSleep {
+class KScopedSchedulerLockAndSleep {
 public:
-    explicit KScopedSchedulerLockAndSleep(KernelCore& kernel_, KHardwareTimer** out_timer,
-                                          KThread* t, s64 timeout)
-        : kernel(kernel_), timeout_tick(timeout), thread(t), timer() {
+    explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, KHardwareTimer** out_timer,
+                                          KThread* thread, s64 timeout_tick)
+        : m_kernel(kernel), m_timeout_tick(timeout_tick), m_thread(thread), m_timer() {
         // Lock the scheduler.
-        kernel.GlobalSchedulerContext().scheduler_lock.Lock();
+        kernel.GlobalSchedulerContext().m_scheduler_lock.Lock();
 
         // Set our timer only if the time is positive.
-        timer = (timeout_tick > 0) ? std::addressof(kernel.HardwareTimer()) : nullptr;
+        m_timer = (timeout_tick > 0) ? std::addressof(kernel.HardwareTimer()) : nullptr;
 
-        *out_timer = timer;
+        *out_timer = m_timer;
     }
 
     ~KScopedSchedulerLockAndSleep() {
         // Register the sleep.
-        if (timeout_tick > 0) {
-            timer->RegisterTask(thread, timeout_tick);
+        if (m_timeout_tick > 0) {
+            m_timer->RegisterTask(m_thread, m_timeout_tick);
         }
 
         // Unlock the scheduler.
-        kernel.GlobalSchedulerContext().scheduler_lock.Unlock();
+        m_kernel.GlobalSchedulerContext().m_scheduler_lock.Unlock();
     }
 
     void CancelSleep() {
-        timeout_tick = 0;
+        m_timeout_tick = 0;
     }
 
 private:
-    KernelCore& kernel;
-    s64 timeout_tick{};
-    KThread* thread{};
-    KHardwareTimer* timer{};
+    KernelCore& m_kernel;
+    s64 m_timeout_tick{};
+    KThread* m_thread{};
+    KHardwareTimer* m_timer{};
 };
 
 } // namespace Kernel
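
Note: the class keeps its lock-in-constructor, arm-timer-in-destructor
shape; only the naming changed. A hypothetical usage sketch mirroring the
KAddressArbiter wait paths earlier in this diff:

    KHardwareTimer* timer{};
    {
        KScopedSchedulerLockAndSleep slp{kernel, std::addressof(timer), cur_thread, timeout};
        if (timeout == 0) {
            slp.CancelSleep(); // zeroes m_timeout_tick, so ~slp skips RegisterTask
        }
    } // destructor: register the sleep if still armed, then unlock the scheduler
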
diff --git a/src/core/hle/kernel/k_thread_queue.cpp b/src/core/hle/kernel/k_thread_queue.cpp
index fe648447b..61488f4ce 100644
--- a/src/core/hle/kernel/k_thread_queue.cpp
+++ b/src/core/hle/kernel/k_thread_queue.cpp
@@ -7,9 +7,10 @@
 
 namespace Kernel {
 
-void KThreadQueue::NotifyAvailable([[maybe_unused]] KThread* waiting_thread,
-                                   [[maybe_unused]] KSynchronizationObject* signaled_object,
-                                   [[maybe_unused]] Result wait_result) {}
+void KThreadQueue::NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
+                                   Result wait_result) {
+    UNREACHABLE();
+}
 
 void KThreadQueue::EndWait(KThread* waiting_thread, Result wait_result) {
     // Set the thread's wait result.
@@ -43,7 +44,8 @@ void KThreadQueue::CancelWait(KThread* waiting_thread, Result wait_result, bool
     }
 }
 
-void KThreadQueueWithoutEndWait::EndWait([[maybe_unused]] KThread* waiting_thread,
-                                         [[maybe_unused]] Result wait_result) {}
+void KThreadQueueWithoutEndWait::EndWait(KThread* waiting_thread, Result wait_result) {
+    UNREACHABLE();
+}
 
 } // namespace Kernel
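
Note: NotifyAvailable() and KThreadQueueWithoutEndWait::EndWait() were
silent no-ops; they now trap. The change in contract, sketched on a generic
hook (illustrative only, assuming UNREACHABLE() asserts and halts):

    struct Queue {
        virtual ~Queue() = default;
        // Reaching the base implementation is now a bug, not a no-op:
        // subclasses that can be signaled must override this.
        virtual void NotifyAvailable() {
            UNREACHABLE();
        }
    };
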
diff --git a/src/core/hle/kernel/k_thread_queue.h b/src/core/hle/kernel/k_thread_queue.h
index 01e330e2e..8ec2f900b 100644
--- a/src/core/hle/kernel/k_thread_queue.h
+++ b/src/core/hle/kernel/k_thread_queue.h
@@ -12,7 +12,7 @@ class KHardwareTimer;
 
 class KThreadQueue {
 public:
-    explicit KThreadQueue(KernelCore& kernel_) : kernel{kernel_}, m_hardware_timer{} {}
+    explicit KThreadQueue(KernelCore& kernel) : m_kernel{kernel}, m_hardware_timer{} {}
     virtual ~KThreadQueue() = default;
 
     void SetHardwareTimer(KHardwareTimer* timer) {
@@ -25,7 +25,7 @@ public:
     virtual void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task);
 
 private:
-    KernelCore& kernel;
+    KernelCore& m_kernel;
     KHardwareTimer* m_hardware_timer{};
 };
 
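
Note: across all eight files the mechanical rename follows one pattern:
constructor parameters drop the trailing underscore and data members gain an
m_ prefix. Sketched on a single member (illustrative only):

    class Example {
    public:
        explicit Example(KernelCore& kernel) // was: KernelCore& kernel_
            : m_kernel{kernel} {}            // was: kernel{kernel_}

    private:
        KernelCore& m_kernel;                // was: KernelCore& kernel;
    };
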