path: root/src/core/hle/kernel/thread.cpp
author     bunnei  2020-12-30 01:14:02 -0800
committer  bunnei  2021-01-11 14:23:16 -0800
commit     912dd501465ffaabd149cc3532839e346982b337 (patch)
tree       54f650b18baf040bf9a0555e386989ef2189c223 /src/core/hle/kernel/thread.cpp
parent     core: hle: kernel: Update KAddressArbiter. (diff)
download   yuzu-912dd501465ffaabd149cc3532839e346982b337.tar.gz
           yuzu-912dd501465ffaabd149cc3532839e346982b337.tar.xz
           yuzu-912dd501465ffaabd149cc3532839e346982b337.zip
core: hle: Integrate new KConditionVariable and KAddressArbiter implementations.
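The heart of this commit is replacing the old std::shared_ptr-based wait_mutex_threads list and the recursive UpdatePriority() with an intrusive, priority-sorted waiter_list plus an iterative RestorePriority() walk up the lock-owner chain. Below is a rough, self-contained sketch of that walk, not code from this commit: SimpleThread and its std::vector waiter list are illustrative stand-ins for the intrusive list, and the condition-variable tree and scheduler notifications are omitted.

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct SimpleThread {
        std::int32_t base_priority;          // priority requested by the application
        std::int32_t current_priority;       // effective priority after inheritance
        std::uint64_t address_key = 0;       // lock address this thread is blocked on
        SimpleThread* lock_owner = nullptr;  // owner of the lock we are waiting on
        std::vector<SimpleThread*> waiters;  // kept sorted by current_priority
    };

    void RestorePrioritySketch(SimpleThread* thread) {
        while (thread != nullptr) {
            // Inherit the best waiter priority (lower value = higher priority).
            std::int32_t new_priority = thread->base_priority;
            if (!thread->waiters.empty()) {
                new_priority = std::min(new_priority, thread->waiters.front()->current_priority);
            }
            // Nothing changed, so the rest of the chain is already consistent.
            if (new_priority == thread->current_priority) {
                return;
            }
            thread->current_priority = new_priority;

            // Re-sort this thread within its lock owner's waiter list, then
            // continue inheriting up the chain, as the new code does.
            SimpleThread* owner = thread->lock_owner;
            if (owner == nullptr) {
                return;
            }
            auto& w = owner->waiters;
            w.erase(std::find(w.begin(), w.end(), thread));
            w.insert(std::find_if(w.begin(), w.end(),
                                  [&](const SimpleThread* t) {
                                      return t->current_priority > thread->current_priority;
                                  }),
                     thread);
            thread = owner;
        }
    }

Iterating instead of recursing bounds stack usage no matter how long the chain of blocked threads grows.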
Diffstat (limited to 'src/core/hle/kernel/thread.cpp')
-rw-r--r--  src/core/hle/kernel/thread.cpp | 227
1 file changed, 127 insertions(+), 100 deletions(-)
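RemoveWaiterByKey() is the other half of the design: on lock release it pulls every waiter keyed on the lock address, promotes the highest-priority one to next owner, and re-parents the remaining waiters onto it. Extending the SimpleThread sketch above (again illustrative only; the real code also adjusts the kernel-waiter count and runs RestorePriority on both threads):

    SimpleThread* RemoveWaitersByKeySketch(SimpleThread* owner, std::uint64_t key,
                                           std::int32_t* out_num_waiters) {
        std::int32_t num_waiters = 0;
        SimpleThread* next_owner = nullptr;

        auto& w = owner->waiters;
        for (auto it = w.begin(); it != w.end();) {
            SimpleThread* t = *it;
            if (t->address_key != key) {
                ++it;
                continue;
            }
            it = w.erase(it);
            if (next_owner == nullptr) {
                // First match in a priority-sorted list is the best waiter.
                next_owner = t;
                t->lock_owner = nullptr;
            } else {
                // Remaining matches now wait on the new owner instead.
                t->lock_owner = next_owner;
                next_owner->waiters.push_back(t);  // the real code keeps this sorted
            }
            ++num_waiters;
        }

        *out_num_waiters = num_waiters;
        return next_owner;  // nullptr if nobody was waiting on this key
    }

In the integrated KConditionVariable code, the caller is then responsible for writing the new owner's handle back to the mutex word (flagging whether waiters remain) and waking the new owner.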
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 33a4e1fa3..eda56c31c 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -17,9 +17,11 @@
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_condition_variable.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/memory/memory_layout.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/thread.h"
@@ -62,24 +64,6 @@ void Thread::Stop() {
 
 void Thread::Wakeup() {
     KScopedSchedulerLock lock(kernel);
-    switch (thread_state) {
-    case ThreadState::Runnable:
-        // If the thread is waiting on multiple wait objects, it might be awoken more than once
-        // before actually resuming. We can ignore subsequent wakeups if the thread status has
-        // already been set to ThreadStatus::Ready.
-        return;
-    case ThreadState::Terminated:
-        // This should never happen, as threads must complete before being stopped.
-        DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.",
-                         GetObjectId());
-        return;
-    }
-
-    SetState(ThreadState::Runnable);
-}
-
-void Thread::OnWakeUp() {
-    KScopedSchedulerLock lock(kernel);
     SetState(ThreadState::Runnable);
 }
 
@@ -167,15 +151,14 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
     thread->stack_top = stack_top;
     thread->disable_count = 1;
     thread->tpidr_el0 = 0;
-    thread->nominal_priority = thread->current_priority = priority;
+    thread->current_priority = priority;
+    thread->base_priority = priority;
+    thread->lock_owner = nullptr;
     thread->schedule_count = -1;
     thread->last_scheduled_tick = 0;
     thread->processor_id = processor_id;
     thread->ideal_core = processor_id;
     thread->affinity_mask.SetAffinity(processor_id, true);
-    thread->mutex_wait_address = 0;
-    thread->condvar_wait_address = 0;
-    thread->wait_handle = 0;
     thread->name = std::move(name);
     thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap();
     thread->owner_process = owner_process;
@@ -205,12 +188,17 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
     return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
 }
 
-void Thread::SetPriority(u32 priority) {
-    KScopedSchedulerLock lock(kernel);
+void Thread::SetBasePriority(u32 priority) {
     ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
                "Invalid priority value.");
-    nominal_priority = priority;
-    UpdatePriority();
+
+    KScopedSchedulerLock lock(kernel);
+
+    // Change our base priority.
+    base_priority = priority;
+
+    // Perform a priority restoration.
+    RestorePriority(kernel, this);
 }
 
 void Thread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) {
@@ -224,95 +212,146 @@ VAddr Thread::GetCommandBufferAddress() const {
     return GetTLSAddress() + command_header_offset;
 }
 
-void Thread::SetState(ThreadState new_status) {
-    if (new_status == thread_state) {
-        return;
+void Thread::SetState(ThreadState state) {
+    KScopedSchedulerLock sl(kernel);
+
+    SetMutexWaitAddressForDebugging(0);
+    const ThreadState old_state = thread_state;
+    thread_state =
+        static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
+    if (thread_state != old_state) {
+        KScheduler::OnThreadStateChanged(kernel, this, old_state);
     }
+}
+
+void Thread::AddWaiterImpl(Thread* thread) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
-    if (new_status != ThreadState::Waiting) {
-        SetWaitingCondVar(false);
+    // Find the right spot to insert the waiter.
+    auto it = waiter_list.begin();
+    while (it != waiter_list.end()) {
+        if (it->GetPriority() > thread->GetPriority()) {
+            break;
+        }
+        it++;
     }
 
-    SetSchedulingStatus(new_status);
+    // Keep track of how many kernel waiters we have.
+    if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
+        ASSERT((num_kernel_waiters++) >= 0);
+    }
 
-    thread_state = new_status;
+    // Insert the waiter.
+    waiter_list.insert(it, *thread);
+    thread->SetLockOwner(this);
 }
 
-void Thread::AddMutexWaiter(std::shared_ptr<Thread> thread) {
-    if (thread->lock_owner.get() == this) {
-        // If the thread is already waiting for this thread to release the mutex, ensure that the
-        // waiters list is consistent and return without doing anything.
-        const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
-        ASSERT(iter != wait_mutex_threads.end());
-        return;
+void Thread::RemoveWaiterImpl(Thread* thread) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+    // Keep track of how many kernel waiters we have.
+    if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
+        ASSERT((num_kernel_waiters--) > 0);
     }
 
-    // A thread can't wait on two different mutexes at the same time.
-    ASSERT(thread->lock_owner == nullptr);
+    // Remove the waiter.
+    waiter_list.erase(waiter_list.iterator_to(*thread));
+    thread->SetLockOwner(nullptr);
+}
 
-    // Ensure that the thread is not already in the list of mutex waiters
-    const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
-    ASSERT(iter == wait_mutex_threads.end());
+void Thread::RestorePriority(KernelCore& kernel, Thread* thread) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
-    // Keep the list in an ordered fashion
-    const auto insertion_point = std::find_if(
-        wait_mutex_threads.begin(), wait_mutex_threads.end(),
-        [&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); });
-    wait_mutex_threads.insert(insertion_point, thread);
-    thread->lock_owner = SharedFrom(this);
+    while (true) {
+        // We want to inherit priority where possible.
+        s32 new_priority = thread->GetBasePriority();
+        if (thread->HasWaiters()) {
+            new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
+        }
 
-    UpdatePriority();
-}
+        // If the priority we would inherit is not different from ours, don't do anything.
+        if (new_priority == thread->GetPriority()) {
+            return;
+        }
 
-void Thread::RemoveMutexWaiter(std::shared_ptr<Thread> thread) {
-    ASSERT(thread->lock_owner.get() == this);
+        // Ensure we don't violate condition variable red black tree invariants.
+        if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
+            BeforeUpdatePriority(kernel, cv_tree, thread);
+        }
 
-    // Ensure that the thread is in the list of mutex waiters
-    const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
-    ASSERT(iter != wait_mutex_threads.end());
+        // Change the priority.
+        const s32 old_priority = thread->GetPriority();
+        thread->SetPriority(new_priority);
 
-    wait_mutex_threads.erase(iter);
+        // Restore the condition variable, if relevant.
+        if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
+            AfterUpdatePriority(kernel, cv_tree, thread);
+        }
 
-    thread->lock_owner = nullptr;
-    UpdatePriority();
-}
+        // Update the scheduler.
+        KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);
 
-void Thread::UpdatePriority() {
-    // If any of the threads waiting on the mutex have a higher priority
-    // (taking into account priority inheritance), then this thread inherits
-    // that thread's priority.
-    u32 new_priority = nominal_priority;
-    if (!wait_mutex_threads.empty()) {
-        if (wait_mutex_threads.front()->current_priority < new_priority) {
-            new_priority = wait_mutex_threads.front()->current_priority;
+        // Keep the lock owner up to date.
+        Thread* lock_owner = thread->GetLockOwner();
+        if (lock_owner == nullptr) {
+            return;
         }
-    }
 
-    if (new_priority == current_priority) {
-        return;
+        // Update the thread in the lock owner's sorted list, and continue inheriting.
+        lock_owner->RemoveWaiterImpl(thread);
+        lock_owner->AddWaiterImpl(thread);
+        thread = lock_owner;
     }
+}
 
-    if (GetState() == ThreadState::Waiting && is_waiting_on_condvar) {
-        owner_process->RemoveConditionVariableThread(SharedFrom(this));
-    }
+void Thread::AddWaiter(Thread* thread) {
+    AddWaiterImpl(thread);
+    RestorePriority(kernel, this);
+}
 
-    SetCurrentPriority(new_priority);
+void Thread::RemoveWaiter(Thread* thread) {
+    RemoveWaiterImpl(thread);
+    RestorePriority(kernel, this);
+}
 
-    if (GetState() == ThreadState::Waiting && is_waiting_on_condvar) {
-        owner_process->InsertConditionVariableThread(SharedFrom(this));
-    }
+Thread* Thread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
-    if (!lock_owner) {
-        return;
+    s32 num_waiters{};
+    Thread* next_lock_owner{};
+    auto it = waiter_list.begin();
+    while (it != waiter_list.end()) {
+        if (it->GetAddressKey() == key) {
+            Thread* thread = std::addressof(*it);
+
+            // Keep track of how many kernel waiters we have.
+            if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
+                ASSERT((num_kernel_waiters--) > 0);
+            }
+            it = waiter_list.erase(it);
+
+            // Update the next lock owner.
+            if (next_lock_owner == nullptr) {
+                next_lock_owner = thread;
+                next_lock_owner->SetLockOwner(nullptr);
+            } else {
+                next_lock_owner->AddWaiterImpl(thread);
+            }
+            num_waiters++;
+        } else {
+            it++;
+        }
     }
 
-    // Ensure that the thread is within the correct location in the waiting list.
-    auto old_owner = lock_owner;
-    lock_owner->RemoveMutexWaiter(SharedFrom(this));
-    old_owner->AddMutexWaiter(SharedFrom(this));
+    // Do priority updates, if we have a next owner.
+    if (next_lock_owner) {
+        RestorePriority(kernel, this);
+        RestorePriority(kernel, next_lock_owner);
+    }
 
-    // Recursively update the priority of the thread that depends on the priority of this one.
-    lock_owner->UpdatePriority();
+    // Return output.
+    *out_num_waiters = num_waiters;
+    return next_lock_owner;
 }
 
 ResultCode Thread::SetActivity(ThreadActivity value) {
@@ -372,18 +411,6 @@ void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
     KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
-void Thread::SetSchedulingStatus(ThreadState new_status) {
-    const auto old_state = GetRawState();
-    thread_state = (thread_state & ThreadState::HighMask) | new_status;
-    KScheduler::OnThreadStateChanged(kernel, this, old_state);
-}
-
-void Thread::SetCurrentPriority(u32 new_priority) {
-    const u32 old_priority = std::exchange(current_priority, new_priority);
-    KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(),
-                                        old_priority);
-}
-
 ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
     KScopedSchedulerLock lock(kernel);
     const auto HighestSetCore = [](u64 mask, u32 max_cores) {
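One more detail worth noting: the rewritten SetState() no longer assigns the whole enum, but masks in only the low scheduling-state bits while preserving the high flag bits. A compilable toy of that masking arithmetic, with illustrative enum values (the real flag layout is defined alongside ThreadState in the kernel headers):

    #include <cassert>
    #include <cstdint>

    // Illustrative values only: low bits hold the scheduling state, high bits
    // hold flags that SetState() must leave untouched.
    enum ThreadState : std::uint16_t {
        Initialized = 0,
        Waiting = 1,
        Runnable = 2,
        Terminated = 3,
        Mask = 0xF,            // low bits: scheduling state
        SuspendFlag = 1 << 4,  // example high bit that survives SetState()
    };

    int main() {
        std::uint16_t thread_state = Waiting | SuspendFlag;
        const std::uint16_t state = Runnable;
        // Same arithmetic as the new Thread::SetState(): replace only the low
        // (Mask) bits and keep the high flag bits as they were.
        thread_state = static_cast<std::uint16_t>((thread_state & ~Mask) | (state & Mask));
        assert(thread_state == (Runnable | SuspendFlag));
    }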