author      2021-01-11 14:36:26 -0800
committer   2021-01-11 14:36:26 -0800
commit      eb3cb54aa53e23af61afb9b7e35af28c9d37ae2a (patch)
tree        56a80760bd0ba8ecd85dc8d9f09fb9e2068c91d4 /src/core/hle/kernel/thread.cpp
parent      Merge pull request #5229 from Morph1984/fullscreen-opt (diff)
parent      hle: kernel: thread: Preserve thread wait reason for debugging only. (diff)
Merge pull request #5266 from bunnei/kernel-synch

Rewrite KSynchronizationObject, KConditionVariable, and KAddressArbiter
Diffstat (limited to 'src/core/hle/kernel/thread.cpp')
-rw-r--r--  src/core/hle/kernel/thread.cpp  328
1 file changed, 155 insertions(+), 173 deletions(-)
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index a4f9e0d97..d97323255 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -17,9 +17,11 @@
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_condition_variable.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/memory/memory_layout.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/thread.h"
@@ -34,26 +36,19 @@
 
 namespace Kernel {
 
-bool Thread::ShouldWait(const Thread* thread) const {
-    return status != ThreadStatus::Dead;
-}
-
 bool Thread::IsSignaled() const {
-    return status == ThreadStatus::Dead;
-}
-
-void Thread::Acquire(Thread* thread) {
-    ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
+    return signaled;
 }
 
-Thread::Thread(KernelCore& kernel) : SynchronizationObject{kernel} {}
+Thread::Thread(KernelCore& kernel) : KSynchronizationObject{kernel} {}
 Thread::~Thread() = default;
 
 void Thread::Stop() {
     {
         KScopedSchedulerLock lock(kernel);
-        SetStatus(ThreadStatus::Dead);
-        Signal();
+        SetState(ThreadState::Terminated);
+        signaled = true;
+        NotifyAvailable();
         kernel.GlobalHandleTable().Close(global_handle);
 
         if (owner_process) {
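Editor's note (not part of the patch): a Thread now signals by flipping an explicit flag on its KSynchronizationObject base instead of being "signaled" whenever its status compares equal to ThreadStatus::Dead. A minimal sketch of the contract this hunk relies on, assuming NotifyAvailable() wakes threads currently blocked on the object (the real base class lives in k_synchronization_object.h):

    class KSynchronizationObject : public Object {
    public:
        // Waiters test this under the scheduler lock.
        virtual bool IsSignaled() const = 0;

    protected:
        // Wakes every thread currently waiting on this object; called with
        // the scheduler lock held, as in Thread::Stop above.
        void NotifyAvailable();
    };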
@@ -67,59 +62,27 @@ void Thread::Stop() {
     global_handle = 0;
 }
 
-void Thread::ResumeFromWait() {
+void Thread::Wakeup() {
     KScopedSchedulerLock lock(kernel);
-    switch (status) {
-    case ThreadStatus::Paused:
-    case ThreadStatus::WaitSynch:
-    case ThreadStatus::WaitHLEEvent:
-    case ThreadStatus::WaitSleep:
-    case ThreadStatus::WaitIPC:
-    case ThreadStatus::WaitMutex:
-    case ThreadStatus::WaitCondVar:
-    case ThreadStatus::WaitArb:
-    case ThreadStatus::Dormant:
-        break;
-
-    case ThreadStatus::Ready:
-        // The thread's wakeup callback must have already been cleared when the thread was first
-        // awoken.
-        ASSERT(hle_callback == nullptr);
-        // If the thread is waiting on multiple wait objects, it might be awoken more than once
-        // before actually resuming. We can ignore subsequent wakeups if the thread status has
-        // already been set to ThreadStatus::Ready.
-        return;
-    case ThreadStatus::Dead:
-        // This should never happen, as threads must complete before being stopped.
-        DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.",
-                         GetObjectId());
-        return;
-    }
-
-    SetStatus(ThreadStatus::Ready);
-}
-
-void Thread::OnWakeUp() {
-    KScopedSchedulerLock lock(kernel);
-    SetStatus(ThreadStatus::Ready);
+    SetState(ThreadState::Runnable);
 }
 
 ResultCode Thread::Start() {
     KScopedSchedulerLock lock(kernel);
-    SetStatus(ThreadStatus::Ready);
+    SetState(ThreadState::Runnable);
     return RESULT_SUCCESS;
 }
 
 void Thread::CancelWait() {
     KScopedSchedulerLock lock(kernel);
-    if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) {
+    if (GetState() != ThreadState::Waiting || !is_cancellable) {
         is_sync_cancelled = true;
         return;
     }
     // TODO(Blinkhawk): Implement cancel of server session
     is_sync_cancelled = false;
     SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED);
-    SetStatus(ThreadStatus::Ready);
+    SetState(ThreadState::Runnable);
 }
 
 static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
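Editor's note: the sprawling ThreadStatus enumeration deleted above collapses into a small ThreadState machine. The shape assumed by this file (the authoritative definition is in thread.h; only the values this diff actually uses are shown, the rest is inferred from how SetState masks them):

    enum class ThreadState : u16 {
        Initialized = 0,
        Waiting = 1,
        Runnable = 2,
        Terminated = 3,

        // The low bits hold the state proper; the high bits carry
        // suspend/pause flags (see AddSchedulingFlag further down).
        SuspendShift = 4,
        Mask = (1 << SuspendShift) - 1,
    };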
@@ -183,25 +146,24 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
     std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel);
 
     thread->thread_id = kernel.CreateNewThreadID();
-    thread->status = ThreadStatus::Dormant;
+    thread->thread_state = ThreadState::Initialized;
     thread->entry_point = entry_point;
     thread->stack_top = stack_top;
     thread->disable_count = 1;
     thread->tpidr_el0 = 0;
-    thread->nominal_priority = thread->current_priority = priority;
+    thread->current_priority = priority;
+    thread->base_priority = priority;
+    thread->lock_owner = nullptr;
     thread->schedule_count = -1;
     thread->last_scheduled_tick = 0;
     thread->processor_id = processor_id;
     thread->ideal_core = processor_id;
     thread->affinity_mask.SetAffinity(processor_id, true);
-    thread->wait_objects = nullptr;
-    thread->mutex_wait_address = 0;
-    thread->condvar_wait_address = 0;
-    thread->wait_handle = 0;
     thread->name = std::move(name);
    thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap();
     thread->owner_process = owner_process;
     thread->type = type_flags;
+    thread->signaled = false;
     if ((type_flags & THREADTYPE_IDLE) == 0) {
         auto& scheduler = kernel.GlobalSchedulerContext();
         scheduler.AddThread(thread);
@@ -226,153 +188,185 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
     return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
 }
 
-void Thread::SetPriority(u32 priority) {
-    KScopedSchedulerLock lock(kernel);
+void Thread::SetBasePriority(u32 priority) {
     ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
                "Invalid priority value.");
-    nominal_priority = priority;
-    UpdatePriority();
+
+    KScopedSchedulerLock lock(kernel);
+
+    // Change our base priority.
+    base_priority = priority;
+
+    // Perform a priority restoration.
+    RestorePriority(kernel, this);
 }
 
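Editor's note: a hedged sketch of how a caller might use the new split between base and effective priority. The handler name and bounds check below are assumptions; SetBasePriority and RestorePriority come from the hunk above.

    // Hypothetical svc-level caller.
    ResultCode SetThreadPriority(Thread& thread, u32 priority) {
        // THREADPRIO_HIGHEST (0) is most urgent, THREADPRIO_LOWEST the least.
        if (priority > THREADPRIO_LOWEST) {
            return ERR_INVALID_THREAD_PRIORITY;
        }
        // Only base_priority changes here; RestorePriority() inside
        // SetBasePriority() re-derives the effective priority from any
        // higher-priority waiters before notifying the scheduler.
        thread.SetBasePriority(priority);
        return RESULT_SUCCESS;
    }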
-void Thread::SetSynchronizationResults(SynchronizationObject* object, ResultCode result) {
+void Thread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) {
     signaling_object = object;
     signaling_result = result;
 }
 
-s32 Thread::GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const {
-    ASSERT_MSG(!wait_objects->empty(), "Thread is not waiting for anything");
-    const auto match = std::find(wait_objects->rbegin(), wait_objects->rend(), object);
-    return static_cast<s32>(std::distance(match, wait_objects->rend()) - 1);
-}
-
 VAddr Thread::GetCommandBufferAddress() const {
     // Offset from the start of TLS at which the IPC command buffer begins.
     constexpr u64 command_header_offset = 0x80;
     return GetTLSAddress() + command_header_offset;
 }
 
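Editor's note: a concrete reading of GetCommandBufferAddress, since the offset is fixed: the IPC command buffer simply lives 0x80 bytes into the thread's TLS block.

    // Example: a thread whose TLS block sits at 0x20000000 reads and writes
    // its IPC command buffer starting at 0x20000080.
    const VAddr cmd_buf = thread->GetCommandBufferAddress(); // GetTLSAddress() + 0x80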
-void Thread::SetStatus(ThreadStatus new_status) {
-    if (new_status == status) {
-        return;
-    }
+void Thread::SetState(ThreadState state) {
+    KScopedSchedulerLock sl(kernel);
 
-    switch (new_status) {
-    case ThreadStatus::Ready:
-        SetSchedulingStatus(ThreadSchedStatus::Runnable);
-        break;
-    case ThreadStatus::Dormant:
-        SetSchedulingStatus(ThreadSchedStatus::None);
-        break;
-    case ThreadStatus::Dead:
-        SetSchedulingStatus(ThreadSchedStatus::Exited);
-        break;
-    default:
-        SetSchedulingStatus(ThreadSchedStatus::Paused);
-        break;
-    }
+    // Clear debugging state
+    SetMutexWaitAddressForDebugging({});
+    SetWaitReasonForDebugging({});
 
-    status = new_status;
+    const ThreadState old_state = thread_state;
+    thread_state =
+        static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
+    if (thread_state != old_state) {
+        KScheduler::OnThreadStateChanged(kernel, this, old_state);
+    }
 }
 
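Editor's note: to make the masking arithmetic concrete, a state transition replaces only the low bits, so suspend flags parked in the high bits survive, and the scheduler callback fires only when the raw value actually changed. A standalone example, assuming ThreadState::Mask covers the low four bits:

    #include <cstdint>

    constexpr std::uint16_t mask = 0x000F;       // assumed ThreadState::Mask
    std::uint16_t raw = 0x0010 | 0x0001;         // pause flag | Waiting
    raw = static_cast<std::uint16_t>((raw & ~mask) | 0x0002); // SetState(Runnable)
    // raw == 0x0012: still paused in the high bits, Runnable underneath.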
-void Thread::AddMutexWaiter(std::shared_ptr<Thread> thread) {
-    if (thread->lock_owner.get() == this) {
-        // If the thread is already waiting for this thread to release the mutex, ensure that the
-        // waiters list is consistent and return without doing anything.
-        const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
-        ASSERT(iter != wait_mutex_threads.end());
-        return;
+void Thread::AddWaiterImpl(Thread* thread) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+    // Find the right spot to insert the waiter.
+    auto it = waiter_list.begin();
+    while (it != waiter_list.end()) {
+        if (it->GetPriority() > thread->GetPriority()) {
+            break;
+        }
+        it++;
     }
 
-    // A thread can't wait on two different mutexes at the same time.
-    ASSERT(thread->lock_owner == nullptr);
+    // Keep track of how many kernel waiters we have.
+    if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
+        ASSERT((num_kernel_waiters++) >= 0);
+    }
 
-    // Ensure that the thread is not already in the list of mutex waiters
-    const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
-    ASSERT(iter == wait_mutex_threads.end());
+    // Insert the waiter.
+    waiter_list.insert(it, *thread);
+    thread->SetLockOwner(this);
+}
 
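Editor's note: waiter_list is kept sorted by effective priority, so front() is always the most urgent waiter; that invariant is what RestorePriority (below) leans on. A hedged sketch of the contention path that would call into it (the real callers are in k_condition_variable.cpp and k_address_arbiter.cpp, added elsewhere in this PR):

    // The current thread failed to acquire a lock owned by `owner`.
    owner->AddWaiter(current_thread);           // sorted insert + priority boost
    current_thread->SetState(ThreadState::Waiting);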
-    // Keep the list in an ordered fashion
-    const auto insertion_point = std::find_if(
-        wait_mutex_threads.begin(), wait_mutex_threads.end(),
-        [&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); });
-    wait_mutex_threads.insert(insertion_point, thread);
-    thread->lock_owner = SharedFrom(this);
+void Thread::RemoveWaiterImpl(Thread* thread) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
-    UpdatePriority();
-}
+    // Keep track of how many kernel waiters we have.
+    if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
+        ASSERT((num_kernel_waiters--) > 0);
+    }
 
-void Thread::RemoveMutexWaiter(std::shared_ptr<Thread> thread) {
-    ASSERT(thread->lock_owner.get() == this);
+    // Remove the waiter.
+    waiter_list.erase(waiter_list.iterator_to(*thread));
+    thread->SetLockOwner(nullptr);
+}
 
-    // Ensure that the thread is in the list of mutex waiters
-    const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
-    ASSERT(iter != wait_mutex_threads.end());
+void Thread::RestorePriority(KernelCore& kernel, Thread* thread) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
-    wait_mutex_threads.erase(iter);
+    while (true) {
+        // We want to inherit priority where possible.
+        s32 new_priority = thread->GetBasePriority();
+        if (thread->HasWaiters()) {
+            new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
+        }
 
-    thread->lock_owner = nullptr;
-    UpdatePriority();
-}
+        // If the priority we would inherit is not different from ours, don't do anything.
+        if (new_priority == thread->GetPriority()) {
+            return;
+        }
 
-void Thread::UpdatePriority() {
-    // If any of the threads waiting on the mutex have a higher priority
-    // (taking into account priority inheritance), then this thread inherits
-    // that thread's priority.
-    u32 new_priority = nominal_priority;
-    if (!wait_mutex_threads.empty()) {
-        if (wait_mutex_threads.front()->current_priority < new_priority) {
-            new_priority = wait_mutex_threads.front()->current_priority;
+        // Ensure we don't violate condition variable red black tree invariants.
+        if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
+            BeforeUpdatePriority(kernel, cv_tree, thread);
         }
-    }
 
-    if (new_priority == current_priority) {
-        return;
-    }
+        // Change the priority.
+        const s32 old_priority = thread->GetPriority();
+        thread->SetPriority(new_priority);
 
-    if (GetStatus() == ThreadStatus::WaitCondVar) {
-        owner_process->RemoveConditionVariableThread(SharedFrom(this));
-    }
+        // Restore the condition variable, if relevant.
+        if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
+            AfterUpdatePriority(kernel, cv_tree, thread);
+        }
 
-    SetCurrentPriority(new_priority);
+        // Update the scheduler.
+        KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);
 
-    if (GetStatus() == ThreadStatus::WaitCondVar) {
-        owner_process->InsertConditionVariableThread(SharedFrom(this));
-    }
+        // Keep the lock owner up to date.
+        Thread* lock_owner = thread->GetLockOwner();
+        if (lock_owner == nullptr) {
+            return;
+        }
 
-    if (!lock_owner) {
-        return;
+        // Update the thread in the lock owner's sorted list, and continue inheriting.
+        lock_owner->RemoveWaiterImpl(thread);
+        lock_owner->AddWaiterImpl(thread);
+        thread = lock_owner;
     }
+}
 
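Editor's note: a worked example of the inheritance loop above, with assumed priority values (on the Switch a lower number is more urgent):

    // T_low (base 44) owns a mutex; T_high (base 28) blocks on it:
    //     T_low->AddWaiter(T_high);
    // RestorePriority(kernel, T_low) then computes
    //     new_priority = min(44, waiter_list.front() == 28) == 28
    // and boosts T_low to 28. If T_low is itself blocked on a lock owned
    // by some T_mid, the loop re-sorts T_low inside T_mid's waiter_list
    // and repeats with T_mid, pushing the boost up the whole chain
    // iteratively rather than recursively.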
-    // Ensure that the thread is within the correct location in the waiting list.
-    auto old_owner = lock_owner;
-    lock_owner->RemoveMutexWaiter(SharedFrom(this));
-    old_owner->AddMutexWaiter(SharedFrom(this));
-
-    // Recursively update the priority of the thread that depends on the priority of this one.
-    lock_owner->UpdatePriority();
+void Thread::AddWaiter(Thread* thread) {
+    AddWaiterImpl(thread);
+    RestorePriority(kernel, this);
 }
 
-bool Thread::AllSynchronizationObjectsReady() const {
-    return std::none_of(wait_objects->begin(), wait_objects->end(),
-                        [this](const std::shared_ptr<SynchronizationObject>& object) {
-                            return object->ShouldWait(this);
-                        });
+void Thread::RemoveWaiter(Thread* thread) {
+    RemoveWaiterImpl(thread);
+    RestorePriority(kernel, this);
 }
 
-bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
-    ASSERT(hle_callback);
-    return hle_callback(std::move(thread));
+Thread* Thread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+    s32 num_waiters{};
+    Thread* next_lock_owner{};
+    auto it = waiter_list.begin();
+    while (it != waiter_list.end()) {
+        if (it->GetAddressKey() == key) {
+            Thread* thread = std::addressof(*it);
+
+            // Keep track of how many kernel waiters we have.
+            if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
+                ASSERT((num_kernel_waiters--) > 0);
+            }
+            it = waiter_list.erase(it);
+
+            // Update the next lock owner.
+            if (next_lock_owner == nullptr) {
+                next_lock_owner = thread;
+                next_lock_owner->SetLockOwner(nullptr);
+            } else {
+                next_lock_owner->AddWaiterImpl(thread);
+            }
+            num_waiters++;
+        } else {
+            it++;
+        }
+    }
+
+    // Do priority updates, if we have a next owner.
+    if (next_lock_owner) {
+        RestorePriority(kernel, this);
+        RestorePriority(kernel, next_lock_owner);
+    }
+
+    // Return output.
+    *out_num_waiters = num_waiters;
+    return next_lock_owner;
 }
 
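Editor's note: a hedged sketch of what a release path might look like. Only RemoveWaiterByKey, SetSynchronizationResults, and Wakeup are taken from this file; the surrounding caller is an assumption about the mutex/arbiter code rewritten elsewhere in this PR.

    // Hand a contended lock at `mutex_addr` over to the best waiter.
    s32 num_waiters{};
    Thread* next_owner = current_thread->RemoveWaiterByKey(&num_waiters, mutex_addr);
    if (next_owner != nullptr) {
        next_owner->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
        next_owner->Wakeup();   // back to ThreadState::Runnable
    }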
 ResultCode Thread::SetActivity(ThreadActivity value) {
     KScopedSchedulerLock lock(kernel);
 
-    auto sched_status = GetSchedulingStatus();
+    auto sched_status = GetState();
 
-    if (sched_status != ThreadSchedStatus::Runnable && sched_status != ThreadSchedStatus::Paused) {
+    if (sched_status != ThreadState::Runnable && sched_status != ThreadState::Waiting) {
         return ERR_INVALID_STATE;
     }
 
-    if (IsPendingTermination()) {
+    if (IsTerminationRequested()) {
         return RESULT_SUCCESS;
     }
 
@@ -394,7 +388,8 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
     Handle event_handle{};
     {
         KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
-        SetStatus(ThreadStatus::WaitSleep);
+        SetState(ThreadState::Waiting);
+        SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
     }
 
     if (event_handle != InvalidHandle) {
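Editor's note: per the second parent commit of this merge, the wait reason is debugging-only metadata; it no longer participates in scheduling. A sketch of the sleep lifecycle as assumed here:

    // KScopedSchedulerLockAndSleep arms a timeout event for `nanoseconds`
    // and, on destruction, commits the thread to ThreadState::Waiting.
    // When the event fires, Thread::Wakeup() moves the thread back to
    // ThreadState::Runnable; the "Sleep" reason only feeds the debugger UI.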
@@ -405,34 +400,21 @@
 }
 
 void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
-    const u32 old_state = scheduling_state;
+    const auto old_state = GetRawState();
     pausing_state |= static_cast<u32>(flag);
-    const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
-    scheduling_state = base_scheduling | pausing_state;
+    const auto base_scheduling = GetState();
+    thread_state = base_scheduling | static_cast<ThreadState>(pausing_state);
     KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
 void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
-    const u32 old_state = scheduling_state;
+    const auto old_state = GetRawState();
     pausing_state &= ~static_cast<u32>(flag);
-    const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
-    scheduling_state = base_scheduling | pausing_state;
+    const auto base_scheduling = GetState();
+    thread_state = base_scheduling | static_cast<ThreadState>(pausing_state);
     KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
-void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
-    const u32 old_state = scheduling_state;
-    scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
-                       static_cast<u32>(new_status);
-    KScheduler::OnThreadStateChanged(kernel, this, old_state);
-}
-
-void Thread::SetCurrentPriority(u32 new_priority) {
-    const u32 old_priority = std::exchange(current_priority, new_priority);
-    KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(),
-                                        old_priority);
-}
-
 ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
     KScopedSchedulerLock lock(kernel);
     const auto HighestSetCore = [](u64 mask, u32 max_cores) {
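Editor's note: end-to-end, the pausing flags ride in the high bits of thread_state. A usage sketch, assuming ThreadSchedFlags::ThreadPauseFlag is one of those high bits:

    thread->AddSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
    // Raw state is now "Runnable | pause bit": GetState() still reports
    // Runnable, but the scheduler will not pick the thread while any
    // pausing bit is set.
    thread->RemoveSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
    // Pause bit cleared; OnThreadStateChanged requeues the thread.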