Diffstat (limited to 'src/core/hle/kernel/thread.cpp')
-rw-r--r--  src/core/hle/kernel/thread.cpp | 66
1 file changed, 19 insertions, 47 deletions
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index a4f9e0d97..ac19e2997 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -34,26 +34,19 @@
 
 namespace Kernel {
 
-bool Thread::ShouldWait(const Thread* thread) const {
-    return status != ThreadStatus::Dead;
-}
-
 bool Thread::IsSignaled() const {
-    return status == ThreadStatus::Dead;
+    return signaled;
 }
 
-void Thread::Acquire(Thread* thread) {
-    ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
-}
-
-Thread::Thread(KernelCore& kernel) : SynchronizationObject{kernel} {}
+Thread::Thread(KernelCore& kernel) : KSynchronizationObject{kernel} {}
 Thread::~Thread() = default;
 
 void Thread::Stop() {
     {
         KScopedSchedulerLock lock(kernel);
-        SetStatus(ThreadStatus::Dead);
-        Signal();
+        SetState(ThreadStatus::Dead);
+        signaled = true;
+        NotifyAvailable();
         kernel.GlobalHandleTable().Close(global_handle);
 
         if (owner_process) {
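
The hunk above replaces Thread's old SynchronizationObject interface (ShouldWait/Acquire/Signal) with the KSynchronizationObject model: the thread keeps an explicit signaled flag, sets it when it dies, and then notifies waiters via NotifyAvailable(). As a rough illustration of that flag-plus-notify pattern (a standalone sketch only, not yuzu's actual KSynchronizationObject API, which manages waiter lists under the scheduler lock):

    // Hypothetical, simplified stand-in for the signal-then-notify pattern in
    // the diff above; names and synchronization primitives are assumptions.
    #include <condition_variable>
    #include <mutex>

    class SignalableObject {
    public:
        bool IsSignaled() const {
            std::scoped_lock lk{mutex};
            return signaled;
        }

        // Mirrors the shape of Thread::Stop(): mark the object signaled,
        // then wake anyone waiting on it (NotifyAvailable() in the diff).
        void SignalAndNotify() {
            std::scoped_lock lk{mutex};
            signaled = true;
            cv.notify_all();
        }

        void Wait() {
            std::unique_lock lk{mutex};
            cv.wait(lk, [this] { return signaled; });
        }

    private:
        mutable std::mutex mutex;
        std::condition_variable cv;
        bool signaled = false;
    };

In the real code the scheduler lock (KScopedSchedulerLock) plays the role of the mutex, and NotifyAvailable() walks the object's waiter list rather than using a condition variable.
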
@@ -67,7 +60,7 @@ void Thread::Stop() {
     global_handle = 0;
 }
 
-void Thread::ResumeFromWait() {
+void Thread::Wakeup() {
     KScopedSchedulerLock lock(kernel);
     switch (status) {
     case ThreadStatus::Paused:
@@ -82,9 +75,6 @@ void Thread::ResumeFromWait() {
         break;
 
     case ThreadStatus::Ready:
-        // The thread's wakeup callback must have already been cleared when the thread was first
-        // awoken.
-        ASSERT(hle_callback == nullptr);
         // If the thread is waiting on multiple wait objects, it might be awoken more than once
         // before actually resuming. We can ignore subsequent wakeups if the thread status has
         // already been set to ThreadStatus::Ready.
@@ -96,30 +86,30 @@
         return;
     }
 
-    SetStatus(ThreadStatus::Ready);
+    SetState(ThreadStatus::Ready);
 }
 
 void Thread::OnWakeUp() {
     KScopedSchedulerLock lock(kernel);
-    SetStatus(ThreadStatus::Ready);
+    SetState(ThreadStatus::Ready);
 }
 
 ResultCode Thread::Start() {
     KScopedSchedulerLock lock(kernel);
-    SetStatus(ThreadStatus::Ready);
+    SetState(ThreadStatus::Ready);
     return RESULT_SUCCESS;
 }
 
 void Thread::CancelWait() {
     KScopedSchedulerLock lock(kernel);
-    if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) {
+    if (GetState() != ThreadSchedStatus::Paused || !is_cancellable) {
         is_sync_cancelled = true;
         return;
     }
     // TODO(Blinkhawk): Implement cancel of server session
     is_sync_cancelled = false;
     SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED);
-    SetStatus(ThreadStatus::Ready);
+    SetState(ThreadStatus::Ready);
 }
 
 static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
@@ -194,7 +184,6 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
     thread->processor_id = processor_id;
     thread->ideal_core = processor_id;
     thread->affinity_mask.SetAffinity(processor_id, true);
-    thread->wait_objects = nullptr;
     thread->mutex_wait_address = 0;
     thread->condvar_wait_address = 0;
     thread->wait_handle = 0;
@@ -202,6 +191,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
     thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap();
     thread->owner_process = owner_process;
     thread->type = type_flags;
+    thread->signaled = false;
     if ((type_flags & THREADTYPE_IDLE) == 0) {
         auto& scheduler = kernel.GlobalSchedulerContext();
         scheduler.AddThread(thread);
@@ -234,24 +224,18 @@ void Thread::SetPriority(u32 priority) {
     UpdatePriority();
 }
 
-void Thread::SetSynchronizationResults(SynchronizationObject* object, ResultCode result) {
+void Thread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) {
     signaling_object = object;
     signaling_result = result;
 }
 
-s32 Thread::GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const {
-    ASSERT_MSG(!wait_objects->empty(), "Thread is not waiting for anything");
-    const auto match = std::find(wait_objects->rbegin(), wait_objects->rend(), object);
-    return static_cast<s32>(std::distance(match, wait_objects->rend()) - 1);
-}
-
 VAddr Thread::GetCommandBufferAddress() const {
     // Offset from the start of TLS at which the IPC command buffer begins.
     constexpr u64 command_header_offset = 0x80;
     return GetTLSAddress() + command_header_offset;
 }
 
-void Thread::SetStatus(ThreadStatus new_status) {
+void Thread::SetState(ThreadStatus new_status) {
     if (new_status == status) {
         return;
     }
@@ -351,28 +335,16 @@ void Thread::UpdatePriority() {
     lock_owner->UpdatePriority();
 }
 
-bool Thread::AllSynchronizationObjectsReady() const {
-    return std::none_of(wait_objects->begin(), wait_objects->end(),
-                        [this](const std::shared_ptr<SynchronizationObject>& object) {
-                            return object->ShouldWait(this);
-                        });
-}
-
-bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
-    ASSERT(hle_callback);
-    return hle_callback(std::move(thread));
-}
-
 ResultCode Thread::SetActivity(ThreadActivity value) {
     KScopedSchedulerLock lock(kernel);
 
-    auto sched_status = GetSchedulingStatus();
+    auto sched_status = GetState();
 
     if (sched_status != ThreadSchedStatus::Runnable && sched_status != ThreadSchedStatus::Paused) {
         return ERR_INVALID_STATE;
     }
 
-    if (IsPendingTermination()) {
+    if (IsTerminationRequested()) {
         return RESULT_SUCCESS;
     }
 
@@ -394,7 +366,7 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
     Handle event_handle{};
     {
         KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
-        SetStatus(ThreadStatus::WaitSleep);
+        SetState(ThreadStatus::WaitSleep);
     }
 
     if (event_handle != InvalidHandle) {
@@ -407,7 +379,7 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
 void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
     const u32 old_state = scheduling_state;
     pausing_state |= static_cast<u32>(flag);
-    const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
+    const u32 base_scheduling = static_cast<u32>(GetState());
     scheduling_state = base_scheduling | pausing_state;
     KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
@@ -415,7 +387,7 @@ void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
 void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
     const u32 old_state = scheduling_state;
     pausing_state &= ~static_cast<u32>(flag);
-    const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
+    const u32 base_scheduling = static_cast<u32>(GetState());
     scheduling_state = base_scheduling | pausing_state;
     KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
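
For reference, the last two hunks keep the existing scheme in which scheduling_state is rebuilt as the base scheduling status OR'd with the accumulated pausing flags; only the accessor name changes (GetSchedulingStatus() to GetState()). A small worked example of that bit composition, using made-up flag values rather than the real ThreadSchedStatus/ThreadSchedFlags constants:

    // Illustrative values only; the real enums live in yuzu's kernel headers.
    #include <cstdint>
    #include <iostream>

    int main() {
        const std::uint32_t base_scheduling = 0x1;    // e.g. a "runnable" status
        const std::uint32_t thread_pause_flag = 0x20; // e.g. one pause-flag bit
        std::uint32_t pausing_state = 0;

        // AddSchedulingFlag: accumulate the flag, then rebuild the state.
        pausing_state |= thread_pause_flag;
        std::uint32_t scheduling_state = base_scheduling | pausing_state;
        std::cout << std::hex << scheduling_state << '\n'; // prints 21

        // RemoveSchedulingFlag: clear the flag and rebuild again.
        pausing_state &= ~thread_pause_flag;
        scheduling_state = base_scheduling | pausing_state;
        std::cout << std::hex << scheduling_state << '\n'; // prints 1
        return 0;
    }

Because the status bits and the pause-flag bits sit in different bit ranges, the combined value can be split back into the base status and the pause reasons, which is what the OR-and-rebuild pattern in Add/RemoveSchedulingFlag relies on.
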