Diffstat (limited to 'src')
 -rw-r--r--  src/core/arm/dynarmic/arm_dynarmic.cpp   |   2
 -rw-r--r--  src/core/arm/unicorn/arm_unicorn.cpp     |   2
 -rw-r--r--  src/core/gdbstub/gdbstub.cpp             |  44
 -rw-r--r--  src/core/hle/kernel/address_arbiter.cpp  |  20
 -rw-r--r--  src/core/hle/kernel/hle_ipc.cpp          |  14
 -rw-r--r--  src/core/hle/kernel/kernel.cpp           |  36
 -rw-r--r--  src/core/hle/kernel/mutex.cpp            |  34
 -rw-r--r--  src/core/hle/kernel/process.cpp          |   6
 -rw-r--r--  src/core/hle/kernel/scheduler.cpp        |  36
 -rw-r--r--  src/core/hle/kernel/server_session.cpp   |   4
 -rw-r--r--  src/core/hle/kernel/svc.cpp              | 121
 -rw-r--r--  src/core/hle/kernel/thread.cpp           |  32
 -rw-r--r--  src/core/hle/kernel/thread.h             | 216
 -rw-r--r--  src/core/hle/kernel/wait_object.cpp      |  31
 -rw-r--r--  src/yuzu/debugger/wait_tree.cpp          |  47
15 files changed, 418 insertions, 227 deletions
diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp
index 05cc84458..7e978cf7a 100644
--- a/src/core/arm/dynarmic/arm_dynarmic.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic.cpp
@@ -86,7 +86,7 @@ public:
         parent.jit->HaltExecution();
         parent.SetPC(pc);
         Kernel::Thread* thread = Kernel::GetCurrentThread();
-        parent.SaveContext(thread->context);
+        parent.SaveContext(thread->GetContext());
         GDBStub::Break();
         GDBStub::SendTrap(thread, 5);
         return;
diff --git a/src/core/arm/unicorn/arm_unicorn.cpp b/src/core/arm/unicorn/arm_unicorn.cpp
index e218a0b15..ded4dd359 100644
--- a/src/core/arm/unicorn/arm_unicorn.cpp
+++ b/src/core/arm/unicorn/arm_unicorn.cpp
@@ -195,7 +195,7 @@ void ARM_Unicorn::ExecuteInstructions(int num_instructions) {
            uc_reg_write(uc, UC_ARM64_REG_PC, &last_bkpt.address);
        }
        Kernel::Thread* thread = Kernel::GetCurrentThread();
-       SaveContext(thread->context);
+       SaveContext(thread->GetContext());
        if (last_bkpt_hit || GDBStub::GetCpuStepFlag()) {
            last_bkpt_hit = false;
            GDBStub::Break();
diff --git a/src/core/gdbstub/gdbstub.cpp b/src/core/gdbstub/gdbstub.cpp
index 5bc947010..e961ef121 100644
--- a/src/core/gdbstub/gdbstub.cpp
+++ b/src/core/gdbstub/gdbstub.cpp
@@ -209,7 +209,7 @@ static Kernel::Thread* FindThreadById(int id) {
     for (u32 core = 0; core < Core::NUM_CPU_CORES; core++) {
         const auto& threads = Core::System::GetInstance().Scheduler(core)->GetThreadList();
         for (auto& thread : threads) {
-            if (thread->GetThreadId() == static_cast<u32>(id)) {
+            if (thread->GetThreadID() == static_cast<u32>(id)) {
                 current_core = core;
                 return thread.get();
             }
@@ -223,16 +223,18 @@ static u64 RegRead(std::size_t id, Kernel::Thread* thread = nullptr) {
         return 0;
     }
 
+    const auto& thread_context = thread->GetContext();
+
     if (id < SP_REGISTER) {
-        return thread->context.cpu_registers[id];
+        return thread_context.cpu_registers[id];
     } else if (id == SP_REGISTER) {
-        return thread->context.sp;
+        return thread_context.sp;
     } else if (id == PC_REGISTER) {
-        return thread->context.pc;
+        return thread_context.pc;
     } else if (id == PSTATE_REGISTER) {
-        return thread->context.pstate;
+        return thread_context.pstate;
     } else if (id > PSTATE_REGISTER && id < FPCR_REGISTER) {
-        return thread->context.vector_registers[id - UC_ARM64_REG_Q0][0];
+        return thread_context.vector_registers[id - UC_ARM64_REG_Q0][0];
     } else {
         return 0;
     }
@@ -243,16 +245,18 @@ static void RegWrite(std::size_t id, u64 val, Kernel::Thread* thread = nullptr)
         return;
     }
 
+    auto& thread_context = thread->GetContext();
+
     if (id < SP_REGISTER) {
-        thread->context.cpu_registers[id] = val;
+        thread_context.cpu_registers[id] = val;
     } else if (id == SP_REGISTER) {
-        thread->context.sp = val;
+        thread_context.sp = val;
     } else if (id == PC_REGISTER) {
-        thread->context.pc = val;
+        thread_context.pc = val;
     } else if (id == PSTATE_REGISTER) {
-        thread->context.pstate = static_cast<u32>(val);
+        thread_context.pstate = static_cast<u32>(val);
     } else if (id > PSTATE_REGISTER && id < FPCR_REGISTER) {
-        thread->context.vector_registers[id - (PSTATE_REGISTER + 1)][0] = val;
+        thread_context.vector_registers[id - (PSTATE_REGISTER + 1)][0] = val;
     }
 }
 
@@ -595,7 +599,7 @@ static void HandleQuery() {
         for (u32 core = 0; core < Core::NUM_CPU_CORES; core++) {
             const auto& threads = Core::System::GetInstance().Scheduler(core)->GetThreadList();
             for (const auto& thread : threads) {
-                val += fmt::format("{:x}", thread->GetThreadId());
+                val += fmt::format("{:x}", thread->GetThreadID());
                 val += ",";
             }
         }
@@ -612,7 +616,7 @@ static void HandleQuery() {
            for (const auto& thread : threads) {
                buffer +=
                    fmt::format(R"*(<thread id="{:x}" core="{:d}" name="Thread {:x}"></thread>)*",
-                               thread->GetThreadId(), core, thread->GetThreadId());
+                               thread->GetThreadID(), core, thread->GetThreadID());
            }
        }
        buffer += "</threads>";
@@ -693,7 +697,7 @@ static void SendSignal(Kernel::Thread* thread, u32 signal, bool full = true) {
     }
 
     if (thread) {
-        buffer += fmt::format(";thread:{:x};", thread->GetThreadId());
+        buffer += fmt::format(";thread:{:x};", thread->GetThreadID());
     }
 
     SendReply(buffer.c_str());
@@ -857,7 +861,9 @@ static void WriteRegister() {
     }
 
     // Update Unicorn context skipping scheduler, no running threads at this point
-    Core::System::GetInstance().ArmInterface(current_core).LoadContext(current_thread->context);
+    Core::System::GetInstance()
+        .ArmInterface(current_core)
+        .LoadContext(current_thread->GetContext());
 
     SendReply("OK");
 }
@@ -886,7 +892,9 @@ static void WriteRegisters() {
     }
 
     // Update Unicorn context skipping scheduler, no running threads at this point
-    Core::System::GetInstance().ArmInterface(current_core).LoadContext(current_thread->context);
+    Core::System::GetInstance()
+        .ArmInterface(current_core)
+        .LoadContext(current_thread->GetContext());
 
     SendReply("OK");
 }
@@ -960,7 +968,9 @@ static void Step() {
     if (command_length > 1) {
         RegWrite(PC_REGISTER, GdbHexToLong(command_buffer + 1), current_thread);
         // Update Unicorn context skipping scheduler, no running threads at this point
-        Core::System::GetInstance().ArmInterface(current_core).LoadContext(current_thread->context);
+        Core::System::GetInstance()
+            .ArmInterface(current_core)
+            .LoadContext(current_thread->GetContext());
     }
     step_loop = true;
     halt_loop = true;
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 93577591f..ebf193930 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -23,13 +23,13 @@ namespace AddressArbiter {
 // Performs actual address waiting logic.
 static ResultCode WaitForAddress(VAddr address, s64 timeout) {
     SharedPtr<Thread> current_thread = GetCurrentThread();
-    current_thread->arb_wait_address = address;
-    current_thread->status = ThreadStatus::WaitArb;
-    current_thread->wakeup_callback = nullptr;
+    current_thread->SetArbiterWaitAddress(address);
+    current_thread->SetStatus(ThreadStatus::WaitArb);
+    current_thread->InvalidateWakeupCallback();
 
     current_thread->WakeAfterDelay(timeout);
 
-    Core::System::GetInstance().CpuCore(current_thread->processor_id).PrepareReschedule();
+    Core::System::GetInstance().CpuCore(current_thread->GetProcessorID()).PrepareReschedule();
     return RESULT_TIMEOUT;
 }
 
@@ -39,10 +39,10 @@ static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address)
                                            std::vector<SharedPtr<Thread>>& waiting_threads,
                                            VAddr arb_addr) {
         const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
-        auto& thread_list = scheduler->GetThreadList();
+        const auto& thread_list = scheduler->GetThreadList();
 
-        for (auto& thread : thread_list) {
-            if (thread->arb_wait_address == arb_addr)
+        for (const auto& thread : thread_list) {
+            if (thread->GetArbiterWaitAddress() == arb_addr)
                 waiting_threads.push_back(thread);
         }
     };
@@ -57,7 +57,7 @@ static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address)
     // Sort them by priority, such that the highest priority ones come first.
     std::sort(threads.begin(), threads.end(),
               [](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) {
-                  return lhs->current_priority < rhs->current_priority;
+                  return lhs->GetPriority() < rhs->GetPriority();
               });
 
     return threads;
@@ -73,9 +73,9 @@ static void WakeThreads(std::vector<SharedPtr<Thread>>& waiting_threads, s32 num
 
     // Signal the waiting threads.
     for (std::size_t i = 0; i < last; i++) {
-        ASSERT(waiting_threads[i]->status == ThreadStatus::WaitArb);
+        ASSERT(waiting_threads[i]->GetStatus() == ThreadStatus::WaitArb);
         waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS);
-        waiting_threads[i]->arb_wait_address = 0;
+        waiting_threads[i]->SetArbiterWaitAddress(0);
         waiting_threads[i]->ResumeFromWait();
     }
 }
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 72fb9d250..edad5f1b1 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -42,14 +42,14 @@ SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread,
                                                       Kernel::SharedPtr<Kernel::Event> event) {
 
     // Put the client thread to sleep until the wait event is signaled or the timeout expires.
-    thread->wakeup_callback = [context = *this, callback](
+    thread->SetWakeupCallback([context = *this, callback](
                                   ThreadWakeupReason reason, SharedPtr<Thread> thread,
                                   SharedPtr<WaitObject> object, std::size_t index) mutable -> bool {
-        ASSERT(thread->status == ThreadStatus::WaitHLEEvent);
+        ASSERT(thread->GetStatus() == ThreadStatus::WaitHLEEvent);
         callback(thread, context, reason);
         context.WriteToOutgoingCommandBuffer(*thread);
         return true;
-    };
+    });
 
     if (!event) {
         // Create event if not provided
@@ -59,8 +59,8 @@ SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread,
     }
 
     event->Clear();
-    thread->status = ThreadStatus::WaitHLEEvent;
-    thread->wait_objects = {event};
+    thread->SetStatus(ThreadStatus::WaitHLEEvent);
+    thread->SetWaitObjects({event});
     event->AddWaitingThread(thread);
 
     if (timeout > 0) {
@@ -209,7 +209,7 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(u32_le* src_cmdb
 
 ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread) {
     std::array<u32, IPC::COMMAND_BUFFER_LENGTH> dst_cmdbuf;
-    Memory::ReadBlock(*thread.owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(),
+    Memory::ReadBlock(*thread.GetOwnerProcess(), thread.GetTLSAddress(), dst_cmdbuf.data(),
                       dst_cmdbuf.size() * sizeof(u32));
 
     // The header was already built in the internal command buffer. Attempt to parse it to verify
@@ -268,7 +268,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread)
     }
 
     // Copy the translated command buffer back into the thread's command buffer area.
-    Memory::WriteBlock(*thread.owner_process, thread.GetTLSAddress(), dst_cmdbuf.data(),
+    Memory::WriteBlock(*thread.GetOwnerProcess(), thread.GetTLSAddress(), dst_cmdbuf.data(),
                        dst_cmdbuf.size() * sizeof(u32));
 
     return RESULT_SUCCESS;
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 3e0800a71..98eb74298 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -46,40 +46,40 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] int cycles_
 
     bool resume = true;
 
-    if (thread->status == ThreadStatus::WaitSynchAny ||
-        thread->status == ThreadStatus::WaitSynchAll ||
-        thread->status == ThreadStatus::WaitHLEEvent) {
+    if (thread->GetStatus() == ThreadStatus::WaitSynchAny ||
+        thread->GetStatus() == ThreadStatus::WaitSynchAll ||
+        thread->GetStatus() == ThreadStatus::WaitHLEEvent) {
         // Remove the thread from each of its waiting objects' waitlists
-        for (auto& object : thread->wait_objects) {
+        for (const auto& object : thread->GetWaitObjects()) {
             object->RemoveWaitingThread(thread.get());
         }
-        thread->wait_objects.clear();
+        thread->ClearWaitObjects();
 
         // Invoke the wakeup callback before clearing the wait objects
-        if (thread->wakeup_callback) {
-            resume = thread->wakeup_callback(ThreadWakeupReason::Timeout, thread, nullptr, 0);
+        if (thread->HasWakeupCallback()) {
+            resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Timeout, thread, nullptr, 0);
         }
     }
 
-    if (thread->mutex_wait_address != 0 || thread->condvar_wait_address != 0 ||
-        thread->wait_handle) {
-        ASSERT(thread->status == ThreadStatus::WaitMutex);
-        thread->mutex_wait_address = 0;
-        thread->condvar_wait_address = 0;
-        thread->wait_handle = 0;
+    if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 ||
+        thread->GetWaitHandle() != 0) {
+        ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
+        thread->SetMutexWaitAddress(0);
+        thread->SetCondVarWaitAddress(0);
+        thread->SetWaitHandle(0);
 
-        auto lock_owner = thread->lock_owner;
+        auto* const lock_owner = thread->GetLockOwner();
         // Threads waking up by timeout from WaitProcessWideKey do not perform priority inheritance
         // and don't have a lock owner unless SignalProcessWideKey was called first and the thread
         // wasn't awakened due to the mutex already being acquired.
-        if (lock_owner) {
+        if (lock_owner != nullptr) {
            lock_owner->RemoveMutexWaiter(thread);
        }
    }
 
-    if (thread->arb_wait_address != 0) {
-        ASSERT(thread->status == ThreadStatus::WaitArb);
-        thread->arb_wait_address = 0;
+    if (thread->GetArbiterWaitAddress() != 0) {
+        ASSERT(thread->GetStatus() == ThreadStatus::WaitArb);
+        thread->SetArbiterWaitAddress(0);
     }
 
     if (resume) {
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index 81675eac5..78d8b74bb 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -28,11 +28,11 @@ static std::pair<SharedPtr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
     SharedPtr<Thread> highest_priority_thread;
     u32 num_waiters = 0;
 
-    for (auto& thread : current_thread->wait_mutex_threads) {
-        if (thread->mutex_wait_address != mutex_addr)
+    for (const auto& thread : current_thread->GetMutexWaitingThreads()) {
+        if (thread->GetMutexWaitAddress() != mutex_addr)
             continue;
 
-        ASSERT(thread->status == ThreadStatus::WaitMutex);
+        ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
 
         ++num_waiters;
         if (highest_priority_thread == nullptr ||
@@ -47,12 +47,12 @@ static std::pair<SharedPtr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
 /// Update the mutex owner field of all threads waiting on the mutex to point to the new owner.
 static void TransferMutexOwnership(VAddr mutex_addr, SharedPtr<Thread> current_thread,
                                    SharedPtr<Thread> new_owner) {
-    auto threads = current_thread->wait_mutex_threads;
-    for (auto& thread : threads) {
-        if (thread->mutex_wait_address != mutex_addr)
+    const auto& threads = current_thread->GetMutexWaitingThreads();
+    for (const auto& thread : threads) {
+        if (thread->GetMutexWaitAddress() != mutex_addr)
             continue;
 
-        ASSERT(thread->lock_owner == current_thread);
+        ASSERT(thread->GetLockOwner() == current_thread);
         current_thread->RemoveMutexWaiter(thread);
         if (new_owner != thread)
             new_owner->AddMutexWaiter(thread);
@@ -84,11 +84,11 @@ ResultCode Mutex::TryAcquire(HandleTable& handle_table, VAddr address, Handle ho
         return ERR_INVALID_HANDLE;
 
     // Wait until the mutex is released
-    GetCurrentThread()->mutex_wait_address = address;
-    GetCurrentThread()->wait_handle = requesting_thread_handle;
+    GetCurrentThread()->SetMutexWaitAddress(address);
+    GetCurrentThread()->SetWaitHandle(requesting_thread_handle);
 
-    GetCurrentThread()->status = ThreadStatus::WaitMutex;
-    GetCurrentThread()->wakeup_callback = nullptr;
+    GetCurrentThread()->SetStatus(ThreadStatus::WaitMutex);
+    GetCurrentThread()->InvalidateWakeupCallback();
 
     // Update the lock holder thread's priority to prevent priority inversion.
     holding_thread->AddMutexWaiter(GetCurrentThread());
@@ -115,7 +115,7 @@ ResultCode Mutex::Release(VAddr address) {
     // Transfer the ownership of the mutex from the previous owner to the new one.
     TransferMutexOwnership(address, GetCurrentThread(), thread);
 
-    u32 mutex_value = thread->wait_handle;
+    u32 mutex_value = thread->GetWaitHandle();
 
     if (num_waiters >= 2) {
         // Notify the guest that there are still some threads waiting for the mutex
@@ -125,13 +125,13 @@ ResultCode Mutex::Release(VAddr address) {
     // Grant the mutex to the next waiting thread and resume it.
     Memory::Write32(address, mutex_value);
 
-    ASSERT(thread->status == ThreadStatus::WaitMutex);
+    ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
     thread->ResumeFromWait();
 
-    thread->lock_owner = nullptr;
-    thread->condvar_wait_address = 0;
-    thread->mutex_wait_address = 0;
-    thread->wait_handle = 0;
+    thread->SetLockOwner(nullptr);
+    thread->SetCondVarWaitAddress(0);
+    thread->SetMutexWaitAddress(0);
+    thread->SetWaitHandle(0);
 
     return RESULT_SUCCESS;
 }
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index dc9fc8470..fb0027a71 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -144,15 +144,15 @@ void Process::PrepareForTermination() {
 
     const auto stop_threads = [this](const std::vector<SharedPtr<Thread>>& thread_list) {
         for (auto& thread : thread_list) {
-            if (thread->owner_process != this)
+            if (thread->GetOwnerProcess() != this)
                 continue;
 
             if (thread == GetCurrentThread())
                 continue;
 
             // TODO(Subv): When are the other running/ready threads terminated?
-            ASSERT_MSG(thread->status == ThreadStatus::WaitSynchAny ||
-                           thread->status == ThreadStatus::WaitSynchAll,
+            ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitSynchAny ||
+                           thread->GetStatus() == ThreadStatus::WaitSynchAll,
                        "Exiting processes with non-waiting threads is currently unimplemented");
 
             thread->Stop();
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 1e82cfffb..cfd6e1bad 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -38,10 +38,10 @@ Thread* Scheduler::PopNextReadyThread() {
     Thread* next = nullptr;
     Thread* thread = GetCurrentThread();
 
-    if (thread && thread->status == ThreadStatus::Running) {
+    if (thread && thread->GetStatus() == ThreadStatus::Running) {
         // We have to do better than the current thread.
         // This call returns null when that's not possible.
-        next = ready_queue.pop_first_better(thread->current_priority);
+        next = ready_queue.pop_first_better(thread->GetPriority());
         if (!next) {
             // Otherwise just keep going with the current thread
             next = thread;
@@ -58,22 +58,21 @@ void Scheduler::SwitchContext(Thread* new_thread) {
 
     // Save context for previous thread
     if (previous_thread) {
-        previous_thread->last_running_ticks = CoreTiming::GetTicks();
-        cpu_core.SaveContext(previous_thread->context);
+        cpu_core.SaveContext(previous_thread->GetContext());
         // Save the TPIDR_EL0 system register in case it was modified.
-        previous_thread->tpidr_el0 = cpu_core.GetTPIDR_EL0();
+        previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
 
-        if (previous_thread->status == ThreadStatus::Running) {
+        if (previous_thread->GetStatus() == ThreadStatus::Running) {
             // This is only the case when a reschedule is triggered without the current thread
             // yielding execution (i.e. an event triggered, system core time-sliced, etc)
-            ready_queue.push_front(previous_thread->current_priority, previous_thread);
-            previous_thread->status = ThreadStatus::Ready;
+            ready_queue.push_front(previous_thread->GetPriority(), previous_thread);
+            previous_thread->SetStatus(ThreadStatus::Ready);
         }
     }
 
     // Load context of new thread
     if (new_thread) {
-        ASSERT_MSG(new_thread->status == ThreadStatus::Ready,
+        ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready,
                    "Thread must be ready to become running.");
 
         // Cancel any outstanding wakeup events for this thread
@@ -83,15 +82,16 @@ void Scheduler::SwitchContext(Thread* new_thread) {
 
         current_thread = new_thread;
 
-        ready_queue.remove(new_thread->current_priority, new_thread);
-        new_thread->status = ThreadStatus::Running;
+        ready_queue.remove(new_thread->GetPriority(), new_thread);
+        new_thread->SetStatus(ThreadStatus::Running);
 
-        if (previous_process != current_thread->owner_process) {
-            Core::CurrentProcess() = current_thread->owner_process;
+        const auto thread_owner_process = current_thread->GetOwnerProcess();
+        if (previous_process != thread_owner_process) {
+            Core::CurrentProcess() = thread_owner_process;
             SetCurrentPageTable(&Core::CurrentProcess()->VMManager().page_table);
         }
 
-        cpu_core.LoadContext(new_thread->context);
+        cpu_core.LoadContext(new_thread->GetContext());
         cpu_core.SetTlsAddress(new_thread->GetTLSAddress());
         cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
         cpu_core.ClearExclusiveState();
@@ -136,14 +136,14 @@ void Scheduler::RemoveThread(Thread* thread) {
 void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
     std::lock_guard<std::mutex> lock(scheduler_mutex);
 
-    ASSERT(thread->status == ThreadStatus::Ready);
+    ASSERT(thread->GetStatus() == ThreadStatus::Ready);
     ready_queue.push_back(priority, thread);
 }
 
 void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
     std::lock_guard<std::mutex> lock(scheduler_mutex);
 
-    ASSERT(thread->status == ThreadStatus::Ready);
+    ASSERT(thread->GetStatus() == ThreadStatus::Ready);
     ready_queue.remove(priority, thread);
 }
 
@@ -151,8 +151,8 @@ void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
     std::lock_guard<std::mutex> lock(scheduler_mutex);
 
     // If thread was ready, adjust queues
-    if (thread->status == ThreadStatus::Ready)
-        ready_queue.move(thread, thread->current_priority, priority);
+    if (thread->GetStatus() == ThreadStatus::Ready)
+        ready_queue.move(thread, thread->GetPriority(), priority);
     else
         ready_queue.prepare(priority);
 }
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index aba0cab96..1ece691c7 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -120,10 +120,10 @@ ResultCode ServerSession::HandleSyncRequest(SharedPtr<Thread> thread) {
         result = hle_handler->HandleSyncRequest(context);
     }
 
-    if (thread->status == ThreadStatus::Running) {
+    if (thread->GetStatus() == ThreadStatus::Running) {
         // Put the thread to sleep until the server replies, it will be awoken in
         // svcReplyAndReceive for LLE servers.
-        thread->status = ThreadStatus::WaitIPC;
+        thread->SetStatus(ThreadStatus::WaitIPC);
 
         if (hle_handler != nullptr) {
             // For HLE services, we put the request threads to sleep for a short duration to
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 1cdaa740a..6c4af7e47 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -156,7 +156,7 @@ static ResultCode GetThreadId(u32* thread_id, Handle thread_handle) {
         return ERR_INVALID_HANDLE;
     }
 
-    *thread_id = thread->GetThreadId();
+    *thread_id = thread->GetThreadID();
     return RESULT_SUCCESS;
 }
 
@@ -177,7 +177,7 @@ static ResultCode GetProcessId(u32* process_id, Handle process_handle) {
 /// Default thread wakeup callback for WaitSynchronization
 static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread,
                                         SharedPtr<WaitObject> object, std::size_t index) {
-    ASSERT(thread->status == ThreadStatus::WaitSynchAny);
+    ASSERT(thread->GetStatus() == ThreadStatus::WaitSynchAny);
 
     if (reason == ThreadWakeupReason::Timeout) {
         thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
@@ -204,10 +204,10 @@ static ResultCode WaitSynchronization(Handle* index, VAddr handles_address, u64
     if (handle_count > MaxHandles)
         return ResultCode(ErrorModule::Kernel, ErrCodes::TooLarge);
 
-    auto thread = GetCurrentThread();
+    auto* const thread = GetCurrentThread();
 
-    using ObjectPtr = SharedPtr<WaitObject>;
-    std::vector<ObjectPtr> objects(handle_count);
+    using ObjectPtr = Thread::ThreadWaitObjects::value_type;
+    Thread::ThreadWaitObjects objects(handle_count);
     auto& kernel = Core::System::GetInstance().Kernel();
 
     for (u64 i = 0; i < handle_count; ++i) {
@@ -244,14 +244,14 @@ static ResultCode WaitSynchronization(Handle* index, VAddr handles_address, u64
     for (auto& object : objects)
         object->AddWaitingThread(thread);
 
-    thread->wait_objects = std::move(objects);
-    thread->status = ThreadStatus::WaitSynchAny;
+    thread->SetWaitObjects(std::move(objects));
+    thread->SetStatus(ThreadStatus::WaitSynchAny);
 
     // Create an event to wake the thread up after the specified nanosecond delay has passed
     thread->WakeAfterDelay(nano_seconds);
-    thread->wakeup_callback = DefaultThreadWakeupCallback;
+    thread->SetWakeupCallback(DefaultThreadWakeupCallback);
 
-    Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();
+    Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
 
     return RESULT_TIMEOUT;
 }
@@ -266,7 +266,7 @@ static ResultCode CancelSynchronization(Handle thread_handle) {
         return ERR_INVALID_HANDLE;
     }
 
-    ASSERT(thread->status == ThreadStatus::WaitSynchAny);
+    ASSERT(thread->GetStatus() == ThreadStatus::WaitSynchAny);
     thread->SetWaitSynchronizationResult(
         ResultCode(ErrorModule::Kernel, ErrCodes::SynchronizationCanceled));
     thread->ResumeFromWait();
@@ -425,7 +425,7 @@ static ResultCode GetThreadContext(VAddr thread_context, Handle handle) {
     }
 
     const auto current_process = Core::CurrentProcess();
-    if (thread->owner_process != current_process) {
+    if (thread->GetOwnerProcess() != current_process) {
         return ERR_INVALID_HANDLE;
     }
 
@@ -433,7 +433,7 @@ static ResultCode GetThreadContext(VAddr thread_context, Handle handle) {
         return ERR_ALREADY_REGISTERED;
     }
 
-    Core::ARM_Interface::ThreadContext ctx = thread->context;
+    Core::ARM_Interface::ThreadContext ctx = thread->GetContext();
     // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
     ctx.pstate &= 0xFF0FFE20;
 
@@ -479,14 +479,14 @@ static ResultCode SetThreadPriority(Handle handle, u32 priority) {
 
     thread->SetPriority(priority);
 
-    Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();
+    Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
     return RESULT_SUCCESS;
 }
 
 /// Get which CPU core is executing the current thread
 static u32 GetCurrentProcessorNumber() {
     LOG_TRACE(Kernel_SVC, "called");
-    return GetCurrentThread()->processor_id;
+    return GetCurrentThread()->GetProcessorID();
 }
 
 static ResultCode MapSharedMemory(Handle shared_memory_handle, VAddr addr, u64 size,
@@ -622,10 +622,14 @@ static ResultCode CreateThread(Handle* out_handle, VAddr entry_point, u64 arg, V
     CASCADE_RESULT(SharedPtr<Thread> thread,
                    Thread::Create(kernel, name, entry_point, priority, arg, processor_id, stack_top,
                                   Core::CurrentProcess()));
-    CASCADE_RESULT(thread->guest_handle, kernel.HandleTable().Create(thread));
-    *out_handle = thread->guest_handle;
+    const auto new_guest_handle = kernel.HandleTable().Create(thread);
+    if (new_guest_handle.Failed()) {
+        return new_guest_handle.Code();
+    }
+    thread->SetGuestHandle(*new_guest_handle);
+    *out_handle = *new_guest_handle;
 
-    Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();
+    Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
 
     LOG_TRACE(Kernel_SVC,
               "called entrypoint=0x{:08X} ({}), arg=0x{:08X}, stacktop=0x{:08X}, "
@@ -645,10 +649,10 @@ static ResultCode StartThread(Handle thread_handle) {
         return ERR_INVALID_HANDLE;
     }
 
-    ASSERT(thread->status == ThreadStatus::Dormant);
+    ASSERT(thread->GetStatus() == ThreadStatus::Dormant);
 
     thread->ResumeFromWait();
-    Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();
+    Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
 
     return RESULT_SUCCESS;
 }
@@ -694,17 +698,17 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var
     CASCADE_CODE(Mutex::Release(mutex_addr));
 
     SharedPtr<Thread> current_thread = GetCurrentThread();
-    current_thread->condvar_wait_address = condition_variable_addr;
-    current_thread->mutex_wait_address = mutex_addr;
-    current_thread->wait_handle = thread_handle;
-    current_thread->status = ThreadStatus::WaitMutex;
-    current_thread->wakeup_callback = nullptr;
+    current_thread->SetCondVarWaitAddress(condition_variable_addr);
+    current_thread->SetMutexWaitAddress(mutex_addr);
+    current_thread->SetWaitHandle(thread_handle);
+    current_thread->SetStatus(ThreadStatus::WaitMutex);
+    current_thread->InvalidateWakeupCallback();
 
     current_thread->WakeAfterDelay(nano_seconds);
 
     // Note: Deliberately don't attempt to inherit the lock owner's priority.
 
-    Core::System::GetInstance().CpuCore(current_thread->processor_id).PrepareReschedule();
+    Core::System::GetInstance().CpuCore(current_thread->GetProcessorID()).PrepareReschedule();
     return RESULT_SUCCESS;
 }
 
@@ -713,14 +717,14 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
     LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
               condition_variable_addr, target);
 
-    auto RetrieveWaitingThreads = [](std::size_t core_index,
+    const auto RetrieveWaitingThreads = [](std::size_t core_index,
                                            std::vector<SharedPtr<Thread>>& waiting_threads,
                                            VAddr condvar_addr) {
         const auto& scheduler = Core::System::GetInstance().Scheduler(core_index);
-        auto& thread_list = scheduler->GetThreadList();
+        const auto& thread_list = scheduler->GetThreadList();
 
-        for (auto& thread : thread_list) {
-            if (thread->condvar_wait_address == condvar_addr)
+        for (const auto& thread : thread_list) {
+            if (thread->GetCondVarWaitAddress() == condvar_addr)
                 waiting_threads.push_back(thread);
         }
     };
@@ -734,7 +738,7 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
     // Sort them by priority, such that the highest priority ones come first.
     std::sort(waiting_threads.begin(), waiting_threads.end(),
               [](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) {
-                  return lhs->current_priority < rhs->current_priority;
+                  return lhs->GetPriority() < rhs->GetPriority();
               });
 
     // Only process up to 'target' threads, unless 'target' is -1, in which case process
@@ -750,7 +754,7 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
     for (std::size_t index = 0; index < last; ++index) {
         auto& thread = waiting_threads[index];
 
-        ASSERT(thread->condvar_wait_address == condition_variable_addr);
+        ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);
 
         std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
 
@@ -759,42 +763,43 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
         // Atomically read the value of the mutex.
         u32 mutex_val = 0;
         do {
-            monitor.SetExclusive(current_core, thread->mutex_wait_address);
+            monitor.SetExclusive(current_core, thread->GetMutexWaitAddress());
 
             // If the mutex is not yet acquired, acquire it.
-            mutex_val = Memory::Read32(thread->mutex_wait_address);
+            mutex_val = Memory::Read32(thread->GetMutexWaitAddress());
 
             if (mutex_val != 0) {
                 monitor.ClearExclusive();
                 break;
            }
-        } while (!monitor.ExclusiveWrite32(current_core, thread->mutex_wait_address,
-                                           thread->wait_handle));
+        } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
+                                           thread->GetWaitHandle()));
 
         if (mutex_val == 0) {
             // We were able to acquire the mutex, resume this thread.
-            ASSERT(thread->status == ThreadStatus::WaitMutex);
+            ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
             thread->ResumeFromWait();
 
-            auto lock_owner = thread->lock_owner;
-            if (lock_owner)
+            auto* const lock_owner = thread->GetLockOwner();
+            if (lock_owner != nullptr) {
                 lock_owner->RemoveMutexWaiter(thread);
+            }
 
-            thread->lock_owner = nullptr;
-            thread->mutex_wait_address = 0;
-            thread->condvar_wait_address = 0;
-            thread->wait_handle = 0;
+            thread->SetLockOwner(nullptr);
+            thread->SetMutexWaitAddress(0);
+            thread->SetCondVarWaitAddress(0);
+            thread->SetWaitHandle(0);
         } else {
             // Atomically signal that the mutex now has a waiting thread.
             do {
-                monitor.SetExclusive(current_core, thread->mutex_wait_address);
+                monitor.SetExclusive(current_core, thread->GetMutexWaitAddress());
 
                 // Ensure that the mutex value is still what we expect.
-                u32 value = Memory::Read32(thread->mutex_wait_address);
+                u32 value = Memory::Read32(thread->GetMutexWaitAddress());
                 // TODO(Subv): When this happens, the kernel just clears the exclusive state and
                 // retries the initial read for this thread.
                 ASSERT_MSG(mutex_val == value, "Unhandled synchronization primitive case");
-            } while (!monitor.ExclusiveWrite32(current_core, thread->mutex_wait_address,
+            } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
                                                mutex_val | Mutex::MutexHasWaitersFlag));
 
             // The mutex is already owned by some other thread, make this thread wait on it.
@@ -802,12 +807,12 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
             Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
             auto owner = kernel.HandleTable().Get<Thread>(owner_handle);
             ASSERT(owner);
-            ASSERT(thread->status == ThreadStatus::WaitMutex);
-            thread->wakeup_callback = nullptr;
+            ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex);
+            thread->InvalidateWakeupCallback();
 
             owner->AddMutexWaiter(thread);
 
-            Core::System::GetInstance().CpuCore(thread->processor_id).PrepareReschedule();
+            Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
         }
     }
 
@@ -913,8 +918,8 @@ static ResultCode GetThreadCoreMask(Handle thread_handle, u32* core, u64* mask)
         return ERR_INVALID_HANDLE;
     }
 
-    *core = thread->ideal_core;
-    *mask = thread->affinity_mask;
+    *core = thread->GetIdealCore();
+    *mask = thread->GetAffinityMask();
 
     return RESULT_SUCCESS;
 }
@@ -930,11 +935,13 @@ static ResultCode SetThreadCoreMask(Handle thread_handle, u32 core, u64 mask) {
     }
 
     if (core == static_cast<u32>(THREADPROCESSORID_DEFAULT)) {
-        ASSERT(thread->owner_process->GetDefaultProcessorID() !=
-               static_cast<u8>(THREADPROCESSORID_DEFAULT));
+        const u8 default_processor_id = thread->GetOwnerProcess()->GetDefaultProcessorID();
+
+        ASSERT(default_processor_id != static_cast<u8>(THREADPROCESSORID_DEFAULT));
+
         // Set the target CPU to the one specified in the process' exheader.
-        core = thread->owner_process->GetDefaultProcessorID();
-        mask = 1ull << core;
+        core = default_processor_id;
+        mask = 1ULL << core;
     }
 
     if (mask == 0) {
@@ -945,7 +952,7 @@ static ResultCode SetThreadCoreMask(Handle thread_handle, u32 core, u64 mask) {
     static constexpr u32 OnlyChangeMask = static_cast<u32>(-3);
 
     if (core == OnlyChangeMask) {
-        core = thread->ideal_core;
+        core = thread->GetIdealCore();
     } else if (core >= Core::NUM_CPU_CORES && core != static_cast<u32>(-1)) {
         return ResultCode(ErrorModule::Kernel, ErrCodes::InvalidProcessorId);
     }
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index b5c16cfbb..354043c53 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp | |||
| @@ -70,7 +70,7 @@ void Thread::Stop() { | |||
| 70 | 70 | ||
| 71 | void WaitCurrentThread_Sleep() { | 71 | void WaitCurrentThread_Sleep() { |
| 72 | Thread* thread = GetCurrentThread(); | 72 | Thread* thread = GetCurrentThread(); |
| 73 | thread->status = ThreadStatus::WaitSleep; | 73 | thread->SetStatus(ThreadStatus::WaitSleep); |
| 74 | } | 74 | } |
| 75 | 75 | ||
| 76 | void ExitCurrentThread() { | 76 | void ExitCurrentThread() { |
| @@ -269,9 +269,9 @@ SharedPtr<Thread> SetupMainThread(KernelCore& kernel, VAddr entry_point, u32 pri | |||
| 269 | SharedPtr<Thread> thread = std::move(thread_res).Unwrap(); | 269 | SharedPtr<Thread> thread = std::move(thread_res).Unwrap(); |
| 270 | 270 | ||
| 271 | // Register 1 must be a handle to the main thread | 271 | // Register 1 must be a handle to the main thread |
| 272 | thread->guest_handle = kernel.HandleTable().Create(thread).Unwrap(); | 272 | const Handle guest_handle = kernel.HandleTable().Create(thread).Unwrap(); |
| 273 | 273 | thread->SetGuestHandle(guest_handle); | |
| 274 | thread->context.cpu_registers[1] = thread->guest_handle; | 274 | thread->GetContext().cpu_registers[1] = guest_handle; |
| 275 | 275 | ||
| 276 | // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires | 276 | // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires |
| 277 | thread->ResumeFromWait(); | 277 | thread->ResumeFromWait(); |
| @@ -299,6 +299,18 @@ VAddr Thread::GetCommandBufferAddress() const { | |||
| 299 | return GetTLSAddress() + CommandHeaderOffset; | 299 | return GetTLSAddress() + CommandHeaderOffset; |
| 300 | } | 300 | } |
| 301 | 301 | ||
| 302 | void Thread::SetStatus(ThreadStatus new_status) { | ||
| 303 | if (new_status == status) { | ||
| 304 | return; | ||
| 305 | } | ||
| 306 | |||
| 307 | if (status == ThreadStatus::Running) { | ||
| 308 | last_running_ticks = CoreTiming::GetTicks(); | ||
| 309 | } | ||
| 310 | |||
| 311 | status = new_status; | ||
| 312 | } | ||
| 313 | |||
| 302 | void Thread::AddMutexWaiter(SharedPtr<Thread> thread) { | 314 | void Thread::AddMutexWaiter(SharedPtr<Thread> thread) { |
| 303 | if (thread->lock_owner == this) { | 315 | if (thread->lock_owner == this) { |
| 304 | // If the thread is already waiting for this thread to release the mutex, ensure that the | 316 | // If the thread is already waiting for this thread to release the mutex, ensure that the |
| @@ -393,6 +405,18 @@ void Thread::ChangeCore(u32 core, u64 mask) { | |||
| 393 | Core::System::GetInstance().CpuCore(processor_id).PrepareReschedule(); | 405 | Core::System::GetInstance().CpuCore(processor_id).PrepareReschedule(); |
| 394 | } | 406 | } |
| 395 | 407 | ||
| 408 | bool Thread::AllWaitObjectsReady() { | ||
| 409 | return std::none_of( | ||
| 410 | wait_objects.begin(), wait_objects.end(), | ||
| 411 | [this](const SharedPtr<WaitObject>& object) { return object->ShouldWait(this); }); | ||
| 412 | } | ||
| 413 | |||
| 414 | bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread, | ||
| 415 | SharedPtr<WaitObject> object, std::size_t index) { | ||
| 416 | ASSERT(wakeup_callback); | ||
| 417 | return wakeup_callback(reason, std::move(thread), std::move(object), index); | ||
| 418 | } | ||
| 419 | |||
| 396 | //////////////////////////////////////////////////////////////////////////////////////////////////// | 420 | //////////////////////////////////////////////////////////////////////////////////////////////////// |
| 397 | 421 | ||
| 398 | /** | 422 | /** |
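Three new Thread members land in these hunks: SetStatus, which timestamps the moment a thread stops running, AllWaitObjectsReady, and InvokeWakeupCallback. Below is a stand-alone sketch of the SetStatus bookkeeping only; the StatusTracker class is illustrative, with the injected tick source standing in for CoreTiming::GetTicks().

// Illustrative sketch of the status-transition bookkeeping added in
// Thread::SetStatus: redundant writes are ignored, and the tick counter is
// sampled only when the thread leaves the Running state.
#include <cstdint>
#include <functional>
#include <utility>

enum class ThreadStatus { Running, Ready, WaitSleep, Dormant, Dead };

class StatusTracker {
public:
    using TickSource = std::function<std::uint64_t()>; // stand-in for CoreTiming::GetTicks

    explicit StatusTracker(TickSource ticks) : ticks{std::move(ticks)} {}

    void SetStatus(ThreadStatus new_status) {
        if (new_status == status) {
            return; // nothing changed; keep last_running_ticks intact
        }
        if (status == ThreadStatus::Running) {
            last_running_ticks = ticks(); // remember when this thread was last on a core
        }
        status = new_status;
    }

    ThreadStatus GetStatus() const { return status; }
    std::uint64_t GetLastRunningTicks() const { return last_running_ticks; }

private:
    TickSource ticks;
    ThreadStatus status = ThreadStatus::Dormant;
    std::uint64_t last_running_ticks = 0;
};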
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index 4250144c3..d2b191357 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h | |||
| @@ -65,6 +65,15 @@ public: | |||
| 65 | using TLSMemory = std::vector<u8>; | 65 | using TLSMemory = std::vector<u8>; |
| 66 | using TLSMemoryPtr = std::shared_ptr<TLSMemory>; | 66 | using TLSMemoryPtr = std::shared_ptr<TLSMemory>; |
| 67 | 67 | ||
| 68 | using MutexWaitingThreads = std::vector<SharedPtr<Thread>>; | ||
| 69 | |||
| 70 | using ThreadContext = Core::ARM_Interface::ThreadContext; | ||
| 71 | |||
| 72 | using ThreadWaitObjects = std::vector<SharedPtr<WaitObject>>; | ||
| 73 | |||
| 74 | using WakeupCallback = std::function<bool(ThreadWakeupReason reason, SharedPtr<Thread> thread, | ||
| 75 | SharedPtr<WaitObject> object, std::size_t index)>; | ||
| 76 | |||
| 68 | /** | 77 | /** |
| 69 | * Creates and returns a new thread. The new thread is immediately scheduled | 78 | * Creates and returns a new thread. The new thread is immediately scheduled |
| 70 | * @param kernel The kernel instance this thread will be created under. | 79 | * @param kernel The kernel instance this thread will be created under. |
| @@ -106,6 +115,14 @@ public: | |||
| 106 | } | 115 | } |
| 107 | 116 | ||
| 108 | /** | 117 | /** |
| 118 | * Gets the thread's nominal priority. | ||
| 119 | * @return The current thread's nominal priority. | ||
| 120 | */ | ||
| 121 | u32 GetNominalPriority() const { | ||
| 122 | return nominal_priority; | ||
| 123 | } | ||
| 124 | |||
| 125 | /** | ||
| 109 | * Sets the thread's current priority | 126 | * Sets the thread's current priority |
| 110 | * @param priority The new priority | 127 | * @param priority The new priority |
| 111 | */ | 128 | */ |
| @@ -133,7 +150,7 @@ public: | |||
| 133 | * Gets the thread's thread ID | 150 | * Gets the thread's thread ID |
| 134 | * @return The thread's ID | 151 | * @return The thread's ID |
| 135 | */ | 152 | */ |
| 136 | u32 GetThreadId() const { | 153 | u32 GetThreadID() const { |
| 137 | return thread_id; | 154 | return thread_id; |
| 138 | } | 155 | } |
| 139 | 156 | ||
| @@ -203,6 +220,11 @@ public: | |||
| 203 | return tpidr_el0; | 220 | return tpidr_el0; |
| 204 | } | 221 | } |
| 205 | 222 | ||
| 223 | /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread. | ||
| 224 | void SetTPIDR_EL0(u64 value) { | ||
| 225 | tpidr_el0 = value; | ||
| 226 | } | ||
| 227 | |||
| 206 | /* | 228 | /* |
| 207 | * Returns the address of the current thread's command buffer, located in the TLS. | 229 | * Returns the address of the current thread's command buffer, located in the TLS. |
| 208 | * @returns VAddr of the thread's command buffer. | 230 | * @returns VAddr of the thread's command buffer. |
| @@ -218,69 +240,193 @@ public: | |||
| 218 | return status == ThreadStatus::WaitSynchAll; | 240 | return status == ThreadStatus::WaitSynchAll; |
| 219 | } | 241 | } |
| 220 | 242 | ||
| 221 | Core::ARM_Interface::ThreadContext context; | 243 | ThreadContext& GetContext() { |
| 244 | return context; | ||
| 245 | } | ||
| 246 | |||
| 247 | const ThreadContext& GetContext() const { | ||
| 248 | return context; | ||
| 249 | } | ||
| 250 | |||
| 251 | ThreadStatus GetStatus() const { | ||
| 252 | return status; | ||
| 253 | } | ||
| 254 | |||
| 255 | void SetStatus(ThreadStatus new_status); | ||
| 256 | |||
| 257 | u64 GetLastRunningTicks() const { | ||
| 258 | return last_running_ticks; | ||
| 259 | } | ||
| 260 | |||
| 261 | s32 GetProcessorID() const { | ||
| 262 | return processor_id; | ||
| 263 | } | ||
| 264 | |||
| 265 | SharedPtr<Process>& GetOwnerProcess() { | ||
| 266 | return owner_process; | ||
| 267 | } | ||
| 268 | |||
| 269 | const SharedPtr<Process>& GetOwnerProcess() const { | ||
| 270 | return owner_process; | ||
| 271 | } | ||
| 272 | |||
| 273 | const ThreadWaitObjects& GetWaitObjects() const { | ||
| 274 | return wait_objects; | ||
| 275 | } | ||
| 276 | |||
| 277 | void SetWaitObjects(ThreadWaitObjects objects) { | ||
| 278 | wait_objects = std::move(objects); | ||
| 279 | } | ||
| 280 | |||
| 281 | void ClearWaitObjects() { | ||
| 282 | wait_objects.clear(); | ||
| 283 | } | ||
| 284 | |||
| 285 | /// Determines whether all the objects this thread is waiting on are ready. | ||
| 286 | bool AllWaitObjectsReady(); | ||
| 287 | |||
| 288 | const MutexWaitingThreads& GetMutexWaitingThreads() const { | ||
| 289 | return wait_mutex_threads; | ||
| 290 | } | ||
| 291 | |||
| 292 | Thread* GetLockOwner() const { | ||
| 293 | return lock_owner.get(); | ||
| 294 | } | ||
| 295 | |||
| 296 | void SetLockOwner(SharedPtr<Thread> owner) { | ||
| 297 | lock_owner = std::move(owner); | ||
| 298 | } | ||
| 299 | |||
| 300 | VAddr GetCondVarWaitAddress() const { | ||
| 301 | return condvar_wait_address; | ||
| 302 | } | ||
| 303 | |||
| 304 | void SetCondVarWaitAddress(VAddr address) { | ||
| 305 | condvar_wait_address = address; | ||
| 306 | } | ||
| 307 | |||
| 308 | VAddr GetMutexWaitAddress() const { | ||
| 309 | return mutex_wait_address; | ||
| 310 | } | ||
| 222 | 311 | ||
| 223 | u32 thread_id; | 312 | void SetMutexWaitAddress(VAddr address) { |
| 313 | mutex_wait_address = address; | ||
| 314 | } | ||
| 224 | 315 | ||
| 225 | ThreadStatus status; | 316 | Handle GetWaitHandle() const { |
| 226 | VAddr entry_point; | 317 | return wait_handle; |
| 227 | VAddr stack_top; | 318 | } |
| 228 | 319 | ||
| 229 | u32 nominal_priority; ///< Nominal thread priority, as set by the emulated application | 320 | void SetWaitHandle(Handle handle) { |
| 230 | u32 current_priority; ///< Current thread priority, can be temporarily changed | 321 | wait_handle = handle; |
| 322 | } | ||
| 231 | 323 | ||
| 232 | u64 last_running_ticks; ///< CPU tick when thread was last running | 324 | VAddr GetArbiterWaitAddress() const { |
| 325 | return arb_wait_address; | ||
| 326 | } | ||
| 233 | 327 | ||
| 234 | s32 processor_id; | 328 | void SetArbiterWaitAddress(VAddr address) { |
| 329 | arb_wait_address = address; | ||
| 330 | } | ||
| 235 | 331 | ||
| 236 | VAddr tls_address; ///< Virtual address of the Thread Local Storage of the thread | 332 | void SetGuestHandle(Handle handle) { |
| 237 | u64 tpidr_el0; ///< TPIDR_EL0 read/write system register. | 333 | guest_handle = handle; |
| 334 | } | ||
| 238 | 335 | ||
| 239 | SharedPtr<Process> owner_process; ///< Process that owns this thread | 336 | bool HasWakeupCallback() const { |
| 337 | return wakeup_callback != nullptr; | ||
| 338 | } | ||
| 339 | |||
| 340 | void SetWakeupCallback(WakeupCallback callback) { | ||
| 341 | wakeup_callback = std::move(callback); | ||
| 342 | } | ||
| 343 | |||
| 344 | void InvalidateWakeupCallback() { | ||
| 345 | SetWakeupCallback(nullptr); | ||
| 346 | } | ||
| 347 | |||
| 348 | /** | ||
| 349 | * Invokes the thread's wakeup callback. | ||
| 350 | * | ||
| 351 | * @pre A valid wakeup callback has been set. Violating this precondition | ||
| 352 | * will cause an assertion to trigger. | ||
| 353 | */ | ||
| 354 | bool InvokeWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread, | ||
| 355 | SharedPtr<WaitObject> object, std::size_t index); | ||
| 356 | |||
| 357 | u32 GetIdealCore() const { | ||
| 358 | return ideal_core; | ||
| 359 | } | ||
| 360 | |||
| 361 | u64 GetAffinityMask() const { | ||
| 362 | return affinity_mask; | ||
| 363 | } | ||
| 364 | |||
| 365 | private: | ||
| 366 | explicit Thread(KernelCore& kernel); | ||
| 367 | ~Thread() override; | ||
| 368 | |||
| 369 | Core::ARM_Interface::ThreadContext context{}; | ||
| 370 | |||
| 371 | u32 thread_id = 0; | ||
| 372 | |||
| 373 | ThreadStatus status = ThreadStatus::Dormant; | ||
| 374 | |||
| 375 | VAddr entry_point = 0; | ||
| 376 | VAddr stack_top = 0; | ||
| 377 | |||
| 378 | u32 nominal_priority = 0; ///< Nominal thread priority, as set by the emulated application | ||
| 379 | u32 current_priority = 0; ///< Current thread priority, can be temporarily changed | ||
| 380 | |||
| 381 | u64 last_running_ticks = 0; ///< CPU tick when thread was last running | ||
| 382 | |||
| 383 | s32 processor_id = 0; | ||
| 384 | |||
| 385 | VAddr tls_address = 0; ///< Virtual address of the Thread Local Storage of the thread | ||
| 386 | u64 tpidr_el0 = 0; ///< TPIDR_EL0 read/write system register. | ||
| 387 | |||
| 388 | /// Process that owns this thread | ||
| 389 | SharedPtr<Process> owner_process; | ||
| 240 | 390 | ||
| 241 | /// Objects that the thread is waiting on, in the same order as they were | 391 | /// Objects that the thread is waiting on, in the same order as they were |
| 242 | // passed to WaitSynchronization1/N. | 392 | /// passed to WaitSynchronization1/N. |
| 243 | std::vector<SharedPtr<WaitObject>> wait_objects; | 393 | ThreadWaitObjects wait_objects; |
| 244 | 394 | ||
| 245 | /// List of threads that are waiting for a mutex that is held by this thread. | 395 | /// List of threads that are waiting for a mutex that is held by this thread. |
| 246 | std::vector<SharedPtr<Thread>> wait_mutex_threads; | 396 | MutexWaitingThreads wait_mutex_threads; |
| 247 | 397 | ||
| 248 | /// Thread that owns the lock that this thread is waiting for. | 398 | /// Thread that owns the lock that this thread is waiting for. |
| 249 | SharedPtr<Thread> lock_owner; | 399 | SharedPtr<Thread> lock_owner; |
| 250 | 400 | ||
| 251 | // If waiting on a ConditionVariable, this is the ConditionVariable address | 401 | /// If waiting on a ConditionVariable, this is the ConditionVariable address |
| 252 | VAddr condvar_wait_address; | 402 | VAddr condvar_wait_address = 0; |
| 253 | VAddr mutex_wait_address; ///< If waiting on a Mutex, this is the mutex address | 403 | /// If waiting on a Mutex, this is the mutex address |
| 254 | Handle wait_handle; ///< The handle used to wait for the mutex. | 404 | VAddr mutex_wait_address = 0; |
| 405 | /// The handle used to wait for the mutex. | ||
| 406 | Handle wait_handle = 0; | ||
| 255 | 407 | ||
| 256 | // If waiting for an AddressArbiter, this is the address being waited on. | 408 | /// If waiting for an AddressArbiter, this is the address being waited on. |
| 257 | VAddr arb_wait_address{0}; | 409 | VAddr arb_wait_address{0}; |
| 258 | 410 | ||
| 259 | std::string name; | ||
| 260 | |||
| 261 | /// Handle used by guest emulated application to access this thread | 411 | /// Handle used by guest emulated application to access this thread |
| 262 | Handle guest_handle; | 412 | Handle guest_handle = 0; |
| 263 | 413 | ||
| 264 | /// Handle used as userdata to reference this object when inserting into the CoreTiming queue. | 414 | /// Handle used as userdata to reference this object when inserting into the CoreTiming queue. |
| 265 | Handle callback_handle; | 415 | Handle callback_handle = 0; |
| 266 | 416 | ||
| 267 | using WakeupCallback = bool(ThreadWakeupReason reason, SharedPtr<Thread> thread, | 417 | /// Callback that will be invoked when the thread is resumed from a waiting state. If the thread |
| 268 | SharedPtr<WaitObject> object, std::size_t index); | 418 | /// was waiting via WaitSynchronizationN then the object will be the last object that became |
| 269 | // Callback that will be invoked when the thread is resumed from a waiting state. If the thread | 419 | /// available. In case of a timeout, the object will be nullptr. |
| 270 | // was waiting via WaitSynchronizationN then the object will be the last object that became | 420 | WakeupCallback wakeup_callback; |
| 271 | // available. In case of a timeout, the object will be nullptr. | ||
| 272 | std::function<WakeupCallback> wakeup_callback; | ||
| 273 | 421 | ||
| 274 | std::shared_ptr<Scheduler> scheduler; | 422 | std::shared_ptr<Scheduler> scheduler; |
| 275 | 423 | ||
| 276 | u32 ideal_core{0xFFFFFFFF}; | 424 | u32 ideal_core{0xFFFFFFFF}; |
| 277 | u64 affinity_mask{0x1}; | 425 | u64 affinity_mask{0x1}; |
| 278 | 426 | ||
| 279 | private: | ||
| 280 | explicit Thread(KernelCore& kernel); | ||
| 281 | ~Thread() override; | ||
| 282 | |||
| 283 | TLSMemoryPtr tls_memory = std::make_shared<TLSMemory>(); | 427 | TLSMemoryPtr tls_memory = std::make_shared<TLSMemory>(); |
| 428 | |||
| 429 | std::string name; | ||
| 284 | }; | 430 | }; |
| 285 | 431 | ||
| 286 | /** | 432 | /** |
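Most of the thread.h churn is the same mechanical pattern: formerly public data members become private, gain in-class default initializers, and are reached through small const-correct accessors. A generic sketch of that pattern, with names that are deliberately not the emulator's, looks like this:

// Generic illustration of the refactoring pattern, not yuzu's Thread class.
#include <array>
#include <cstdint>
#include <string>
#include <utility>

class Task {
public:
    struct Context {
        std::uint64_t pc = 0;
        std::array<std::uint64_t, 31> cpu_registers{};
    };

    // Const and non-const views of the same state, mirroring the
    // Thread::GetContext() overload pair in the diff.
    Context& GetContext() { return context; }
    const Context& GetContext() const { return context; }

    std::uint32_t GetPriority() const { return priority; }
    void SetPriority(std::uint32_t value) { priority = value; }

    const std::string& GetName() const { return name; }
    void SetName(std::string new_name) { name = std::move(new_name); }

private:
    Context context{};          // was a public member before the refactor
    std::uint32_t priority = 0; // default member initializer instead of ad-hoc assignment
    std::string name;
};

Funnelling every write through an accessor is also what lets SetStatus centralize the last_running_ticks update instead of relying on each caller to remember it.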
diff --git a/src/core/hle/kernel/wait_object.cpp b/src/core/hle/kernel/wait_object.cpp index b190ceb98..530ee6af7 100644 --- a/src/core/hle/kernel/wait_object.cpp +++ b/src/core/hle/kernel/wait_object.cpp | |||
| @@ -35,13 +35,15 @@ SharedPtr<Thread> WaitObject::GetHighestPriorityReadyThread() { | |||
| 35 | u32 candidate_priority = THREADPRIO_LOWEST + 1; | 35 | u32 candidate_priority = THREADPRIO_LOWEST + 1; |
| 36 | 36 | ||
| 37 | for (const auto& thread : waiting_threads) { | 37 | for (const auto& thread : waiting_threads) { |
| 38 | const ThreadStatus thread_status = thread->GetStatus(); | ||
| 39 | |||
| 38 | // The list of waiting threads must not contain threads that are not waiting to be awakened. | 40 | // The list of waiting threads must not contain threads that are not waiting to be awakened. |
| 39 | ASSERT_MSG(thread->status == ThreadStatus::WaitSynchAny || | 41 | ASSERT_MSG(thread_status == ThreadStatus::WaitSynchAny || |
| 40 | thread->status == ThreadStatus::WaitSynchAll || | 42 | thread_status == ThreadStatus::WaitSynchAll || |
| 41 | thread->status == ThreadStatus::WaitHLEEvent, | 43 | thread_status == ThreadStatus::WaitHLEEvent, |
| 42 | "Inconsistent thread statuses in waiting_threads"); | 44 | "Inconsistent thread statuses in waiting_threads"); |
| 43 | 45 | ||
| 44 | if (thread->current_priority >= candidate_priority) | 46 | if (thread->GetPriority() >= candidate_priority) |
| 45 | continue; | 47 | continue; |
| 46 | 48 | ||
| 47 | if (ShouldWait(thread.get())) | 49 | if (ShouldWait(thread.get())) |
| @@ -50,16 +52,13 @@ SharedPtr<Thread> WaitObject::GetHighestPriorityReadyThread() { | |||
| 50 | // A thread is ready to run if it's either in ThreadStatus::WaitSynchAny or | 52 | // A thread is ready to run if it's either in ThreadStatus::WaitSynchAny or |
| 51 | // in ThreadStatus::WaitSynchAll and the rest of the objects it is waiting on are ready. | 53 | // in ThreadStatus::WaitSynchAll and the rest of the objects it is waiting on are ready. |
| 52 | bool ready_to_run = true; | 54 | bool ready_to_run = true; |
| 53 | if (thread->status == ThreadStatus::WaitSynchAll) { | 55 | if (thread_status == ThreadStatus::WaitSynchAll) { |
| 54 | ready_to_run = std::none_of(thread->wait_objects.begin(), thread->wait_objects.end(), | 56 | ready_to_run = thread->AllWaitObjectsReady(); |
| 55 | [&thread](const SharedPtr<WaitObject>& object) { | ||
| 56 | return object->ShouldWait(thread.get()); | ||
| 57 | }); | ||
| 58 | } | 57 | } |
| 59 | 58 | ||
| 60 | if (ready_to_run) { | 59 | if (ready_to_run) { |
| 61 | candidate = thread.get(); | 60 | candidate = thread.get(); |
| 62 | candidate_priority = thread->current_priority; | 61 | candidate_priority = thread->GetPriority(); |
| 63 | } | 62 | } |
| 64 | } | 63 | } |
| 65 | 64 | ||
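With current_priority and wait_objects now private, GetHighestPriorityReadyThread goes through GetPriority() and the new AllWaitObjectsReady() helper. A condensed, self-contained sketch of that selection loop follows, using simplified Waiter/Waitable stand-ins rather than the kernel's Thread/WaitObject types.

// Lower numeric priority wins; "wait all" candidates only qualify when none
// of their wait objects would still block them.
#include <algorithm>
#include <cstdint>
#include <memory>
#include <vector>

struct Waiter;

struct Waitable {
    virtual ~Waitable() = default;
    virtual bool ShouldWait(const Waiter* waiter) const = 0;
};

struct Waiter {
    std::uint32_t priority = 0;
    bool waits_on_all = false;
    std::vector<std::shared_ptr<Waitable>> wait_objects;

    bool AllWaitObjectsReady() const {
        return std::none_of(wait_objects.begin(), wait_objects.end(),
                            [this](const std::shared_ptr<Waitable>& object) {
                                return object->ShouldWait(this);
                            });
    }
};

Waiter* PickHighestPriorityReady(const std::vector<std::shared_ptr<Waiter>>& waiting,
                                 const Waitable& signaled_object,
                                 std::uint32_t lowest_priority) {
    Waiter* candidate = nullptr;
    std::uint32_t candidate_priority = lowest_priority + 1;
    for (const auto& waiter : waiting) {
        if (waiter->priority >= candidate_priority) {
            continue; // an equal or better candidate was already found
        }
        if (signaled_object.ShouldWait(waiter.get())) {
            continue; // the signaled object cannot be acquired by this waiter yet
        }
        const bool ready = !waiter->waits_on_all || waiter->AllWaitObjectsReady();
        if (ready) {
            candidate = waiter.get();
            candidate_priority = waiter->priority;
        }
    }
    return candidate;
}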
| @@ -75,24 +74,24 @@ void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) { | |||
| 75 | if (!thread->IsSleepingOnWaitAll()) { | 74 | if (!thread->IsSleepingOnWaitAll()) { |
| 76 | Acquire(thread.get()); | 75 | Acquire(thread.get()); |
| 77 | } else { | 76 | } else { |
| 78 | for (auto& object : thread->wait_objects) { | 77 | for (const auto& object : thread->GetWaitObjects()) { |
| 79 | ASSERT(!object->ShouldWait(thread.get())); | 78 | ASSERT(!object->ShouldWait(thread.get())); |
| 80 | object->Acquire(thread.get()); | 79 | object->Acquire(thread.get()); |
| 81 | } | 80 | } |
| 82 | } | 81 | } |
| 83 | 82 | ||
| 84 | std::size_t index = thread->GetWaitObjectIndex(this); | 83 | const std::size_t index = thread->GetWaitObjectIndex(this); |
| 85 | 84 | ||
| 86 | for (auto& object : thread->wait_objects) | 85 | for (const auto& object : thread->GetWaitObjects()) |
| 87 | object->RemoveWaitingThread(thread.get()); | 86 | object->RemoveWaitingThread(thread.get()); |
| 88 | thread->wait_objects.clear(); | 87 | thread->ClearWaitObjects(); |
| 89 | 88 | ||
| 90 | thread->CancelWakeupTimer(); | 89 | thread->CancelWakeupTimer(); |
| 91 | 90 | ||
| 92 | bool resume = true; | 91 | bool resume = true; |
| 93 | 92 | ||
| 94 | if (thread->wakeup_callback) | 93 | if (thread->HasWakeupCallback()) |
| 95 | resume = thread->wakeup_callback(ThreadWakeupReason::Signal, thread, this, index); | 94 | resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Signal, thread, this, index); |
| 96 | 95 | ||
| 97 | if (resume) | 96 | if (resume) |
| 98 | thread->ResumeFromWait(); | 97 | thread->ResumeFromWait(); |
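The wakeup path keeps the same shape but no longer reaches into Thread internals: wait objects are detached via ClearWaitObjects(), and the optional callback is queried through HasWakeupCallback()/InvokeWakeupCallback(). Here is a reduced sketch of the callback-gated resume at the end of that function; the Waiter class and WakeupReason enum are illustrative stand-ins.

// Minimal sketch: when a wakeup callback is registered it decides whether
// the thread resumes; otherwise the thread always resumes.
#include <cstddef>
#include <functional>
#include <utility>

enum class WakeupReason { Signal, Timeout };

class Waiter {
public:
    using WakeupCallback = std::function<bool(WakeupReason reason, std::size_t object_index)>;

    void SetWakeupCallback(WakeupCallback callback) { wakeup_callback = std::move(callback); }
    bool HasWakeupCallback() const { return wakeup_callback != nullptr; }

    // Mirrors Thread::InvokeWakeupCallback's precondition: callers must check
    // HasWakeupCallback() first.
    bool InvokeWakeupCallback(WakeupReason reason, std::size_t object_index) {
        return wakeup_callback(reason, object_index);
    }

    void ResumeFromWait() { resumed = true; }
    bool WasResumed() const { return resumed; }

private:
    WakeupCallback wakeup_callback;
    bool resumed = false;
};

// The signal path: resume unless a registered callback vetoes it.
void WakeupWaitingThread(Waiter& thread, std::size_t signaled_index) {
    bool resume = true;
    if (thread.HasWakeupCallback()) {
        resume = thread.InvokeWakeupCallback(WakeupReason::Signal, signaled_index);
    }
    if (resume) {
        thread.ResumeFromWait();
    }
}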
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp index a3b1fd357..4a09da685 100644 --- a/src/yuzu/debugger/wait_tree.cpp +++ b/src/yuzu/debugger/wait_tree.cpp | |||
| @@ -119,7 +119,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() cons | |||
| 119 | std::vector<std::unique_ptr<WaitTreeItem>> list; | 119 | std::vector<std::unique_ptr<WaitTreeItem>> list; |
| 120 | 120 | ||
| 121 | constexpr std::size_t BaseRegister = 29; | 121 | constexpr std::size_t BaseRegister = 29; |
| 122 | u64 base_pointer = thread.context.cpu_registers[BaseRegister]; | 122 | u64 base_pointer = thread.GetContext().cpu_registers[BaseRegister]; |
| 123 | 123 | ||
| 124 | while (base_pointer != 0) { | 124 | while (base_pointer != 0) { |
| 125 | u64 lr = Memory::Read64(base_pointer + sizeof(u64)); | 125 | u64 lr = Memory::Read64(base_pointer + sizeof(u64)); |
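For context on the callstack hunk: register 29 is the AArch64 frame pointer, and each frame record stores the caller's frame pointer at [fp] and the saved link register at [fp + 8]. A sketch of that walk is below, with the memory reader passed in rather than using yuzu's Memory::Read64; only the loop head and the first read appear in the hunk, so the stop-on-null-LR guard is an assumption.

// Hypothetical helper; an assumed reconstruction of a frame-pointer walk.
#include <cstdint>
#include <functional>
#include <vector>

std::vector<std::uint64_t> CollectReturnAddresses(
    std::uint64_t base_pointer, const std::function<std::uint64_t(std::uint64_t)>& read64) {
    std::vector<std::uint64_t> return_addresses;
    while (base_pointer != 0) {
        // AArch64 frame record: [fp] = previous fp, [fp + 8] = saved LR.
        const std::uint64_t lr = read64(base_pointer + sizeof(std::uint64_t));
        if (lr == 0) {
            break; // assumed guard: no saved return address means the chain ends
        }
        return_addresses.push_back(lr);
        base_pointer = read64(base_pointer); // follow the chain to the caller's frame
    }
    return return_addresses;
}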
| @@ -213,7 +213,7 @@ WaitTreeThread::~WaitTreeThread() = default; | |||
| 213 | QString WaitTreeThread::GetText() const { | 213 | QString WaitTreeThread::GetText() const { |
| 214 | const auto& thread = static_cast<const Kernel::Thread&>(object); | 214 | const auto& thread = static_cast<const Kernel::Thread&>(object); |
| 215 | QString status; | 215 | QString status; |
| 216 | switch (thread.status) { | 216 | switch (thread.GetStatus()) { |
| 217 | case Kernel::ThreadStatus::Running: | 217 | case Kernel::ThreadStatus::Running: |
| 218 | status = tr("running"); | 218 | status = tr("running"); |
| 219 | break; | 219 | break; |
| @@ -246,15 +246,17 @@ QString WaitTreeThread::GetText() const { | |||
| 246 | status = tr("dead"); | 246 | status = tr("dead"); |
| 247 | break; | 247 | break; |
| 248 | } | 248 | } |
| 249 | QString pc_info = tr(" PC = 0x%1 LR = 0x%2") | 249 | |
| 250 | .arg(thread.context.pc, 8, 16, QLatin1Char('0')) | 250 | const auto& context = thread.GetContext(); |
| 251 | .arg(thread.context.cpu_registers[30], 8, 16, QLatin1Char('0')); | 251 | const QString pc_info = tr(" PC = 0x%1 LR = 0x%2") |
| 252 | .arg(context.pc, 8, 16, QLatin1Char('0')) | ||
| 253 | .arg(context.cpu_registers[30], 8, 16, QLatin1Char('0')); | ||
| 252 | return WaitTreeWaitObject::GetText() + pc_info + " (" + status + ") "; | 254 | return WaitTreeWaitObject::GetText() + pc_info + " (" + status + ") "; |
| 253 | } | 255 | } |
| 254 | 256 | ||
| 255 | QColor WaitTreeThread::GetColor() const { | 257 | QColor WaitTreeThread::GetColor() const { |
| 256 | const auto& thread = static_cast<const Kernel::Thread&>(object); | 258 | const auto& thread = static_cast<const Kernel::Thread&>(object); |
| 257 | switch (thread.status) { | 259 | switch (thread.GetStatus()) { |
| 258 | case Kernel::ThreadStatus::Running: | 260 | case Kernel::ThreadStatus::Running: |
| 259 | return QColor(Qt::GlobalColor::darkGreen); | 261 | return QColor(Qt::GlobalColor::darkGreen); |
| 260 | case Kernel::ThreadStatus::Ready: | 262 | case Kernel::ThreadStatus::Ready: |
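In the debugger view, the GetText hunk caches thread.GetContext() in a local before formatting PC and LR as zero-padded hex. A tiny stand-alone version of that formatting is shown below (QtCore only; the Context struct stands in for Core::ARM_Interface::ThreadContext).

// Formatting sketch; register 30 is the AArch64 link register.
#include <array>
#include <cstdint>
#include <QChar>
#include <QString>

struct Context {
    std::uint64_t pc = 0;
    std::array<std::uint64_t, 31> cpu_registers{};
};

QString FormatPcLr(const Context& context) {
    return QStringLiteral(" PC = 0x%1 LR = 0x%2")
        .arg(context.pc, 8, 16, QLatin1Char('0'))
        .arg(context.cpu_registers[30], 8, 16, QLatin1Char('0'));
}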
| @@ -284,7 +286,7 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const { | |||
| 284 | const auto& thread = static_cast<const Kernel::Thread&>(object); | 286 | const auto& thread = static_cast<const Kernel::Thread&>(object); |
| 285 | 287 | ||
| 286 | QString processor; | 288 | QString processor; |
| 287 | switch (thread.processor_id) { | 289 | switch (thread.GetProcessorID()) { |
| 288 | case Kernel::ThreadProcessorId::THREADPROCESSORID_DEFAULT: | 290 | case Kernel::ThreadProcessorId::THREADPROCESSORID_DEFAULT: |
| 289 | processor = tr("default"); | 291 | processor = tr("default"); |
| 290 | break; | 292 | break; |
| @@ -292,32 +294,35 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const { | |||
| 292 | case Kernel::ThreadProcessorId::THREADPROCESSORID_1: | 294 | case Kernel::ThreadProcessorId::THREADPROCESSORID_1: |
| 293 | case Kernel::ThreadProcessorId::THREADPROCESSORID_2: | 295 | case Kernel::ThreadProcessorId::THREADPROCESSORID_2: |
| 294 | case Kernel::ThreadProcessorId::THREADPROCESSORID_3: | 296 | case Kernel::ThreadProcessorId::THREADPROCESSORID_3: |
| 295 | processor = tr("core %1").arg(thread.processor_id); | 297 | processor = tr("core %1").arg(thread.GetProcessorID()); |
| 296 | break; | 298 | break; |
| 297 | default: | 299 | default: |
| 298 | processor = tr("Unknown processor %1").arg(thread.processor_id); | 300 | processor = tr("Unknown processor %1").arg(thread.GetProcessorID()); |
| 299 | break; | 301 | break; |
| 300 | } | 302 | } |
| 301 | 303 | ||
| 302 | list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor))); | 304 | list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor))); |
| 303 | list.push_back(std::make_unique<WaitTreeText>(tr("ideal core = %1").arg(thread.ideal_core))); | ||
| 304 | list.push_back( | 305 | list.push_back( |
| 305 | std::make_unique<WaitTreeText>(tr("affinity mask = %1").arg(thread.affinity_mask))); | 306 | std::make_unique<WaitTreeText>(tr("ideal core = %1").arg(thread.GetIdealCore()))); |
| 306 | list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadId()))); | 307 | list.push_back( |
| 308 | std::make_unique<WaitTreeText>(tr("affinity mask = %1").arg(thread.GetAffinityMask()))); | ||
| 309 | list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID()))); | ||
| 307 | list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)") | 310 | list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)") |
| 308 | .arg(thread.current_priority) | 311 | .arg(thread.GetPriority()) |
| 309 | .arg(thread.nominal_priority))); | 312 | .arg(thread.GetNominalPriority()))); |
| 310 | list.push_back(std::make_unique<WaitTreeText>( | 313 | list.push_back(std::make_unique<WaitTreeText>( |
| 311 | tr("last running ticks = %1").arg(thread.last_running_ticks))); | 314 | tr("last running ticks = %1").arg(thread.GetLastRunningTicks()))); |
| 312 | 315 | ||
| 313 | if (thread.mutex_wait_address != 0) | 316 | const VAddr mutex_wait_address = thread.GetMutexWaitAddress(); |
| 314 | list.push_back(std::make_unique<WaitTreeMutexInfo>(thread.mutex_wait_address)); | 317 | if (mutex_wait_address != 0) { |
| 315 | else | 318 | list.push_back(std::make_unique<WaitTreeMutexInfo>(mutex_wait_address)); |
| 319 | } else { | ||
| 316 | list.push_back(std::make_unique<WaitTreeText>(tr("not waiting for mutex"))); | 320 | list.push_back(std::make_unique<WaitTreeText>(tr("not waiting for mutex"))); |
| 321 | } | ||
| 317 | 322 | ||
| 318 | if (thread.status == Kernel::ThreadStatus::WaitSynchAny || | 323 | if (thread.GetStatus() == Kernel::ThreadStatus::WaitSynchAny || |
| 319 | thread.status == Kernel::ThreadStatus::WaitSynchAll) { | 324 | thread.GetStatus() == Kernel::ThreadStatus::WaitSynchAll) { |
| 320 | list.push_back(std::make_unique<WaitTreeObjectList>(thread.wait_objects, | 325 | list.push_back(std::make_unique<WaitTreeObjectList>(thread.GetWaitObjects(), |
| 321 | thread.IsSleepingOnWaitAll())); | 326 | thread.IsSleepingOnWaitAll())); |
| 322 | } | 327 | } |
| 323 | 328 | ||