| author | 2023-03-07 16:11:50 -0500 |
|---|---|
| committer | 2023-03-12 22:09:09 -0400 |
| commit | 6bfb4c8f713323bb39b7e38a779c35583fc61bcc (patch) |
| tree | 6208bf4bbd1c303811384c8fe3d600560a4d3bfe /src/core |
| parent | kernel: prefer std::addressof (diff) |
| download | yuzu-6bfb4c8f713323bb39b7e38a779c35583fc61bcc.tar.gz yuzu-6bfb4c8f713323bb39b7e38a779c35583fc61bcc.tar.xz yuzu-6bfb4c8f713323bb39b7e38a779c35583fc61bcc.zip |
kernel: convert KThread to new style
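As the hunks below show, the conversion is largely mechanical: private data members of `KThread` (and code that touches them) move to an `m_` prefix (`tls_address` becomes `m_tls_address`, `thread_id` becomes `m_thread_id`), accessors are renamed to match (`GetTLSAddress()` becomes `GetTlsAddress()`, `GetThreadID()` becomes `GetThreadId()`), member calls gain an explicit `this->`, and scheduler-lock assertions switch from `kernel.GlobalSchedulerContext().IsLocked()` to `KScheduler::IsSchedulerLockedByCurrentThread(kernel)`. The stand-alone sketch below is illustrative only; the class and function names are hypothetical and not taken from the yuzu sources. It shows the before/after pattern in miniature.

```cpp
#include <cstdint>

// Old style: bare member names, implicit member access and calls.
class ThreadOldStyle {
public:
    void SetBasePriority(std::int32_t value) {
        base_priority = value; // implicit member access
        RestorePriority();     // implicit member call
    }

private:
    void RestorePriority() {}

    std::int32_t base_priority{};
};

// New style: members carry an m_ prefix and member calls use an explicit
// this->, mirroring renames such as base_priority -> m_base_priority in
// k_thread.cpp below.
class ThreadNewStyle {
public:
    void SetBasePriority(std::int32_t value) {
        m_base_priority = value;  // prefixed member
        this->RestorePriority();  // explicit this-> on member calls
    }

private:
    void RestorePriority() {}

    std::int32_t m_base_priority{};
};
```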
Diffstat (limited to 'src/core')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/core/debugger/gdbstub.cpp | 12 |
| -rw-r--r-- | src/core/debugger/gdbstub_arch.cpp | 4 |
| -rw-r--r-- | src/core/hle/kernel/k_client_session.cpp | 2 |
| -rw-r--r-- | src/core/hle/kernel/k_condition_variable.cpp | 2 |
| -rw-r--r-- | src/core/hle/kernel/k_condition_variable.h | 8 |
| -rw-r--r-- | src/core/hle/kernel/k_process.cpp | 1 |
| -rw-r--r-- | src/core/hle/kernel/k_scheduler.cpp | 16 |
| -rw-r--r-- | src/core/hle/kernel/k_server_session.cpp | 4 |
| -rw-r--r-- | src/core/hle/kernel/k_thread.cpp | 483 |
| -rw-r--r-- | src/core/hle/kernel/k_thread.h | 544 |
| -rw-r--r-- | src/core/hle/kernel/kernel.cpp | 3 |
| -rw-r--r-- | src/core/hle/kernel/svc/svc_thread.cpp | 5 |
| -rw-r--r-- | src/core/hle/service/hle_ipc.cpp | 2 |
13 files changed, 518 insertions, 568 deletions
diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp
index 18afe97e1..f39f2ca29 100644
--- a/src/core/debugger/gdbstub.cpp
+++ b/src/core/debugger/gdbstub.cpp
@@ -421,7 +421,7 @@ void GDBStub::HandleBreakpointRemove(std::string_view command) {
| 421 | static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory& memory, | 421 | static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory& memory, |
| 422 | const Kernel::KThread* thread) { | 422 | const Kernel::KThread* thread) { |
| 423 | // Read thread type from TLS | 423 | // Read thread type from TLS |
| 424 | const VAddr tls_thread_type{memory.Read32(thread->GetTLSAddress() + 0x1fc)}; | 424 | const VAddr tls_thread_type{memory.Read32(thread->GetTlsAddress() + 0x1fc)}; |
| 425 | const VAddr argument_thread_type{thread->GetArgument()}; | 425 | const VAddr argument_thread_type{thread->GetArgument()}; |
| 426 | 426 | ||
| 427 | if (argument_thread_type && tls_thread_type != argument_thread_type) { | 427 | if (argument_thread_type && tls_thread_type != argument_thread_type) { |
@@ -452,7 +452,7 @@ static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory&
| 452 | static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory& memory, | 452 | static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory& memory, |
| 453 | const Kernel::KThread* thread) { | 453 | const Kernel::KThread* thread) { |
| 454 | // Read thread type from TLS | 454 | // Read thread type from TLS |
| 455 | const VAddr tls_thread_type{memory.Read64(thread->GetTLSAddress() + 0x1f8)}; | 455 | const VAddr tls_thread_type{memory.Read64(thread->GetTlsAddress() + 0x1f8)}; |
| 456 | const VAddr argument_thread_type{thread->GetArgument()}; | 456 | const VAddr argument_thread_type{thread->GetArgument()}; |
| 457 | 457 | ||
| 458 | if (argument_thread_type && tls_thread_type != argument_thread_type) { | 458 | if (argument_thread_type && tls_thread_type != argument_thread_type) { |
@@ -576,7 +576,7 @@ void GDBStub::HandleQuery(std::string_view command) {
| 576 | const auto& threads = system.ApplicationProcess()->GetThreadList(); | 576 | const auto& threads = system.ApplicationProcess()->GetThreadList(); |
| 577 | std::vector<std::string> thread_ids; | 577 | std::vector<std::string> thread_ids; |
| 578 | for (const auto& thread : threads) { | 578 | for (const auto& thread : threads) { |
| 579 | thread_ids.push_back(fmt::format("{:x}", thread->GetThreadID())); | 579 | thread_ids.push_back(fmt::format("{:x}", thread->GetThreadId())); |
| 580 | } | 580 | } |
| 581 | SendReply(fmt::format("m{}", fmt::join(thread_ids, ","))); | 581 | SendReply(fmt::format("m{}", fmt::join(thread_ids, ","))); |
| 582 | } else if (command.starts_with("sThreadInfo")) { | 582 | } else if (command.starts_with("sThreadInfo")) { |
@@ -591,11 +591,11 @@ void GDBStub::HandleQuery(std::string_view command) {
| 591 | for (const auto* thread : threads) { | 591 | for (const auto* thread : threads) { |
| 592 | auto thread_name{GetThreadName(system, thread)}; | 592 | auto thread_name{GetThreadName(system, thread)}; |
| 593 | if (!thread_name) { | 593 | if (!thread_name) { |
| 594 | thread_name = fmt::format("Thread {:d}", thread->GetThreadID()); | 594 | thread_name = fmt::format("Thread {:d}", thread->GetThreadId()); |
| 595 | } | 595 | } |
| 596 | 596 | ||
| 597 | buffer += fmt::format(R"(<thread id="{:x}" core="{:d}" name="{}">{}</thread>)", | 597 | buffer += fmt::format(R"(<thread id="{:x}" core="{:d}" name="{}">{}</thread>)", |
| 598 | thread->GetThreadID(), thread->GetActiveCore(), | 598 | thread->GetThreadId(), thread->GetActiveCore(), |
| 599 | EscapeXML(*thread_name), GetThreadState(thread)); | 599 | EscapeXML(*thread_name), GetThreadState(thread)); |
| 600 | } | 600 | } |
| 601 | 601 | ||
@@ -819,7 +819,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
| 819 | Kernel::KThread* GDBStub::GetThreadByID(u64 thread_id) { | 819 | Kernel::KThread* GDBStub::GetThreadByID(u64 thread_id) { |
| 820 | const auto& threads{system.ApplicationProcess()->GetThreadList()}; | 820 | const auto& threads{system.ApplicationProcess()->GetThreadList()}; |
| 821 | for (auto* thread : threads) { | 821 | for (auto* thread : threads) { |
| 822 | if (thread->GetThreadID() == thread_id) { | 822 | if (thread->GetThreadId() == thread_id) { |
| 823 | return thread; | 823 | return thread; |
| 824 | } | 824 | } |
| 825 | } | 825 | } |
diff --git a/src/core/debugger/gdbstub_arch.cpp b/src/core/debugger/gdbstub_arch.cpp
index 831c48513..75c94a91a 100644
--- a/src/core/debugger/gdbstub_arch.cpp
+++ b/src/core/debugger/gdbstub_arch.cpp
@@ -259,7 +259,7 @@ void GDBStubA64::WriteRegisters(Kernel::KThread* thread, std::string_view regist
| 259 | std::string GDBStubA64::ThreadStatus(const Kernel::KThread* thread, u8 signal) const { | 259 | std::string GDBStubA64::ThreadStatus(const Kernel::KThread* thread, u8 signal) const { |
| 260 | return fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", signal, PC_REGISTER, | 260 | return fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", signal, PC_REGISTER, |
| 261 | RegRead(thread, PC_REGISTER), SP_REGISTER, RegRead(thread, SP_REGISTER), | 261 | RegRead(thread, PC_REGISTER), SP_REGISTER, RegRead(thread, SP_REGISTER), |
| 262 | LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadID()); | 262 | LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadId()); |
| 263 | } | 263 | } |
| 264 | 264 | ||
| 265 | u32 GDBStubA64::BreakpointInstruction() const { | 265 | u32 GDBStubA64::BreakpointInstruction() const { |
@@ -469,7 +469,7 @@ void GDBStubA32::WriteRegisters(Kernel::KThread* thread, std::string_view regist
| 469 | std::string GDBStubA32::ThreadStatus(const Kernel::KThread* thread, u8 signal) const { | 469 | std::string GDBStubA32::ThreadStatus(const Kernel::KThread* thread, u8 signal) const { |
| 470 | return fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", signal, PC_REGISTER, | 470 | return fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", signal, PC_REGISTER, |
| 471 | RegRead(thread, PC_REGISTER), SP_REGISTER, RegRead(thread, SP_REGISTER), | 471 | RegRead(thread, PC_REGISTER), SP_REGISTER, RegRead(thread, SP_REGISTER), |
| 472 | LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadID()); | 472 | LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadId()); |
| 473 | } | 473 | } |
| 474 | 474 | ||
| 475 | u32 GDBStubA32::BreakpointInstruction() const { | 475 | u32 GDBStubA32::BreakpointInstruction() const { |
diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp
index 62a8fab45..d998b2be2 100644
--- a/src/core/hle/kernel/k_client_session.cpp
+++ b/src/core/hle/kernel/k_client_session.cpp
@@ -29,7 +29,7 @@ Result KClientSession::SendSyncRequest() {
| 29 | SCOPE_EXIT({ request->Close(); }); | 29 | SCOPE_EXIT({ request->Close(); }); |
| 30 | 30 | ||
| 31 | // Initialize the request. | 31 | // Initialize the request. |
| 32 | request->Initialize(nullptr, GetCurrentThread(m_kernel).GetTLSAddress(), MessageBufferSize); | 32 | request->Initialize(nullptr, GetCurrentThread(m_kernel).GetTlsAddress(), MessageBufferSize); |
| 33 | 33 | ||
| 34 | // Send the request. | 34 | // Send the request. |
| 35 | R_RETURN(m_parent->GetServerSession().OnRequest(request)); | 35 | R_RETURN(m_parent->GetServerSession().OnRequest(request)); |
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 067f26fba..58b8609d8 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -177,7 +177,6 @@ Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value)
| 177 | // Begin waiting. | 177 | // Begin waiting. |
| 178 | cur_thread->BeginWait(std::addressof(wait_queue)); | 178 | cur_thread->BeginWait(std::addressof(wait_queue)); |
| 179 | cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar); | 179 | cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar); |
| 180 | cur_thread->SetMutexWaitAddressForDebugging(addr); | ||
| 181 | } | 180 | } |
| 182 | 181 | ||
| 183 | // Close our reference to the owner thread, now that the wait is over. | 182 | // Close our reference to the owner thread, now that the wait is over. |
@@ -324,7 +323,6 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
| 324 | wait_queue.SetHardwareTimer(timer); | 323 | wait_queue.SetHardwareTimer(timer); |
| 325 | cur_thread->BeginWait(std::addressof(wait_queue)); | 324 | cur_thread->BeginWait(std::addressof(wait_queue)); |
| 326 | cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar); | 325 | cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar); |
| 327 | cur_thread->SetMutexWaitAddressForDebugging(addr); | ||
| 328 | } | 326 | } |
| 329 | 327 | ||
| 330 | // Get the wait result. | 328 | // Get the wait result. |
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
index 41635a894..fbd2c1fc0 100644
--- a/src/core/hle/kernel/k_condition_variable.h
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -41,16 +41,16 @@ private:
| 41 | ThreadTree m_tree{}; | 41 | ThreadTree m_tree{}; |
| 42 | }; | 42 | }; |
| 43 | 43 | ||
| 44 | inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree, | 44 | inline void BeforeUpdatePriority(KernelCore& kernel, KConditionVariable::ThreadTree* tree, |
| 45 | KThread* thread) { | 45 | KThread* thread) { |
| 46 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | 46 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); |
| 47 | 47 | ||
| 48 | tree->erase(tree->iterator_to(*thread)); | 48 | tree->erase(tree->iterator_to(*thread)); |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree, | 51 | inline void AfterUpdatePriority(KernelCore& kernel, KConditionVariable::ThreadTree* tree, |
| 52 | KThread* thread) { | 52 | KThread* thread) { |
| 53 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | 53 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); |
| 54 | 54 | ||
| 55 | tree->insert(*thread); | 55 | tree->insert(*thread); |
| 56 | } | 56 | } |
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index b740fb1c3..fa3fc8c1c 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -52,7 +52,6 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
| 52 | Handle thread_handle{}; | 52 | Handle thread_handle{}; |
| 53 | owner_process.GetHandleTable().Add(std::addressof(thread_handle), thread); | 53 | owner_process.GetHandleTable().Add(std::addressof(thread_handle), thread); |
| 54 | 54 | ||
| 55 | thread->SetName("main"); | ||
| 56 | thread->GetContext32().cpu_registers[0] = 0; | 55 | thread->GetContext32().cpu_registers[0] = 0; |
| 57 | thread->GetContext64().cpu_registers[0] = 0; | 56 | thread->GetContext64().cpu_registers[0] = 0; |
| 58 | thread->GetContext32().cpu_registers[1] = thread_handle; | 57 | thread->GetContext32().cpu_registers[1] = thread_handle; |
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index fe371726c..ecadf2916 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -411,7 +411,7 @@ void KScheduler::ScheduleImpl() {
| 411 | m_switch_cur_thread = cur_thread; | 411 | m_switch_cur_thread = cur_thread; |
| 412 | m_switch_highest_priority_thread = highest_priority_thread; | 412 | m_switch_highest_priority_thread = highest_priority_thread; |
| 413 | m_switch_from_schedule = true; | 413 | m_switch_from_schedule = true; |
| 414 | Common::Fiber::YieldTo(cur_thread->host_context, *m_switch_fiber); | 414 | Common::Fiber::YieldTo(cur_thread->m_host_context, *m_switch_fiber); |
| 415 | 415 | ||
| 416 | // Returning from ScheduleImpl occurs after this thread has been scheduled again. | 416 | // Returning from ScheduleImpl occurs after this thread has been scheduled again. |
| 417 | } | 417 | } |
@@ -450,7 +450,7 @@ void KScheduler::ScheduleImplFiber() {
| 450 | 450 | ||
| 451 | // We want to try to lock the highest priority thread's context. | 451 | // We want to try to lock the highest priority thread's context. |
| 452 | // Try to take it. | 452 | // Try to take it. |
| 453 | while (!highest_priority_thread->context_guard.try_lock()) { | 453 | while (!highest_priority_thread->m_context_guard.try_lock()) { |
| 454 | // The highest priority thread's context is already locked. | 454 | // The highest priority thread's context is already locked. |
| 455 | // Check if we need scheduling. If we don't, we can retry directly. | 455 | // Check if we need scheduling. If we don't, we can retry directly. |
| 456 | if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { | 456 | if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { |
@@ -468,7 +468,7 @@ void KScheduler::ScheduleImplFiber() {
| 468 | if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { | 468 | if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) { |
| 469 | // Our switch failed. | 469 | // Our switch failed. |
| 470 | // We should unlock the thread context, and then retry. | 470 | // We should unlock the thread context, and then retry. |
| 471 | highest_priority_thread->context_guard.unlock(); | 471 | highest_priority_thread->m_context_guard.unlock(); |
| 472 | goto retry; | 472 | goto retry; |
| 473 | } else { | 473 | } else { |
| 474 | break; | 474 | break; |
@@ -489,7 +489,7 @@ void KScheduler::ScheduleImplFiber() {
| 489 | Reload(highest_priority_thread); | 489 | Reload(highest_priority_thread); |
| 490 | 490 | ||
| 491 | // Reload the host thread. | 491 | // Reload the host thread. |
| 492 | Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->host_context); | 492 | Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->m_host_context); |
| 493 | } | 493 | } |
| 494 | 494 | ||
| 495 | void KScheduler::Unload(KThread* thread) { | 495 | void KScheduler::Unload(KThread* thread) { |
@@ -497,13 +497,13 @@ void KScheduler::Unload(KThread* thread) {
| 497 | cpu_core.SaveContext(thread->GetContext32()); | 497 | cpu_core.SaveContext(thread->GetContext32()); |
| 498 | cpu_core.SaveContext(thread->GetContext64()); | 498 | cpu_core.SaveContext(thread->GetContext64()); |
| 499 | // Save the TPIDR_EL0 system register in case it was modified. | 499 | // Save the TPIDR_EL0 system register in case it was modified. |
| 500 | thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); | 500 | thread->SetTpidrEl0(cpu_core.GetTPIDR_EL0()); |
| 501 | cpu_core.ClearExclusiveState(); | 501 | cpu_core.ClearExclusiveState(); |
| 502 | 502 | ||
| 503 | // Check if the thread is terminated by checking the DPC flags. | 503 | // Check if the thread is terminated by checking the DPC flags. |
| 504 | if ((thread->GetStackParameters().dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) { | 504 | if ((thread->GetStackParameters().dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) { |
| 505 | // The thread isn't terminated, so we want to unlock it. | 505 | // The thread isn't terminated, so we want to unlock it. |
| 506 | thread->context_guard.unlock(); | 506 | thread->m_context_guard.unlock(); |
| 507 | } | 507 | } |
| 508 | } | 508 | } |
| 509 | 509 | ||
@@ -511,8 +511,8 @@ void KScheduler::Reload(KThread* thread) {
| 511 | auto& cpu_core = m_kernel.System().ArmInterface(m_core_id); | 511 | auto& cpu_core = m_kernel.System().ArmInterface(m_core_id); |
| 512 | cpu_core.LoadContext(thread->GetContext32()); | 512 | cpu_core.LoadContext(thread->GetContext32()); |
| 513 | cpu_core.LoadContext(thread->GetContext64()); | 513 | cpu_core.LoadContext(thread->GetContext64()); |
| 514 | cpu_core.SetTlsAddress(thread->GetTLSAddress()); | 514 | cpu_core.SetTlsAddress(thread->GetTlsAddress()); |
| 515 | cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0()); | 515 | cpu_core.SetTPIDR_EL0(thread->GetTpidrEl0()); |
| 516 | cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints()); | 516 | cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints()); |
| 517 | cpu_core.ClearExclusiveState(); | 517 | cpu_core.ClearExclusiveState(); |
| 518 | } | 518 | } |
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index 8376c5d76..2288ee435 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -226,7 +226,7 @@ Result KServerSession::SendReply(bool is_hle) {
| 226 | KThread* server_thread{GetCurrentThreadPointer(m_kernel)}; | 226 | KThread* server_thread{GetCurrentThreadPointer(m_kernel)}; |
| 227 | UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); | 227 | UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); |
| 228 | 228 | ||
| 229 | auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); | 229 | auto* src_msg_buffer = memory.GetPointer(server_thread->GetTlsAddress()); |
| 230 | auto* dst_msg_buffer = memory.GetPointer(client_message); | 230 | auto* dst_msg_buffer = memory.GetPointer(client_message); |
| 231 | std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); | 231 | std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); |
| 232 | } | 232 | } |
@@ -334,7 +334,7 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext
| 334 | UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); | 334 | UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); |
| 335 | 335 | ||
| 336 | auto* src_msg_buffer = memory.GetPointer(client_message); | 336 | auto* src_msg_buffer = memory.GetPointer(client_message); |
| 337 | auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); | 337 | auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTlsAddress()); |
| 338 | std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); | 338 | std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); |
| 339 | } | 339 | } |
| 340 | 340 | ||
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 27616440c..2eee85258 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -35,15 +35,11 @@
| 35 | #include "core/hle/result.h" | 35 | #include "core/hle/result.h" |
| 36 | #include "core/memory.h" | 36 | #include "core/memory.h" |
| 37 | 37 | ||
| 38 | #ifdef ARCHITECTURE_x86_64 | ||
| 39 | #include "core/arm/dynarmic/arm_dynarmic_32.h" | ||
| 40 | #endif | ||
| 41 | |||
| 42 | namespace { | 38 | namespace { |
| 43 | 39 | ||
| 44 | constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1; | 40 | constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1; |
| 45 | 41 | ||
| 46 | static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, | 42 | static void ResetThreadContext32(Kernel::KThread::ThreadContext32& context, u32 stack_top, |
| 47 | u32 entry_point, u32 arg) { | 43 | u32 entry_point, u32 arg) { |
| 48 | context = {}; | 44 | context = {}; |
| 49 | context.cpu_registers[0] = arg; | 45 | context.cpu_registers[0] = arg; |
@@ -52,7 +48,7 @@ static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context,
| 52 | context.fpscr = 0; | 48 | context.fpscr = 0; |
| 53 | } | 49 | } |
| 54 | 50 | ||
| 55 | static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top, | 51 | static void ResetThreadContext64(Kernel::KThread::ThreadContext64& context, VAddr stack_top, |
| 56 | VAddr entry_point, u64 arg) { | 52 | VAddr entry_point, u64 arg) { |
| 57 | context = {}; | 53 | context = {}; |
| 58 | context.cpu_registers[0] = arg; | 54 | context.cpu_registers[0] = arg; |
@@ -95,13 +91,13 @@ public:
| 95 | } | 91 | } |
| 96 | 92 | ||
| 97 | private: | 93 | private: |
| 98 | KThread::WaiterList* m_wait_list; | 94 | KThread::WaiterList* m_wait_list{}; |
| 99 | }; | 95 | }; |
| 100 | 96 | ||
| 101 | } // namespace | 97 | } // namespace |
| 102 | 98 | ||
| 103 | KThread::KThread(KernelCore& kernel) | 99 | KThread::KThread(KernelCore& kernel) |
| 104 | : KAutoObjectWithSlabHeapAndContainer{kernel}, activity_pause_lock{kernel} {} | 100 | : KAutoObjectWithSlabHeapAndContainer{kernel}, m_activity_pause_lock{kernel} {} |
| 105 | KThread::~KThread() = default; | 101 | KThread::~KThread() = default; |
| 106 | 102 | ||
| 107 | Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, | 103 | Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, |
@@ -117,7 +113,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
| 117 | ASSERT(0 <= phys_core && phys_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); | 113 | ASSERT(0 <= phys_core && phys_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES)); |
| 118 | 114 | ||
| 119 | // First, clear the TLS address. | 115 | // First, clear the TLS address. |
| 120 | tls_address = {}; | 116 | m_tls_address = {}; |
| 121 | 117 | ||
| 122 | // Next, assert things based on the type. | 118 | // Next, assert things based on the type. |
| 123 | switch (type) { | 119 | switch (type) { |
@@ -141,73 +137,73 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
| 141 | ASSERT_MSG(false, "KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type)); | 137 | ASSERT_MSG(false, "KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type)); |
| 142 | break; | 138 | break; |
| 143 | } | 139 | } |
| 144 | thread_type = type; | 140 | m_thread_type = type; |
| 145 | 141 | ||
| 146 | // Set the ideal core ID and affinity mask. | 142 | // Set the ideal core ID and affinity mask. |
| 147 | virtual_ideal_core_id = virt_core; | 143 | m_virtual_ideal_core_id = virt_core; |
| 148 | physical_ideal_core_id = phys_core; | 144 | m_physical_ideal_core_id = phys_core; |
| 149 | virtual_affinity_mask = 1ULL << virt_core; | 145 | m_virtual_affinity_mask = 1ULL << virt_core; |
| 150 | physical_affinity_mask.SetAffinity(phys_core, true); | 146 | m_physical_affinity_mask.SetAffinity(phys_core, true); |
| 151 | 147 | ||
| 152 | // Set the thread state. | 148 | // Set the thread state. |
| 153 | thread_state = (type == ThreadType::Main || type == ThreadType::Dummy) | 149 | m_thread_state = (type == ThreadType::Main || type == ThreadType::Dummy) |
| 154 | ? ThreadState::Runnable | 150 | ? ThreadState::Runnable |
| 155 | : ThreadState::Initialized; | 151 | : ThreadState::Initialized; |
| 156 | 152 | ||
| 157 | // Set TLS address. | 153 | // Set TLS address. |
| 158 | tls_address = 0; | 154 | m_tls_address = 0; |
| 159 | 155 | ||
| 160 | // Set parent and condvar tree. | 156 | // Set parent and condvar tree. |
| 161 | parent = nullptr; | 157 | m_parent = nullptr; |
| 162 | condvar_tree = nullptr; | 158 | m_condvar_tree = nullptr; |
| 163 | 159 | ||
| 164 | // Set sync booleans. | 160 | // Set sync booleans. |
| 165 | signaled = false; | 161 | m_signaled = false; |
| 166 | termination_requested = false; | 162 | m_termination_requested = false; |
| 167 | wait_cancelled = false; | 163 | m_wait_cancelled = false; |
| 168 | cancellable = false; | 164 | m_cancellable = false; |
| 169 | 165 | ||
| 170 | // Set core ID and wait result. | 166 | // Set core ID and wait result. |
| 171 | core_id = phys_core; | 167 | m_core_id = phys_core; |
| 172 | wait_result = ResultNoSynchronizationObject; | 168 | m_wait_result = ResultNoSynchronizationObject; |
| 173 | 169 | ||
| 174 | // Set priorities. | 170 | // Set priorities. |
| 175 | priority = prio; | 171 | m_priority = prio; |
| 176 | base_priority = prio; | 172 | m_base_priority = prio; |
| 177 | 173 | ||
| 178 | // Initialize sleeping queue. | 174 | // Initialize sleeping queue. |
| 179 | wait_queue = nullptr; | 175 | m_wait_queue = nullptr; |
| 180 | 176 | ||
| 181 | // Set suspend flags. | 177 | // Set suspend flags. |
| 182 | suspend_request_flags = 0; | 178 | m_suspend_request_flags = 0; |
| 183 | suspend_allowed_flags = static_cast<u32>(ThreadState::SuspendFlagMask); | 179 | m_suspend_allowed_flags = static_cast<u32>(ThreadState::SuspendFlagMask); |
| 184 | 180 | ||
| 185 | // We're neither debug attached, nor are we nesting our priority inheritance. | 181 | // We're neither debug attached, nor are we nesting our priority inheritance. |
| 186 | debug_attached = false; | 182 | m_debug_attached = false; |
| 187 | priority_inheritance_count = 0; | 183 | m_priority_inheritance_count = 0; |
| 188 | 184 | ||
| 189 | // We haven't been scheduled, and we have done no light IPC. | 185 | // We haven't been scheduled, and we have done no light IPC. |
| 190 | schedule_count = -1; | 186 | m_schedule_count = -1; |
| 191 | last_scheduled_tick = 0; | 187 | m_last_scheduled_tick = 0; |
| 192 | light_ipc_data = nullptr; | 188 | m_light_ipc_data = nullptr; |
| 193 | 189 | ||
| 194 | // We're not waiting for a lock, and we haven't disabled migration. | 190 | // We're not waiting for a lock, and we haven't disabled migration. |
| 195 | waiting_lock_info = nullptr; | 191 | m_waiting_lock_info = nullptr; |
| 196 | num_core_migration_disables = 0; | 192 | m_num_core_migration_disables = 0; |
| 197 | 193 | ||
| 198 | // We have no waiters, but we do have an entrypoint. | 194 | // We have no waiters, but we do have an entrypoint. |
| 199 | num_kernel_waiters = 0; | 195 | m_num_kernel_waiters = 0; |
| 200 | 196 | ||
| 201 | // Set our current core id. | 197 | // Set our current core id. |
| 202 | current_core_id = phys_core; | 198 | m_current_core_id = phys_core; |
| 203 | 199 | ||
| 204 | // We haven't released our resource limit hint, and we've spent no time on the cpu. | 200 | // We haven't released our resource limit hint, and we've spent no time on the cpu. |
| 205 | resource_limit_release_hint = false; | 201 | m_resource_limit_release_hint = false; |
| 206 | cpu_time = 0; | 202 | m_cpu_time = 0; |
| 207 | 203 | ||
| 208 | // Set debug context. | 204 | // Set debug context. |
| 209 | stack_top = user_stack_top; | 205 | m_stack_top = user_stack_top; |
| 210 | argument = arg; | 206 | m_argument = arg; |
| 211 | 207 | ||
| 212 | // Clear our stack parameters. | 208 | // Clear our stack parameters. |
| 213 | std::memset(static_cast<void*>(std::addressof(GetStackParameters())), 0, | 209 | std::memset(static_cast<void*>(std::addressof(GetStackParameters())), 0, |
@@ -217,34 +213,34 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
| 217 | if (owner != nullptr) { | 213 | if (owner != nullptr) { |
| 218 | // Setup the TLS, if needed. | 214 | // Setup the TLS, if needed. |
| 219 | if (type == ThreadType::User) { | 215 | if (type == ThreadType::User) { |
| 220 | R_TRY(owner->CreateThreadLocalRegion(std::addressof(tls_address))); | 216 | R_TRY(owner->CreateThreadLocalRegion(std::addressof(m_tls_address))); |
| 221 | } | 217 | } |
| 222 | 218 | ||
| 223 | parent = owner; | 219 | m_parent = owner; |
| 224 | parent->Open(); | 220 | m_parent->Open(); |
| 225 | } | 221 | } |
| 226 | 222 | ||
| 227 | // Initialize thread context. | 223 | // Initialize thread context. |
| 228 | ResetThreadContext64(thread_context_64, user_stack_top, func, arg); | 224 | ResetThreadContext64(m_thread_context_64, user_stack_top, func, arg); |
| 229 | ResetThreadContext32(thread_context_32, static_cast<u32>(user_stack_top), | 225 | ResetThreadContext32(m_thread_context_32, static_cast<u32>(user_stack_top), |
| 230 | static_cast<u32>(func), static_cast<u32>(arg)); | 226 | static_cast<u32>(func), static_cast<u32>(arg)); |
| 231 | 227 | ||
| 232 | // Setup the stack parameters. | 228 | // Setup the stack parameters. |
| 233 | StackParameters& sp = GetStackParameters(); | 229 | StackParameters& sp = this->GetStackParameters(); |
| 234 | sp.cur_thread = this; | 230 | sp.cur_thread = this; |
| 235 | sp.disable_count = 1; | 231 | sp.disable_count = 1; |
| 236 | SetInExceptionHandler(); | 232 | this->SetInExceptionHandler(); |
| 237 | 233 | ||
| 238 | // Set thread ID. | 234 | // Set thread ID. |
| 239 | thread_id = m_kernel.CreateNewThreadID(); | 235 | m_thread_id = m_kernel.CreateNewThreadID(); |
| 240 | 236 | ||
| 241 | // We initialized! | 237 | // We initialized! |
| 242 | initialized = true; | 238 | m_initialized = true; |
| 243 | 239 | ||
| 244 | // Register ourselves with our parent process. | 240 | // Register ourselves with our parent process. |
| 245 | if (parent != nullptr) { | 241 | if (m_parent != nullptr) { |
| 246 | parent->RegisterThread(this); | 242 | m_parent->RegisterThread(this); |
| 247 | if (parent->IsSuspended()) { | 243 | if (m_parent->IsSuspended()) { |
| 248 | RequestSuspend(SuspendType::Process); | 244 | RequestSuspend(SuspendType::Process); |
| 249 | } | 245 | } |
| 250 | } | 246 | } |
@@ -259,8 +255,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_
| 259 | R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type)); | 255 | R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type)); |
| 260 | 256 | ||
| 261 | // Initialize emulation parameters. | 257 | // Initialize emulation parameters. |
| 262 | thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func)); | 258 | thread->m_host_context = std::make_shared<Common::Fiber>(std::move(init_func)); |
| 263 | thread->is_single_core = !Settings::values.use_multi_core.GetValue(); | ||
| 264 | 259 | ||
| 265 | R_SUCCEED(); | 260 | R_SUCCEED(); |
| 266 | } | 261 | } |
@@ -270,7 +265,7 @@ Result KThread::InitializeDummyThread(KThread* thread, KProcess* owner) {
| 270 | R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, owner, ThreadType::Dummy)); | 265 | R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, owner, ThreadType::Dummy)); |
| 271 | 266 | ||
| 272 | // Initialize emulation parameters. | 267 | // Initialize emulation parameters. |
| 273 | thread->stack_parameters.disable_count = 0; | 268 | thread->m_stack_parameters.disable_count = 0; |
| 274 | 269 | ||
| 275 | R_SUCCEED(); | 270 | R_SUCCEED(); |
| 276 | } | 271 | } |
@@ -331,25 +326,25 @@ void KThread::PostDestroy(uintptr_t arg) {
| 331 | 326 | ||
| 332 | void KThread::Finalize() { | 327 | void KThread::Finalize() { |
| 333 | // If the thread has an owner process, unregister it. | 328 | // If the thread has an owner process, unregister it. |
| 334 | if (parent != nullptr) { | 329 | if (m_parent != nullptr) { |
| 335 | parent->UnregisterThread(this); | 330 | m_parent->UnregisterThread(this); |
| 336 | } | 331 | } |
| 337 | 332 | ||
| 338 | // If the thread has a local region, delete it. | 333 | // If the thread has a local region, delete it. |
| 339 | if (tls_address != 0) { | 334 | if (m_tls_address != 0) { |
| 340 | ASSERT(parent->DeleteThreadLocalRegion(tls_address).IsSuccess()); | 335 | ASSERT(m_parent->DeleteThreadLocalRegion(m_tls_address).IsSuccess()); |
| 341 | } | 336 | } |
| 342 | 337 | ||
| 343 | // Release any waiters. | 338 | // Release any waiters. |
| 344 | { | 339 | { |
| 345 | ASSERT(waiting_lock_info == nullptr); | 340 | ASSERT(m_waiting_lock_info == nullptr); |
| 346 | KScopedSchedulerLock sl{m_kernel}; | 341 | KScopedSchedulerLock sl{m_kernel}; |
| 347 | 342 | ||
| 348 | // Check that we have no kernel waiters. | 343 | // Check that we have no kernel waiters. |
| 349 | ASSERT(num_kernel_waiters == 0); | 344 | ASSERT(m_num_kernel_waiters == 0); |
| 350 | 345 | ||
| 351 | auto it = held_lock_info_list.begin(); | 346 | auto it = m_held_lock_info_list.begin(); |
| 352 | while (it != held_lock_info_list.end()) { | 347 | while (it != m_held_lock_info_list.end()) { |
| 353 | // Get the lock info. | 348 | // Get the lock info. |
| 354 | auto* const lock_info = std::addressof(*it); | 349 | auto* const lock_info = std::addressof(*it); |
| 355 | 350 | ||
@@ -371,7 +366,7 @@ void KThread::Finalize() {
| 371 | } | 366 | } |
| 372 | 367 | ||
| 373 | // Remove the held lock from our list. | 368 | // Remove the held lock from our list. |
| 374 | it = held_lock_info_list.erase(it); | 369 | it = m_held_lock_info_list.erase(it); |
| 375 | 370 | ||
| 376 | // Free the lock info. | 371 | // Free the lock info. |
| 377 | LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info); | 372 | LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info); |
@@ -379,58 +374,58 @@ void KThread::Finalize() {
| 379 | } | 374 | } |
| 380 | 375 | ||
| 381 | // Release host emulation members. | 376 | // Release host emulation members. |
| 382 | host_context.reset(); | 377 | m_host_context.reset(); |
| 383 | 378 | ||
| 384 | // Perform inherited finalization. | 379 | // Perform inherited finalization. |
| 385 | KSynchronizationObject::Finalize(); | 380 | KSynchronizationObject::Finalize(); |
| 386 | } | 381 | } |
| 387 | 382 | ||
| 388 | bool KThread::IsSignaled() const { | 383 | bool KThread::IsSignaled() const { |
| 389 | return signaled; | 384 | return m_signaled; |
| 390 | } | 385 | } |
| 391 | 386 | ||
| 392 | void KThread::OnTimer() { | 387 | void KThread::OnTimer() { |
| 393 | ASSERT(m_kernel.GlobalSchedulerContext().IsLocked()); | 388 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); |
| 394 | 389 | ||
| 395 | // If we're waiting, cancel the wait. | 390 | // If we're waiting, cancel the wait. |
| 396 | if (GetState() == ThreadState::Waiting) { | 391 | if (this->GetState() == ThreadState::Waiting) { |
| 397 | wait_queue->CancelWait(this, ResultTimedOut, false); | 392 | m_wait_queue->CancelWait(this, ResultTimedOut, false); |
| 398 | } | 393 | } |
| 399 | } | 394 | } |
| 400 | 395 | ||
| 401 | void KThread::StartTermination() { | 396 | void KThread::StartTermination() { |
| 402 | ASSERT(m_kernel.GlobalSchedulerContext().IsLocked()); | 397 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); |
| 403 | 398 | ||
| 404 | // Release user exception and unpin, if relevant. | 399 | // Release user exception and unpin, if relevant. |
| 405 | if (parent != nullptr) { | 400 | if (m_parent != nullptr) { |
| 406 | parent->ReleaseUserException(this); | 401 | m_parent->ReleaseUserException(this); |
| 407 | if (parent->GetPinnedThread(GetCurrentCoreId(m_kernel)) == this) { | 402 | if (m_parent->GetPinnedThread(GetCurrentCoreId(m_kernel)) == this) { |
| 408 | parent->UnpinCurrentThread(core_id); | 403 | m_parent->UnpinCurrentThread(m_core_id); |
| 409 | } | 404 | } |
| 410 | } | 405 | } |
| 411 | 406 | ||
| 412 | // Set state to terminated. | 407 | // Set state to terminated. |
| 413 | SetState(ThreadState::Terminated); | 408 | this->SetState(ThreadState::Terminated); |
| 414 | 409 | ||
| 415 | // Clear the thread's status as running in parent. | 410 | // Clear the thread's status as running in parent. |
| 416 | if (parent != nullptr) { | 411 | if (m_parent != nullptr) { |
| 417 | parent->ClearRunningThread(this); | 412 | m_parent->ClearRunningThread(this); |
| 418 | } | 413 | } |
| 419 | 414 | ||
| 420 | // Signal. | 415 | // Signal. |
| 421 | signaled = true; | 416 | m_signaled = true; |
| 422 | KSynchronizationObject::NotifyAvailable(); | 417 | KSynchronizationObject::NotifyAvailable(); |
| 423 | 418 | ||
| 424 | // Clear previous thread in KScheduler. | 419 | // Clear previous thread in KScheduler. |
| 425 | KScheduler::ClearPreviousThread(m_kernel, this); | 420 | KScheduler::ClearPreviousThread(m_kernel, this); |
| 426 | 421 | ||
| 427 | // Register terminated dpc flag. | 422 | // Register terminated dpc flag. |
| 428 | RegisterDpc(DpcFlag::Terminated); | 423 | this->RegisterDpc(DpcFlag::Terminated); |
| 429 | } | 424 | } |
| 430 | 425 | ||
| 431 | void KThread::FinishTermination() { | 426 | void KThread::FinishTermination() { |
| 432 | // Ensure that the thread is not executing on any core. | 427 | // Ensure that the thread is not executing on any core. |
| 433 | if (parent != nullptr) { | 428 | if (m_parent != nullptr) { |
| 434 | for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) { | 429 | for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) { |
| 435 | KThread* core_thread{}; | 430 | KThread* core_thread{}; |
| 436 | do { | 431 | do { |
@@ -449,75 +444,76 @@ void KThread::DoWorkerTaskImpl() {
| 449 | } | 444 | } |
| 450 | 445 | ||
| 451 | void KThread::Pin(s32 current_core) { | 446 | void KThread::Pin(s32 current_core) { |
| 452 | ASSERT(m_kernel.GlobalSchedulerContext().IsLocked()); | 447 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); |
| 453 | 448 | ||
| 454 | // Set ourselves as pinned. | 449 | // Set ourselves as pinned. |
| 455 | GetStackParameters().is_pinned = true; | 450 | GetStackParameters().is_pinned = true; |
| 456 | 451 | ||
| 457 | // Disable core migration. | 452 | // Disable core migration. |
| 458 | ASSERT(num_core_migration_disables == 0); | 453 | ASSERT(m_num_core_migration_disables == 0); |
| 459 | { | 454 | { |
| 460 | ++num_core_migration_disables; | 455 | ++m_num_core_migration_disables; |
| 461 | 456 | ||
| 462 | // Save our ideal state to restore when we're unpinned. | 457 | // Save our ideal state to restore when we're unpinned. |
| 463 | original_physical_ideal_core_id = physical_ideal_core_id; | 458 | m_original_physical_ideal_core_id = m_physical_ideal_core_id; |
| 464 | original_physical_affinity_mask = physical_affinity_mask; | 459 | m_original_physical_affinity_mask = m_physical_affinity_mask; |
| 465 | 460 | ||
| 466 | // Bind ourselves to this core. | 461 | // Bind ourselves to this core. |
| 467 | const s32 active_core = GetActiveCore(); | 462 | const s32 active_core = this->GetActiveCore(); |
| 468 | 463 | ||
| 469 | SetActiveCore(current_core); | 464 | this->SetActiveCore(current_core); |
| 470 | physical_ideal_core_id = current_core; | 465 | m_physical_ideal_core_id = current_core; |
| 471 | physical_affinity_mask.SetAffinityMask(1ULL << current_core); | 466 | m_physical_affinity_mask.SetAffinityMask(1ULL << current_core); |
| 472 | 467 | ||
| 473 | if (active_core != current_core || physical_affinity_mask.GetAffinityMask() != | 468 | if (active_core != current_core || |
| 474 | original_physical_affinity_mask.GetAffinityMask()) { | 469 | m_physical_affinity_mask.GetAffinityMask() != |
| 475 | KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, original_physical_affinity_mask, | 470 | m_original_physical_affinity_mask.GetAffinityMask()) { |
| 476 | active_core); | 471 | KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, |
| | | 472 | m_original_physical_affinity_mask, active_core); |
| 477 | } | 473 | } |
| 478 | } | 474 | } |
| 479 | 475 | ||
| 480 | // Disallow performing thread suspension. | 476 | // Disallow performing thread suspension. |
| 481 | { | 477 | { |
| 482 | // Update our allow flags. | 478 | // Update our allow flags. |
| 483 | suspend_allowed_flags &= ~(1 << (static_cast<u32>(SuspendType::Thread) + | 479 | m_suspend_allowed_flags &= ~(1 << (static_cast<u32>(SuspendType::Thread) + |
| 484 | static_cast<u32>(ThreadState::SuspendShift))); | 480 | static_cast<u32>(ThreadState::SuspendShift))); |
| 485 | 481 | ||
| 486 | // Update our state. | 482 | // Update our state. |
| 487 | UpdateState(); | 483 | this->UpdateState(); |
| 488 | } | 484 | } |
| 489 | 485 | ||
| 490 | // TODO(bunnei): Update our SVC access permissions. | 486 | // TODO(bunnei): Update our SVC access permissions. |
| 491 | ASSERT(parent != nullptr); | 487 | ASSERT(m_parent != nullptr); |
| 492 | } | 488 | } |
| 493 | 489 | ||
| 494 | void KThread::Unpin() { | 490 | void KThread::Unpin() { |
| 495 | ASSERT(m_kernel.GlobalSchedulerContext().IsLocked()); | 491 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); |
| 496 | 492 | ||
| 497 | // Set ourselves as unpinned. | 493 | // Set ourselves as unpinned. |
| 498 | GetStackParameters().is_pinned = false; | 494 | this->GetStackParameters().is_pinned = false; |
| 499 | 495 | ||
| 500 | // Enable core migration. | 496 | // Enable core migration. |
| 501 | ASSERT(num_core_migration_disables == 1); | 497 | ASSERT(m_num_core_migration_disables == 1); |
| 502 | { | 498 | { |
| 503 | num_core_migration_disables--; | 499 | m_num_core_migration_disables--; |
| 504 | 500 | ||
| 505 | // Restore our original state. | 501 | // Restore our original state. |
| 506 | const KAffinityMask old_mask = physical_affinity_mask; | 502 | const KAffinityMask old_mask = m_physical_affinity_mask; |
| 507 | 503 | ||
| 508 | physical_ideal_core_id = original_physical_ideal_core_id; | 504 | m_physical_ideal_core_id = m_original_physical_ideal_core_id; |
| 509 | physical_affinity_mask = original_physical_affinity_mask; | 505 | m_physical_affinity_mask = m_original_physical_affinity_mask; |
| 510 | 506 | ||
| 511 | if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { | 507 | if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { |
| 512 | const s32 active_core = GetActiveCore(); | 508 | const s32 active_core = this->GetActiveCore(); |
| 513 | 509 | ||
| 514 | if (!physical_affinity_mask.GetAffinity(active_core)) { | 510 | if (!m_physical_affinity_mask.GetAffinity(active_core)) { |
| 515 | if (physical_ideal_core_id >= 0) { | 511 | if (m_physical_ideal_core_id >= 0) { |
| 516 | SetActiveCore(physical_ideal_core_id); | 512 | this->SetActiveCore(m_physical_ideal_core_id); |
| 517 | } else { | 513 | } else { |
| 518 | SetActiveCore(static_cast<s32>( | 514 | this->SetActiveCore(static_cast<s32>( |
| 519 | Common::BitSize<u64>() - 1 - | 515 | Common::BitSize<u64>() - 1 - |
| 520 | std::countl_zero(physical_affinity_mask.GetAffinityMask()))); | 516 | std::countl_zero(m_physical_affinity_mask.GetAffinityMask()))); |
| 521 | } | 517 | } |
| 522 | } | 518 | } |
| 523 | KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core); | 519 | KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core); |
@@ -525,106 +521,106 @@ void KThread::Unpin() {
| 525 | } | 521 | } |
| 526 | 522 | ||
| 527 | // Allow performing thread suspension (if termination hasn't been requested). | 523 | // Allow performing thread suspension (if termination hasn't been requested). |
| 528 | if (!IsTerminationRequested()) { | 524 | if (!this->IsTerminationRequested()) { |
| 529 | // Update our allow flags. | 525 | // Update our allow flags. |
| 530 | suspend_allowed_flags |= (1 << (static_cast<u32>(SuspendType::Thread) + | 526 | m_suspend_allowed_flags |= (1 << (static_cast<u32>(SuspendType::Thread) + |
| 531 | static_cast<u32>(ThreadState::SuspendShift))); | 527 | static_cast<u32>(ThreadState::SuspendShift))); |
| 532 | 528 | ||
| 533 | // Update our state. | 529 | // Update our state. |
| 534 | UpdateState(); | 530 | this->UpdateState(); |
| 535 | } | 531 | } |
| 536 | 532 | ||
| 537 | // TODO(bunnei): Update our SVC access permissions. | 533 | // TODO(bunnei): Update our SVC access permissions. |
| 538 | ASSERT(parent != nullptr); | 534 | ASSERT(m_parent != nullptr); |
| 539 | 535 | ||
| 540 | // Resume any threads that began waiting on us while we were pinned. | 536 | // Resume any threads that began waiting on us while we were pinned. |
| 541 | for (auto it = pinned_waiter_list.begin(); it != pinned_waiter_list.end(); ++it) { | 537 | for (auto it = m_pinned_waiter_list.begin(); it != m_pinned_waiter_list.end(); ++it) { |
| 542 | it->EndWait(ResultSuccess); | 538 | it->EndWait(ResultSuccess); |
| 543 | } | 539 | } |
| 544 | } | 540 | } |
| 545 | 541 | ||
| 546 | u16 KThread::GetUserDisableCount() const { | 542 | u16 KThread::GetUserDisableCount() const { |
| 547 | if (!IsUserThread()) { | 543 | if (!this->IsUserThread()) { |
| 548 | // We only emulate TLS for user threads | 544 | // We only emulate TLS for user threads |
| 549 | return {}; | 545 | return {}; |
| 550 | } | 546 | } |
| 551 | 547 | ||
| 552 | auto& memory = m_kernel.System().Memory(); | 548 | auto& memory = m_kernel.System().Memory(); |
| 553 | return memory.Read16(tls_address + offsetof(ThreadLocalRegion, disable_count)); | 549 | return memory.Read16(m_tls_address + offsetof(ThreadLocalRegion, disable_count)); |
| 554 | } | 550 | } |
| 555 | 551 | ||
| 556 | void KThread::SetInterruptFlag() { | 552 | void KThread::SetInterruptFlag() { |
| 557 | if (!IsUserThread()) { | 553 | if (!this->IsUserThread()) { |
| 558 | // We only emulate TLS for user threads | 554 | // We only emulate TLS for user threads |
| 559 | return; | 555 | return; |
| 560 | } | 556 | } |
| 561 | 557 | ||
| 562 | auto& memory = m_kernel.System().Memory(); | 558 | auto& memory = m_kernel.System().Memory(); |
| 563 | memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1); | 559 | memory.Write16(m_tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1); |
| 564 | } | 560 | } |
| 565 | 561 | ||
| 566 | void KThread::ClearInterruptFlag() { | 562 | void KThread::ClearInterruptFlag() { |
| 567 | if (!IsUserThread()) { | 563 | if (!this->IsUserThread()) { |
| 568 | // We only emulate TLS for user threads | 564 | // We only emulate TLS for user threads |
| 569 | return; | 565 | return; |
| 570 | } | 566 | } |
| 571 | 567 | ||
| 572 | auto& memory = m_kernel.System().Memory(); | 568 | auto& memory = m_kernel.System().Memory(); |
| 573 | memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0); | 569 | memory.Write16(m_tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0); |
| 574 | } | 570 | } |
| 575 | 571 | ||
| 576 | Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { | 572 | Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { |
| 577 | KScopedSchedulerLock sl{m_kernel}; | 573 | KScopedSchedulerLock sl{m_kernel}; |
| 578 | 574 | ||
| 579 | // Get the virtual mask. | 575 | // Get the virtual mask. |
| 580 | *out_ideal_core = virtual_ideal_core_id; | 576 | *out_ideal_core = m_virtual_ideal_core_id; |
| 581 | *out_affinity_mask = virtual_affinity_mask; | 577 | *out_affinity_mask = m_virtual_affinity_mask; |
| 582 | 578 | ||
| 583 | R_SUCCEED(); | 579 | R_SUCCEED(); |
| 584 | } | 580 | } |
| 585 | 581 | ||
| 586 | Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { | 582 | Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { |
| 587 | KScopedSchedulerLock sl{m_kernel}; | 583 | KScopedSchedulerLock sl{m_kernel}; |
| 588 | ASSERT(num_core_migration_disables >= 0); | 584 | ASSERT(m_num_core_migration_disables >= 0); |
| 589 | 585 | ||
| 590 | // Select between core mask and original core mask. | 586 | // Select between core mask and original core mask. |
| 591 | if (num_core_migration_disables == 0) { | 587 | if (m_num_core_migration_disables == 0) { |
| 592 | *out_ideal_core = physical_ideal_core_id; | 588 | *out_ideal_core = m_physical_ideal_core_id; |
| 593 | *out_affinity_mask = physical_affinity_mask.GetAffinityMask(); | 589 | *out_affinity_mask = m_physical_affinity_mask.GetAffinityMask(); |
| 594 | } else { | 590 | } else { |
| 595 | *out_ideal_core = original_physical_ideal_core_id; | 591 | *out_ideal_core = m_original_physical_ideal_core_id; |
| 596 | *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask(); | 592 | *out_affinity_mask = m_original_physical_affinity_mask.GetAffinityMask(); |
| 597 | } | 593 | } |
| 598 | 594 | ||
| 599 | R_SUCCEED(); | 595 | R_SUCCEED(); |
| 600 | } | 596 | } |
| 601 | 597 | ||
| 602 | Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) { | 598 | Result KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) { |
| 603 | ASSERT(parent != nullptr); | 599 | ASSERT(m_parent != nullptr); |
| 604 | ASSERT(v_affinity_mask != 0); | 600 | ASSERT(v_affinity_mask != 0); |
| 605 | KScopedLightLock lk(activity_pause_lock); | 601 | KScopedLightLock lk(m_activity_pause_lock); |
| 606 | 602 | ||
| 607 | // Set the core mask. | 603 | // Set the core mask. |
| 608 | u64 p_affinity_mask = 0; | 604 | u64 p_affinity_mask = 0; |
| 609 | { | 605 | { |
| 610 | KScopedSchedulerLock sl(m_kernel); | 606 | KScopedSchedulerLock sl(m_kernel); |
| 611 | ASSERT(num_core_migration_disables >= 0); | 607 | ASSERT(m_num_core_migration_disables >= 0); |
| 612 | 608 | ||
| 613 | // If we're updating, set our ideal virtual core. | 609 | // If we're updating, set our ideal virtual core. |
| 614 | if (core_id_ != Svc::IdealCoreNoUpdate) { | 610 | if (core_id != Svc::IdealCoreNoUpdate) { |
| 615 | virtual_ideal_core_id = core_id_; | 611 | m_virtual_ideal_core_id = core_id; |
| 616 | } else { | 612 | } else { |
| 617 | // Preserve our ideal core id. | 613 | // Preserve our ideal core id. |
| 618 | core_id_ = virtual_ideal_core_id; | 614 | core_id = m_virtual_ideal_core_id; |
| 619 | R_UNLESS(((1ULL << core_id_) & v_affinity_mask) != 0, ResultInvalidCombination); | 615 | R_UNLESS(((1ULL << core_id) & v_affinity_mask) != 0, ResultInvalidCombination); |
| 620 | } | 616 | } |
| 621 | 617 | ||
| 622 | // Set our affinity mask. | 618 | // Set our affinity mask. |
| 623 | virtual_affinity_mask = v_affinity_mask; | 619 | m_virtual_affinity_mask = v_affinity_mask; |
| 624 | 620 | ||
| 625 | // Translate the virtual core to a physical core. | 621 | // Translate the virtual core to a physical core. |
| 626 | if (core_id_ >= 0) { | 622 | if (core_id >= 0) { |
| 627 | core_id_ = Core::Hardware::VirtualToPhysicalCoreMap[core_id_]; | 623 | core_id = Core::Hardware::VirtualToPhysicalCoreMap[core_id]; |
| 628 | } | 624 | } |
| 629 | 625 | ||
| 630 | // Translate the virtual affinity mask to a physical one. | 626 | // Translate the virtual affinity mask to a physical one. |
@@ -635,35 +631,35 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
| 635 | } | 631 | } |
| 636 | 632 | ||
| 637 | // If we haven't disabled migration, perform an affinity change. | 633 | // If we haven't disabled migration, perform an affinity change. |
| 638 | if (num_core_migration_disables == 0) { | 634 | if (m_num_core_migration_disables == 0) { |
| 639 | const KAffinityMask old_mask = physical_affinity_mask; | 635 | const KAffinityMask old_mask = m_physical_affinity_mask; |
| 640 | 636 | ||
| 641 | // Set our new ideals. | 637 | // Set our new ideals. |
| 642 | physical_ideal_core_id = core_id_; | 638 | m_physical_ideal_core_id = core_id; |
| 643 | physical_affinity_mask.SetAffinityMask(p_affinity_mask); | 639 | m_physical_affinity_mask.SetAffinityMask(p_affinity_mask); |
| 644 | 640 | ||
| 645 | if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { | 641 | if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { |
| 646 | const s32 active_core = GetActiveCore(); | 642 | const s32 active_core = GetActiveCore(); |
| 647 | 643 | ||
| 648 | if (active_core >= 0 && !physical_affinity_mask.GetAffinity(active_core)) { | 644 | if (active_core >= 0 && !m_physical_affinity_mask.GetAffinity(active_core)) { |
| 649 | const s32 new_core = static_cast<s32>( | 645 | const s32 new_core = static_cast<s32>( |
| 650 | physical_ideal_core_id >= 0 | 646 | m_physical_ideal_core_id >= 0 |
| 651 | ? physical_ideal_core_id | 647 | ? m_physical_ideal_core_id |
| 652 | : Common::BitSize<u64>() - 1 - | 648 | : Common::BitSize<u64>() - 1 - |
| 653 | std::countl_zero(physical_affinity_mask.GetAffinityMask())); | 649 | std::countl_zero(m_physical_affinity_mask.GetAffinityMask())); |
| 654 | SetActiveCore(new_core); | 650 | SetActiveCore(new_core); |
| 655 | } | 651 | } |
| 656 | KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core); | 652 | KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core); |
| 657 | } | 653 | } |
| 658 | } else { | 654 | } else { |
| 659 | // Otherwise, we edit the original affinity for restoration later. | 655 | // Otherwise, we edit the original affinity for restoration later. |
| 660 | original_physical_ideal_core_id = core_id_; | 656 | m_original_physical_ideal_core_id = core_id; |
| 661 | original_physical_affinity_mask.SetAffinityMask(p_affinity_mask); | 657 | m_original_physical_affinity_mask.SetAffinityMask(p_affinity_mask); |
| 662 | } | 658 | } |
| 663 | } | 659 | } |
| 664 | 660 | ||
| 665 | // Update the pinned waiter list. | 661 | // Update the pinned waiter list. |
| 666 | ThreadQueueImplForKThreadSetProperty wait_queue_(m_kernel, std::addressof(pinned_waiter_list)); | 662 | ThreadQueueImplForKThreadSetProperty wait_queue(m_kernel, std::addressof(m_pinned_waiter_list)); |
| 667 | { | 663 | { |
| 668 | bool retry_update{}; | 664 | bool retry_update{}; |
| 669 | do { | 665 | do { |
@@ -671,7 +667,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
| 671 | KScopedSchedulerLock sl(m_kernel); | 667 | KScopedSchedulerLock sl(m_kernel); |
| 672 | 668 | ||
| 673 | // Don't do any further management if our termination has been requested. | 669 | // Don't do any further management if our termination has been requested. |
| 674 | R_SUCCEED_IF(IsTerminationRequested()); | 670 | R_SUCCEED_IF(this->IsTerminationRequested()); |
| 675 | 671 | ||
| 676 | // By default, we won't need to retry. | 672 | // By default, we won't need to retry. |
| 677 | retry_update = false; | 673 | retry_update = false; |
@@ -691,14 +687,14 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
| 691 | // new mask. | 687 | // new mask. |
| 692 | if (thread_is_current && ((1ULL << thread_core) & p_affinity_mask) == 0) { | 688 | if (thread_is_current && ((1ULL << thread_core) & p_affinity_mask) == 0) { |
| 693 | // If the thread is pinned, we want to wait until it's not pinned. | 689 | // If the thread is pinned, we want to wait until it's not pinned. |
| 694 | if (GetStackParameters().is_pinned) { | 690 | if (this->GetStackParameters().is_pinned) { |
| 695 | // Verify that the current thread isn't terminating. | 691 | // Verify that the current thread isn't terminating. |
| 696 | R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), | 692 | R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), |
| 697 | ResultTerminationRequested); | 693 | ResultTerminationRequested); |
| 698 | 694 | ||
| 699 | // Wait until the thread isn't pinned any more. | 695 | // Wait until the thread isn't pinned any more. |
| 700 | pinned_waiter_list.push_back(GetCurrentThread(m_kernel)); | 696 | m_pinned_waiter_list.push_back(GetCurrentThread(m_kernel)); |
| 701 | GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue_)); | 697 | GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue)); |
| 702 | } else { | 698 | } else { |
| 703 | // If the thread isn't pinned, release the scheduler lock and retry until it's | 699 | // If the thread isn't pinned, release the scheduler lock and retry until it's |
| 704 | // not current. | 700 | // not current. |
| @@ -717,24 +713,24 @@ void KThread::SetBasePriority(s32 value) { | |||
| 717 | KScopedSchedulerLock sl{m_kernel}; | 713 | KScopedSchedulerLock sl{m_kernel}; |
| 718 | 714 | ||
| 719 | // Change our base priority. | 715 | // Change our base priority. |
| 720 | base_priority = value; | 716 | m_base_priority = value; |
| 721 | 717 | ||
| 722 | // Perform a priority restoration. | 718 | // Perform a priority restoration. |
| 723 | RestorePriority(m_kernel, this); | 719 | RestorePriority(m_kernel, this); |
| 724 | } | 720 | } |
| 725 | 721 | ||
| 726 | KThread* KThread::GetLockOwner() const { | 722 | KThread* KThread::GetLockOwner() const { |
| 727 | return waiting_lock_info != nullptr ? waiting_lock_info->GetOwner() : nullptr; | 723 | return m_waiting_lock_info != nullptr ? m_waiting_lock_info->GetOwner() : nullptr; |
| 728 | } | 724 | } |
| 729 | 725 | ||
| 730 | void KThread::IncreaseBasePriority(s32 priority_) { | 726 | void KThread::IncreaseBasePriority(s32 priority) { |
| 731 | ASSERT(Svc::HighestThreadPriority <= priority_ && priority_ <= Svc::LowestThreadPriority); | 727 | ASSERT(Svc::HighestThreadPriority <= priority && priority <= Svc::LowestThreadPriority); |
| 732 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); | 728 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); |
| 733 | ASSERT(!this->GetStackParameters().is_pinned); | 729 | ASSERT(!this->GetStackParameters().is_pinned); |
| 734 | 730 | ||
| 735 | // Set our base priority. | 731 | // Set our base priority. |
| 736 | if (base_priority > priority_) { | 732 | if (m_base_priority > priority) { |
| 737 | base_priority = priority_; | 733 | m_base_priority = priority; |
| 738 | 734 | ||
| 739 | // Perform a priority restoration. | 735 | // Perform a priority restoration. |
| 740 | RestorePriority(m_kernel, this); | 736 | RestorePriority(m_kernel, this); |
| @@ -745,19 +741,19 @@ void KThread::RequestSuspend(SuspendType type) { | |||
| 745 | KScopedSchedulerLock sl{m_kernel}; | 741 | KScopedSchedulerLock sl{m_kernel}; |
| 746 | 742 | ||
| 747 | // Note the request in our flags. | 743 | // Note the request in our flags. |
| 748 | suspend_request_flags |= | 744 | m_suspend_request_flags |= |
| 749 | (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type))); | 745 | (1U << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type))); |
| 750 | 746 | ||
| 751 | // Try to perform the suspend. | 747 | // Try to perform the suspend. |
| 752 | TrySuspend(); | 748 | this->TrySuspend(); |
| 753 | } | 749 | } |
| 754 | 750 | ||
| 755 | void KThread::Resume(SuspendType type) { | 751 | void KThread::Resume(SuspendType type) { |
| 756 | KScopedSchedulerLock sl{m_kernel}; | 752 | KScopedSchedulerLock sl{m_kernel}; |
| 757 | 753 | ||
| 758 | // Clear the request in our flags. | 754 | // Clear the request in our flags. |
| 759 | suspend_request_flags &= | 755 | m_suspend_request_flags &= |
| 760 | ~(1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type))); | 756 | ~(1U << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type))); |
| 761 | 757 | ||
| 762 | // Update our state. | 758 | // Update our state. |
| 763 | this->UpdateState(); | 759 | this->UpdateState(); |
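The `RequestSuspend`/`Resume` hunks above set and clear one bit per `SuspendType`, offset by `ThreadState::SuspendShift`, inside the suspend-request flags. The sketch below illustrates that flag layout; the shift value and the enumerators are placeholders chosen for the demo, not the actual values from `k_thread.h`.

```cpp
// Illustrative sketch of the per-type suspend bit layout: each SuspendType occupies
// one bit starting at a SuspendShift offset. The constants here are assumptions.
#include <cstdint>
#include <cstdio>

enum class SuspendType : std::uint32_t { Process = 0, Thread = 1, Debug = 2 };
constexpr std::uint32_t SuspendShift = 4; // hypothetical offset

constexpr std::uint32_t SuspendBit(SuspendType type) {
    return 1U << (SuspendShift + static_cast<std::uint32_t>(type));
}

int main() {
    std::uint32_t suspend_request_flags = 0;

    // RequestSuspend(type): note the request in the flags.
    suspend_request_flags |= SuspendBit(SuspendType::Debug);

    // IsSuspendRequested(type): test the corresponding bit.
    std::printf("debug requested: %d\n",
                (suspend_request_flags & SuspendBit(SuspendType::Debug)) != 0);

    // Resume(type): clear the request again.
    suspend_request_flags &= ~SuspendBit(SuspendType::Debug);
    std::printf("debug requested: %d\n",
                (suspend_request_flags & SuspendBit(SuspendType::Debug)) != 0);
    return 0;
}
```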
| @@ -767,17 +763,17 @@ void KThread::WaitCancel() { | |||
| 767 | KScopedSchedulerLock sl{m_kernel}; | 763 | KScopedSchedulerLock sl{m_kernel}; |
| 768 | 764 | ||
| 769 | // Check if we're waiting and cancellable. | 765 | // Check if we're waiting and cancellable. |
| 770 | if (this->GetState() == ThreadState::Waiting && cancellable) { | 766 | if (this->GetState() == ThreadState::Waiting && m_cancellable) { |
| 771 | wait_cancelled = false; | 767 | m_wait_cancelled = false; |
| 772 | wait_queue->CancelWait(this, ResultCancelled, true); | 768 | m_wait_queue->CancelWait(this, ResultCancelled, true); |
| 773 | } else { | 769 | } else { |
| 774 | // Otherwise, note that we cancelled a wait. | 770 | // Otherwise, note that we cancelled a wait. |
| 775 | wait_cancelled = true; | 771 | m_wait_cancelled = true; |
| 776 | } | 772 | } |
| 777 | } | 773 | } |
| 778 | 774 | ||
| 779 | void KThread::TrySuspend() { | 775 | void KThread::TrySuspend() { |
| 780 | ASSERT(m_kernel.GlobalSchedulerContext().IsLocked()); | 776 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); |
| 781 | ASSERT(IsSuspendRequested()); | 777 | ASSERT(IsSuspendRequested()); |
| 782 | 778 | ||
| 783 | // Ensure that we have no waiters. | 779 | // Ensure that we have no waiters. |
| @@ -791,13 +787,13 @@ void KThread::TrySuspend() { | |||
| 791 | } | 787 | } |
| 792 | 788 | ||
| 793 | void KThread::UpdateState() { | 789 | void KThread::UpdateState() { |
| 794 | ASSERT(m_kernel.GlobalSchedulerContext().IsLocked()); | 790 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); |
| 795 | 791 | ||
| 796 | // Set our suspend flags in state. | 792 | // Set our suspend flags in state. |
| 797 | const ThreadState old_state = thread_state.load(std::memory_order_relaxed); | 793 | const ThreadState old_state = m_thread_state.load(std::memory_order_relaxed); |
| 798 | const auto new_state = | 794 | const auto new_state = |
| 799 | static_cast<ThreadState>(this->GetSuspendFlags()) | (old_state & ThreadState::Mask); | 795 | static_cast<ThreadState>(this->GetSuspendFlags()) | (old_state & ThreadState::Mask); |
| 800 | thread_state.store(new_state, std::memory_order_relaxed); | 796 | m_thread_state.store(new_state, std::memory_order_relaxed); |
| 801 | 797 | ||
| 802 | // Note the state change in scheduler. | 798 | // Note the state change in scheduler. |
| 803 | if (new_state != old_state) { | 799 | if (new_state != old_state) { |
| @@ -806,11 +802,11 @@ void KThread::UpdateState() { | |||
| 806 | } | 802 | } |
| 807 | 803 | ||
| 808 | void KThread::Continue() { | 804 | void KThread::Continue() { |
| 809 | ASSERT(m_kernel.GlobalSchedulerContext().IsLocked()); | 805 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); |
| 810 | 806 | ||
| 811 | // Clear our suspend flags in state. | 807 | // Clear our suspend flags in state. |
| 812 | const ThreadState old_state = thread_state.load(std::memory_order_relaxed); | 808 | const ThreadState old_state = m_thread_state.load(std::memory_order_relaxed); |
| 813 | thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed); | 809 | m_thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed); |
| 814 | 810 | ||
| 815 | // Note the state change in scheduler. | 811 | // Note the state change in scheduler. |
| 816 | KScheduler::OnThreadStateChanged(m_kernel, this, old_state); | 812 | KScheduler::OnThreadStateChanged(m_kernel, this, old_state); |
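`UpdateState()` and `Continue()` above read and write the state word with `std::memory_order_relaxed`; relaxed ordering is enough here because every writer already holds the scheduler lock. A minimal sketch of that pattern, with the lock modelled by a plain `std::mutex` and made-up mask values rather than the real `ThreadState` layout:

```cpp
// Sketch: an atomic state word updated with relaxed ordering under an external lock.
// The masks and state values are placeholders, not the real ThreadState encoding.
#include <atomic>
#include <cstdint>
#include <cstdio>
#include <mutex>

constexpr std::uint16_t StateMask   = 0x000F; // low bits: scheduling state
constexpr std::uint16_t SuspendBits = 0x00F0; // high bits: suspend flags

std::mutex scheduler_lock;                        // stand-in for KScopedSchedulerLock
std::atomic<std::uint16_t> thread_state{0x0002};  // some runnable-ish state

void UpdateState(std::uint16_t suspend_flags) {
    std::scoped_lock lk{scheduler_lock};
    const auto old_state = thread_state.load(std::memory_order_relaxed);
    const auto new_state = static_cast<std::uint16_t>((suspend_flags & SuspendBits) |
                                                      (old_state & StateMask));
    thread_state.store(new_state, std::memory_order_relaxed);
    if (new_state != old_state) {
        std::printf("state changed: %u -> %u (would notify the scheduler)\n",
                    static_cast<unsigned>(old_state), static_cast<unsigned>(new_state));
    }
}

void Continue() {
    std::scoped_lock lk{scheduler_lock};
    const auto old_state = thread_state.load(std::memory_order_relaxed);
    // Clear the suspend bits, keep the scheduling state.
    thread_state.store(old_state & StateMask, std::memory_order_relaxed);
}

int main() {
    UpdateState(0x0010); // a suspend request arrives
    Continue();          // suspend bits cleared again
    return 0;
}
```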
| @@ -839,7 +835,7 @@ void KThread::CloneFpuStatus() { | |||
| 839 | 835 | ||
| 840 | Result KThread::SetActivity(Svc::ThreadActivity activity) { | 836 | Result KThread::SetActivity(Svc::ThreadActivity activity) { |
| 841 | // Lock ourselves. | 837 | // Lock ourselves. |
| 842 | KScopedLightLock lk(activity_pause_lock); | 838 | KScopedLightLock lk(m_activity_pause_lock); |
| 843 | 839 | ||
| 844 | // Set the activity. | 840 | // Set the activity. |
| 845 | { | 841 | { |
| @@ -871,10 +867,10 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) { | |||
| 871 | 867 | ||
| 872 | // If the thread is now paused, update the pinned waiter list. | 868 | // If the thread is now paused, update the pinned waiter list. |
| 873 | if (activity == Svc::ThreadActivity::Paused) { | 869 | if (activity == Svc::ThreadActivity::Paused) { |
| 874 | ThreadQueueImplForKThreadSetProperty wait_queue_(m_kernel, | 870 | ThreadQueueImplForKThreadSetProperty wait_queue(m_kernel, |
| 875 | std::addressof(pinned_waiter_list)); | 871 | std::addressof(m_pinned_waiter_list)); |
| 876 | 872 | ||
| 877 | bool thread_is_current; | 873 | bool thread_is_current{}; |
| 878 | do { | 874 | do { |
| 879 | // Lock the scheduler. | 875 | // Lock the scheduler. |
| 880 | KScopedSchedulerLock sl(m_kernel); | 876 | KScopedSchedulerLock sl(m_kernel); |
| @@ -892,8 +888,8 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) { | |||
| 892 | ResultTerminationRequested); | 888 | ResultTerminationRequested); |
| 893 | 889 | ||
| 894 | // Wait until the thread isn't pinned any more. | 890 | // Wait until the thread isn't pinned any more. |
| 895 | pinned_waiter_list.push_back(GetCurrentThread(m_kernel)); | 891 | m_pinned_waiter_list.push_back(GetCurrentThread(m_kernel)); |
| 896 | GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue_)); | 892 | GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue)); |
| 897 | } else { | 893 | } else { |
| 898 | // Check if the thread is currently running. | 894 | // Check if the thread is currently running. |
| 899 | // If it is, we'll need to retry. | 895 | // If it is, we'll need to retry. |
| @@ -912,7 +908,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) { | |||
| 912 | 908 | ||
| 913 | Result KThread::GetThreadContext3(std::vector<u8>& out) { | 909 | Result KThread::GetThreadContext3(std::vector<u8>& out) { |
| 914 | // Lock ourselves. | 910 | // Lock ourselves. |
| 915 | KScopedLightLock lk{activity_pause_lock}; | 911 | KScopedLightLock lk{m_activity_pause_lock}; |
| 916 | 912 | ||
| 917 | // Get the context. | 913 | // Get the context. |
| 918 | { | 914 | { |
| @@ -923,8 +919,8 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) { | |||
| 923 | R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState); | 919 | R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState); |
| 924 | 920 | ||
| 925 | // If we're not terminating, get the thread's user context. | 921 | // If we're not terminating, get the thread's user context. |
| 926 | if (!IsTerminationRequested()) { | 922 | if (!this->IsTerminationRequested()) { |
| 927 | if (parent->Is64BitProcess()) { | 923 | if (m_parent->Is64BitProcess()) { |
| 928 | // Mask away mode bits, interrupt bits, IL bit, and other reserved bits. | 924 | // Mask away mode bits, interrupt bits, IL bit, and other reserved bits. |
| 929 | auto context = GetContext64(); | 925 | auto context = GetContext64(); |
| 930 | context.pstate &= 0xFF0FFE20; | 926 | context.pstate &= 0xFF0FFE20; |
| @@ -952,7 +948,7 @@ void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) { | |||
| 952 | lock_info->SetOwner(this); | 948 | lock_info->SetOwner(this); |
| 953 | 949 | ||
| 954 | // Add the lock to our held list. | 950 | // Add the lock to our held list. |
| 955 | held_lock_info_list.push_front(*lock_info); | 951 | m_held_lock_info_list.push_front(*lock_info); |
| 956 | } | 952 | } |
| 957 | 953 | ||
| 958 | KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key_, | 954 | KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key_, |
| @@ -960,7 +956,7 @@ KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_ke | |||
| 960 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); | 956 | ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); |
| 961 | 957 | ||
| 962 | // Try to find an existing held lock. | 958 | // Try to find an existing held lock. |
| 963 | for (auto& held_lock : held_lock_info_list) { | 959 | for (auto& held_lock : m_held_lock_info_list) { |
| 964 | if (held_lock.GetAddressKey() == address_key_ && | 960 | if (held_lock.GetAddressKey() == address_key_ && |
| 965 | held_lock.GetIsKernelAddressKey() == is_kernel_address_key_) { | 961 | held_lock.GetIsKernelAddressKey() == is_kernel_address_key_) { |
| 966 | return std::addressof(held_lock); | 962 | return std::addressof(held_lock); |
| @@ -975,21 +971,21 @@ void KThread::AddWaiterImpl(KThread* thread) { | |||
| 975 | ASSERT(thread->GetConditionVariableTree() == nullptr); | 971 | ASSERT(thread->GetConditionVariableTree() == nullptr); |
| 976 | 972 | ||
| 977 | // Get the thread's address key. | 973 | // Get the thread's address key. |
| 978 | const auto address_key_ = thread->GetAddressKey(); | 974 | const auto address_key = thread->GetAddressKey(); |
| 979 | const auto is_kernel_address_key_ = thread->GetIsKernelAddressKey(); | 975 | const auto is_kernel_address_key = thread->GetIsKernelAddressKey(); |
| 980 | 976 | ||
| 981 | // Keep track of how many kernel waiters we have. | 977 | // Keep track of how many kernel waiters we have. |
| 982 | if (is_kernel_address_key_) { | 978 | if (is_kernel_address_key) { |
| 983 | ASSERT((num_kernel_waiters++) >= 0); | 979 | ASSERT((m_num_kernel_waiters++) >= 0); |
| 984 | KScheduler::SetSchedulerUpdateNeeded(m_kernel); | 980 | KScheduler::SetSchedulerUpdateNeeded(m_kernel); |
| 985 | } | 981 | } |
| 986 | 982 | ||
| 987 | // Get the relevant lock info. | 983 | // Get the relevant lock info. |
| 988 | auto* lock_info = this->FindHeldLock(address_key_, is_kernel_address_key_); | 984 | auto* lock_info = this->FindHeldLock(address_key, is_kernel_address_key); |
| 989 | if (lock_info == nullptr) { | 985 | if (lock_info == nullptr) { |
| 990 | // Create a new lock for the address key. | 986 | // Create a new lock for the address key. |
| 991 | lock_info = | 987 | lock_info = |
| 992 | LockWithPriorityInheritanceInfo::Create(m_kernel, address_key_, is_kernel_address_key_); | 988 | LockWithPriorityInheritanceInfo::Create(m_kernel, address_key, is_kernel_address_key); |
| 993 | 989 | ||
| 994 | // Add the new lock to our list. | 990 | // Add the new lock to our list. |
| 995 | this->AddHeldLock(lock_info); | 991 | this->AddHeldLock(lock_info); |
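`AddWaiterImpl` above looks up a held-lock record by (address key, kernel-key flag), creates one on first use, and counts kernel waiters so the scheduler can be flagged for an update. A simplified find-or-create sketch, using `std::map` and a vector of names instead of the kernel's intrusive structures:

```cpp
// Simplified sketch of grouping waiters into per-address-key lock records on the
// owning thread. Containers and names here are illustrative only.
#include <cstdint>
#include <map>
#include <string>
#include <utility>
#include <vector>

struct LockInfo {
    bool is_kernel_address_key{};
    std::vector<std::string> waiters; // waiting thread names, for the demo
};

struct OwnerThread {
    // Keyed by (address key, is-kernel flag), mirroring FindHeldLock's lookup.
    std::map<std::pair<std::uint64_t, bool>, LockInfo> held_locks;
    int num_kernel_waiters{};

    void AddWaiter(std::uint64_t address_key, bool is_kernel_address_key, std::string waiter) {
        if (is_kernel_address_key) {
            ++num_kernel_waiters; // would also request a scheduler update
        }
        // Find the relevant lock info, creating it on first use of this key.
        auto& lock_info = held_locks[{address_key, is_kernel_address_key}];
        lock_info.is_kernel_address_key = is_kernel_address_key;
        lock_info.waiters.push_back(std::move(waiter));
    }
};

int main() {
    OwnerThread owner;
    owner.AddWaiter(0x1000, false, "user waiter A");
    owner.AddWaiter(0x1000, false, "user waiter B"); // reuses the existing record
    owner.AddWaiter(0x2000, true, "kernel waiter");
    return owner.num_kernel_waiters == 1 ? 0 : 1;
}
```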
| @@ -1004,7 +1000,7 @@ void KThread::RemoveWaiterImpl(KThread* thread) { | |||
| 1004 | 1000 | ||
| 1005 | // Keep track of how many kernel waiters we have. | 1001 | // Keep track of how many kernel waiters we have. |
| 1006 | if (thread->GetIsKernelAddressKey()) { | 1002 | if (thread->GetIsKernelAddressKey()) { |
| 1007 | ASSERT((num_kernel_waiters--) > 0); | 1003 | ASSERT((m_num_kernel_waiters--) > 0); |
| 1008 | KScheduler::SetSchedulerUpdateNeeded(m_kernel); | 1004 | KScheduler::SetSchedulerUpdateNeeded(m_kernel); |
| 1009 | } | 1005 | } |
| 1010 | 1006 | ||
| @@ -1014,7 +1010,7 @@ void KThread::RemoveWaiterImpl(KThread* thread) { | |||
| 1014 | 1010 | ||
| 1015 | // Remove the waiter. | 1011 | // Remove the waiter. |
| 1016 | if (lock_info->RemoveWaiter(thread)) { | 1012 | if (lock_info->RemoveWaiter(thread)) { |
| 1017 | held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info)); | 1013 | m_held_lock_info_list.erase(m_held_lock_info_list.iterator_to(*lock_info)); |
| 1018 | LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info); | 1014 | LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info); |
| 1019 | } | 1015 | } |
| 1020 | } | 1016 | } |
| @@ -1025,7 +1021,7 @@ void KThread::RestorePriority(KernelCore& kernel, KThread* thread) { | |||
| 1025 | while (thread != nullptr) { | 1021 | while (thread != nullptr) { |
| 1026 | // We want to inherit priority where possible. | 1022 | // We want to inherit priority where possible. |
| 1027 | s32 new_priority = thread->GetBasePriority(); | 1023 | s32 new_priority = thread->GetBasePriority(); |
| 1028 | for (const auto& held_lock : thread->held_lock_info_list) { | 1024 | for (const auto& held_lock : thread->m_held_lock_info_list) { |
| 1029 | new_priority = | 1025 | new_priority = |
| 1030 | std::min(new_priority, held_lock.GetHighestPriorityWaiter()->GetPriority()); | 1026 | std::min(new_priority, held_lock.GetHighestPriorityWaiter()->GetPriority()); |
| 1031 | } | 1027 | } |
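The `RestorePriority` hunk above computes the inherited priority as the minimum of the thread's base priority and the priority of the highest-priority waiter on each lock it holds (lower values are better). A standalone sketch of that rule, with the lock bookkeeping reduced to plain vectors purely for illustration:

```cpp
// Sketch of priority inheritance: effective priority = min(base priority,
// best waiter priority across all held locks). Lower value == higher priority.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

using Priority = std::int32_t;

Priority ComputeInheritedPriority(Priority base_priority,
                                  const std::vector<std::vector<Priority>>& held_lock_waiters) {
    Priority new_priority = base_priority;
    for (const auto& waiters : held_lock_waiters) {
        if (waiters.empty()) {
            continue;
        }
        // Inherit from the highest-priority (numerically smallest) waiter on this lock.
        const Priority best_waiter = *std::min_element(waiters.begin(), waiters.end());
        new_priority = std::min(new_priority, best_waiter);
    }
    return new_priority;
}

int main() {
    // Base priority 44, but a held lock has a waiter at priority 28 -> inherit 28.
    const Priority p = ComputeInheritedPriority(44, {{59, 28}, {50}});
    std::printf("effective priority: %d\n", p);
    return 0;
}
```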
| @@ -1102,12 +1098,12 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke | |||
| 1102 | } | 1098 | } |
| 1103 | 1099 | ||
| 1104 | // Remove the lock info from our held list. | 1100 | // Remove the lock info from our held list. |
| 1105 | held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info)); | 1101 | m_held_lock_info_list.erase(m_held_lock_info_list.iterator_to(*lock_info)); |
| 1106 | 1102 | ||
| 1107 | // Keep track of how many kernel waiters we have. | 1103 | // Keep track of how many kernel waiters we have. |
| 1108 | if (lock_info->GetIsKernelAddressKey()) { | 1104 | if (lock_info->GetIsKernelAddressKey()) { |
| 1109 | num_kernel_waiters -= lock_info->GetWaiterCount(); | 1105 | m_num_kernel_waiters -= lock_info->GetWaiterCount(); |
| 1110 | ASSERT(num_kernel_waiters >= 0); | 1106 | ASSERT(m_num_kernel_waiters >= 0); |
| 1111 | KScheduler::SetSchedulerUpdateNeeded(m_kernel); | 1107 | KScheduler::SetSchedulerUpdateNeeded(m_kernel); |
| 1112 | } | 1108 | } |
| 1113 | 1109 | ||
| @@ -1130,8 +1126,8 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke | |||
| 1130 | 1126 | ||
| 1131 | // Keep track of any kernel waiters for the new owner. | 1127 | // Keep track of any kernel waiters for the new owner. |
| 1132 | if (lock_info->GetIsKernelAddressKey()) { | 1128 | if (lock_info->GetIsKernelAddressKey()) { |
| 1133 | next_lock_owner->num_kernel_waiters += lock_info->GetWaiterCount(); | 1129 | next_lock_owner->m_num_kernel_waiters += lock_info->GetWaiterCount(); |
| 1134 | ASSERT(next_lock_owner->num_kernel_waiters > 0); | 1130 | ASSERT(next_lock_owner->m_num_kernel_waiters > 0); |
| 1135 | 1131 | ||
| 1136 | // NOTE: No need to set scheduler update needed, because we will have already done so | 1132 | // NOTE: No need to set scheduler update needed, because we will have already done so |
| 1137 | // when removing earlier. | 1133 | // when removing earlier. |
| @@ -1156,11 +1152,11 @@ Result KThread::Run() { | |||
| 1156 | KScopedSchedulerLock lk{m_kernel}; | 1152 | KScopedSchedulerLock lk{m_kernel}; |
| 1157 | 1153 | ||
| 1158 | // If either this thread or the current thread are requesting termination, note it. | 1154 | // If either this thread or the current thread are requesting termination, note it. |
| 1159 | R_UNLESS(!IsTerminationRequested(), ResultTerminationRequested); | 1155 | R_UNLESS(!this->IsTerminationRequested(), ResultTerminationRequested); |
| 1160 | R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested); | 1156 | R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested); |
| 1161 | 1157 | ||
| 1162 | // Ensure our thread state is correct. | 1158 | // Ensure our thread state is correct. |
| 1163 | R_UNLESS(GetState() == ThreadState::Initialized, ResultInvalidState); | 1159 | R_UNLESS(this->GetState() == ThreadState::Initialized, ResultInvalidState); |
| 1164 | 1160 | ||
| 1165 | // If the current thread has been asked to suspend, suspend it and retry. | 1161 | // If the current thread has been asked to suspend, suspend it and retry. |
| 1166 | if (GetCurrentThread(m_kernel).IsSuspended()) { | 1162 | if (GetCurrentThread(m_kernel).IsSuspended()) { |
| @@ -1177,7 +1173,7 @@ Result KThread::Run() { | |||
| 1177 | } | 1173 | } |
| 1178 | 1174 | ||
| 1179 | // Set our state and finish. | 1175 | // Set our state and finish. |
| 1180 | SetState(ThreadState::Runnable); | 1176 | this->SetState(ThreadState::Runnable); |
| 1181 | 1177 | ||
| 1182 | R_SUCCEED(); | 1178 | R_SUCCEED(); |
| 1183 | } | 1179 | } |
| @@ -1187,10 +1183,10 @@ void KThread::Exit() { | |||
| 1187 | ASSERT(this == GetCurrentThreadPointer(m_kernel)); | 1183 | ASSERT(this == GetCurrentThreadPointer(m_kernel)); |
| 1188 | 1184 | ||
| 1189 | // Release the thread resource hint, running thread count from parent. | 1185 | // Release the thread resource hint, running thread count from parent. |
| 1190 | if (parent != nullptr) { | 1186 | if (m_parent != nullptr) { |
| 1191 | parent->GetResourceLimit()->Release(Kernel::LimitableResource::ThreadCountMax, 0, 1); | 1187 | m_parent->GetResourceLimit()->Release(Kernel::LimitableResource::ThreadCountMax, 0, 1); |
| 1192 | resource_limit_release_hint = true; | 1188 | m_resource_limit_release_hint = true; |
| 1193 | parent->DecrementRunningThreadCount(); | 1189 | m_parent->DecrementRunningThreadCount(); |
| 1194 | } | 1190 | } |
| 1195 | 1191 | ||
| 1196 | // Perform termination. | 1192 | // Perform termination. |
| @@ -1198,11 +1194,11 @@ void KThread::Exit() { | |||
| 1198 | KScopedSchedulerLock sl{m_kernel}; | 1194 | KScopedSchedulerLock sl{m_kernel}; |
| 1199 | 1195 | ||
| 1200 | // Disallow all suspension. | 1196 | // Disallow all suspension. |
| 1201 | suspend_allowed_flags = 0; | 1197 | m_suspend_allowed_flags = 0; |
| 1202 | this->UpdateState(); | 1198 | this->UpdateState(); |
| 1203 | 1199 | ||
| 1204 | // Disallow all suspension. | 1200 | // Disallow all suspension. |
| 1205 | suspend_allowed_flags = 0; | 1201 | m_suspend_allowed_flags = 0; |
| 1206 | 1202 | ||
| 1207 | // Start termination. | 1203 | // Start termination. |
| 1208 | StartTermination(); | 1204 | StartTermination(); |
| @@ -1238,14 +1234,14 @@ ThreadState KThread::RequestTerminate() { | |||
| 1238 | const bool first_request = [&]() -> bool { | 1234 | const bool first_request = [&]() -> bool { |
| 1239 | // Perform an atomic compare-and-swap from false to true. | 1235 | // Perform an atomic compare-and-swap from false to true. |
| 1240 | bool expected = false; | 1236 | bool expected = false; |
| 1241 | return termination_requested.compare_exchange_strong(expected, true); | 1237 | return m_termination_requested.compare_exchange_strong(expected, true); |
| 1242 | }(); | 1238 | }(); |
| 1243 | 1239 | ||
| 1244 | // If this is the first request, start termination procedure. | 1240 | // If this is the first request, start termination procedure. |
| 1245 | if (first_request) { | 1241 | if (first_request) { |
| 1246 | // If the thread is in initialized state, just change state to terminated. | 1242 | // If the thread is in initialized state, just change state to terminated. |
| 1247 | if (this->GetState() == ThreadState::Initialized) { | 1243 | if (this->GetState() == ThreadState::Initialized) { |
| 1248 | thread_state = ThreadState::Terminated; | 1244 | m_thread_state = ThreadState::Terminated; |
| 1249 | return ThreadState::Terminated; | 1245 | return ThreadState::Terminated; |
| 1250 | } | 1246 | } |
| 1251 | 1247 | ||
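`RequestTerminate` above flips an atomic bool with `compare_exchange_strong` so that only the first caller performs the termination work, even if several threads race. A small runnable sketch of that first-request-wins idiom:

```cpp
// Sketch: exactly one caller observes first_request == true, regardless of how many
// threads call RequestTerminate concurrently.
#include <atomic>
#include <cstdio>
#include <thread>

std::atomic<bool> termination_requested{false};

void RequestTerminate(int caller) {
    bool expected = false;
    const bool first_request =
        termination_requested.compare_exchange_strong(expected, true);
    if (first_request) {
        // Only one caller ever reaches this branch.
        std::printf("caller %d starts the termination procedure\n", caller);
    }
}

int main() {
    std::thread a(RequestTerminate, 1);
    std::thread b(RequestTerminate, 2);
    a.join();
    b.join();
    return 0;
}
```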
| @@ -1259,7 +1255,7 @@ ThreadState KThread::RequestTerminate() { | |||
| 1259 | 1255 | ||
| 1260 | // If the thread is suspended, continue it. | 1256 | // If the thread is suspended, continue it. |
| 1261 | if (this->IsSuspended()) { | 1257 | if (this->IsSuspended()) { |
| 1262 | suspend_allowed_flags = 0; | 1258 | m_suspend_allowed_flags = 0; |
| 1263 | this->UpdateState(); | 1259 | this->UpdateState(); |
| 1264 | } | 1260 | } |
| 1265 | 1261 | ||
| @@ -1268,7 +1264,7 @@ ThreadState KThread::RequestTerminate() { | |||
| 1268 | 1264 | ||
| 1269 | // If the thread is runnable, send a termination interrupt to other cores. | 1265 | // If the thread is runnable, send a termination interrupt to other cores. |
| 1270 | if (this->GetState() == ThreadState::Runnable) { | 1266 | if (this->GetState() == ThreadState::Runnable) { |
| 1271 | if (const u64 core_mask = physical_affinity_mask.GetAffinityMask() & | 1267 | if (const u64 core_mask = m_physical_affinity_mask.GetAffinityMask() & |
| 1272 | ~(1ULL << GetCurrentCoreId(m_kernel)); | 1268 | ~(1ULL << GetCurrentCoreId(m_kernel)); |
| 1273 | core_mask != 0) { | 1269 | core_mask != 0) { |
| 1274 | Kernel::KInterruptManager::SendInterProcessorInterrupt(m_kernel, core_mask); | 1270 | Kernel::KInterruptManager::SendInterProcessorInterrupt(m_kernel, core_mask); |
| @@ -1277,7 +1273,7 @@ ThreadState KThread::RequestTerminate() { | |||
| 1277 | 1273 | ||
| 1278 | // Wake up the thread. | 1274 | // Wake up the thread. |
| 1279 | if (this->GetState() == ThreadState::Waiting) { | 1275 | if (this->GetState() == ThreadState::Waiting) { |
| 1280 | wait_queue->CancelWait(this, ResultTerminationRequested, true); | 1276 | m_wait_queue->CancelWait(this, ResultTerminationRequested, true); |
| 1281 | } | 1277 | } |
| 1282 | } | 1278 | } |
| 1283 | 1279 | ||
| @@ -1285,7 +1281,7 @@ ThreadState KThread::RequestTerminate() { | |||
| 1285 | } | 1281 | } |
| 1286 | 1282 | ||
| 1287 | Result KThread::Sleep(s64 timeout) { | 1283 | Result KThread::Sleep(s64 timeout) { |
| 1288 | ASSERT(!m_kernel.GlobalSchedulerContext().IsLocked()); | 1284 | ASSERT(!KScheduler::IsSchedulerLockedByCurrentThread(m_kernel)); |
| 1289 | ASSERT(this == GetCurrentThreadPointer(m_kernel)); | 1285 | ASSERT(this == GetCurrentThreadPointer(m_kernel)); |
| 1290 | ASSERT(timeout > 0); | 1286 | ASSERT(timeout > 0); |
| 1291 | 1287 | ||
| @@ -1315,7 +1311,7 @@ void KThread::RequestDummyThreadWait() { | |||
| 1315 | ASSERT(this->IsDummyThread()); | 1311 | ASSERT(this->IsDummyThread()); |
| 1316 | 1312 | ||
| 1317 | // We will block when the scheduler lock is released. | 1313 | // We will block when the scheduler lock is released. |
| 1318 | dummy_thread_runnable.store(false); | 1314 | m_dummy_thread_runnable.store(false); |
| 1319 | } | 1315 | } |
| 1320 | 1316 | ||
| 1321 | void KThread::DummyThreadBeginWait() { | 1317 | void KThread::DummyThreadBeginWait() { |
| @@ -1325,7 +1321,7 @@ void KThread::DummyThreadBeginWait() { | |||
| 1325 | } | 1321 | } |
| 1326 | 1322 | ||
| 1327 | // Block until runnable is no longer false. | 1323 | // Block until runnable is no longer false. |
| 1328 | dummy_thread_runnable.wait(false); | 1324 | m_dummy_thread_runnable.wait(false); |
| 1329 | } | 1325 | } |
| 1330 | 1326 | ||
| 1331 | void KThread::DummyThreadEndWait() { | 1327 | void KThread::DummyThreadEndWait() { |
| @@ -1333,8 +1329,8 @@ void KThread::DummyThreadEndWait() { | |||
| 1333 | ASSERT(this->IsDummyThread()); | 1329 | ASSERT(this->IsDummyThread()); |
| 1334 | 1330 | ||
| 1335 | // Wake up the waiting thread. | 1331 | // Wake up the waiting thread. |
| 1336 | dummy_thread_runnable.store(true); | 1332 | m_dummy_thread_runnable.store(true); |
| 1337 | dummy_thread_runnable.notify_one(); | 1333 | m_dummy_thread_runnable.notify_one(); |
| 1338 | } | 1334 | } |
| 1339 | 1335 | ||
| 1340 | void KThread::BeginWait(KThreadQueue* queue) { | 1336 | void KThread::BeginWait(KThreadQueue* queue) { |
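The dummy-thread helpers above block and wake HLE host threads with the C++20 `std::atomic` wait/notify facility: `store(false)` arms the wait, `wait(false)` blocks while the value is still false, and `store(true)` plus `notify_one()` releases the waiter. A toy standalone version of that scheme, not the KThread member functions themselves:

```cpp
// Sketch of parking a host thread on an atomic flag and waking it from another thread.
#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

std::atomic<bool> dummy_thread_runnable{true};

void RequestDummyThreadWait() {
    dummy_thread_runnable.store(false); // the waiter will block on its next wait()
}

void DummyThreadBeginWait() {
    dummy_thread_runnable.wait(false);  // blocks while the value is still false
}

void DummyThreadEndWait() {
    dummy_thread_runnable.store(true);
    dummy_thread_runnable.notify_one(); // wake the blocked waiter
}

int main() {
    RequestDummyThreadWait();
    std::thread waiter([] {
        DummyThreadBeginWait();
        std::printf("dummy thread woke up\n");
    });
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    DummyThreadEndWait();
    waiter.join();
    return 0;
}
```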
| @@ -1342,42 +1338,42 @@ void KThread::BeginWait(KThreadQueue* queue) { | |||
| 1342 | SetState(ThreadState::Waiting); | 1338 | SetState(ThreadState::Waiting); |
| 1343 | 1339 | ||
| 1344 | // Set our wait queue. | 1340 | // Set our wait queue. |
| 1345 | wait_queue = queue; | 1341 | m_wait_queue = queue; |
| 1346 | } | 1342 | } |
| 1347 | 1343 | ||
| 1348 | void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_) { | 1344 | void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result) { |
| 1349 | // Lock the scheduler. | 1345 | // Lock the scheduler. |
| 1350 | KScopedSchedulerLock sl(m_kernel); | 1346 | KScopedSchedulerLock sl(m_kernel); |
| 1351 | 1347 | ||
| 1352 | // If we're waiting, notify our queue that we're available. | 1348 | // If we're waiting, notify our queue that we're available. |
| 1353 | if (GetState() == ThreadState::Waiting) { | 1349 | if (this->GetState() == ThreadState::Waiting) { |
| 1354 | wait_queue->NotifyAvailable(this, signaled_object, wait_result_); | 1350 | m_wait_queue->NotifyAvailable(this, signaled_object, wait_result); |
| 1355 | } | 1351 | } |
| 1356 | } | 1352 | } |
| 1357 | 1353 | ||
| 1358 | void KThread::EndWait(Result wait_result_) { | 1354 | void KThread::EndWait(Result wait_result) { |
| 1359 | // Lock the scheduler. | 1355 | // Lock the scheduler. |
| 1360 | KScopedSchedulerLock sl(m_kernel); | 1356 | KScopedSchedulerLock sl(m_kernel); |
| 1361 | 1357 | ||
| 1362 | // If we're waiting, notify our queue that we're available. | 1358 | // If we're waiting, notify our queue that we're available. |
| 1363 | if (GetState() == ThreadState::Waiting) { | 1359 | if (this->GetState() == ThreadState::Waiting) { |
| 1364 | if (wait_queue == nullptr) { | 1360 | if (m_wait_queue == nullptr) { |
| 1365 | // This should never happen, but avoid a hard crash below to get this logged. | 1361 | // This should never happen, but avoid a hard crash below to get this logged. |
| 1366 | ASSERT_MSG(false, "wait_queue is nullptr!"); | 1362 | ASSERT_MSG(false, "wait_queue is nullptr!"); |
| 1367 | return; | 1363 | return; |
| 1368 | } | 1364 | } |
| 1369 | 1365 | ||
| 1370 | wait_queue->EndWait(this, wait_result_); | 1366 | m_wait_queue->EndWait(this, wait_result); |
| 1371 | } | 1367 | } |
| 1372 | } | 1368 | } |
| 1373 | 1369 | ||
| 1374 | void KThread::CancelWait(Result wait_result_, bool cancel_timer_task) { | 1370 | void KThread::CancelWait(Result wait_result, bool cancel_timer_task) { |
| 1375 | // Lock the scheduler. | 1371 | // Lock the scheduler. |
| 1376 | KScopedSchedulerLock sl(m_kernel); | 1372 | KScopedSchedulerLock sl(m_kernel); |
| 1377 | 1373 | ||
| 1378 | // If we're waiting, notify our queue that we're available. | 1374 | // If we're waiting, notify our queue that we're available. |
| 1379 | if (GetState() == ThreadState::Waiting) { | 1375 | if (this->GetState() == ThreadState::Waiting) { |
| 1380 | wait_queue->CancelWait(this, wait_result_, cancel_timer_task); | 1376 | m_wait_queue->CancelWait(this, wait_result, cancel_timer_task); |
| 1381 | } | 1377 | } |
| 1382 | } | 1378 | } |
| 1383 | 1379 | ||
| @@ -1385,20 +1381,19 @@ void KThread::SetState(ThreadState state) { | |||
| 1385 | KScopedSchedulerLock sl{m_kernel}; | 1381 | KScopedSchedulerLock sl{m_kernel}; |
| 1386 | 1382 | ||
| 1387 | // Clear debugging state | 1383 | // Clear debugging state |
| 1388 | SetMutexWaitAddressForDebugging({}); | ||
| 1389 | SetWaitReasonForDebugging({}); | 1384 | SetWaitReasonForDebugging({}); |
| 1390 | 1385 | ||
| 1391 | const ThreadState old_state = thread_state.load(std::memory_order_relaxed); | 1386 | const ThreadState old_state = m_thread_state.load(std::memory_order_relaxed); |
| 1392 | thread_state.store( | 1387 | m_thread_state.store( |
| 1393 | static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)), | 1388 | static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)), |
| 1394 | std::memory_order_relaxed); | 1389 | std::memory_order_relaxed); |
| 1395 | if (thread_state.load(std::memory_order_relaxed) != old_state) { | 1390 | if (m_thread_state.load(std::memory_order_relaxed) != old_state) { |
| 1396 | KScheduler::OnThreadStateChanged(m_kernel, this, old_state); | 1391 | KScheduler::OnThreadStateChanged(m_kernel, this, old_state); |
| 1397 | } | 1392 | } |
| 1398 | } | 1393 | } |
| 1399 | 1394 | ||
| 1400 | std::shared_ptr<Common::Fiber>& KThread::GetHostContext() { | 1395 | std::shared_ptr<Common::Fiber>& KThread::GetHostContext() { |
| 1401 | return host_context; | 1396 | return m_host_context; |
| 1402 | } | 1397 | } |
| 1403 | 1398 | ||
| 1404 | void SetCurrentThread(KernelCore& kernel, KThread* thread) { | 1399 | void SetCurrentThread(KernelCore& kernel, KThread* thread) { |
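The k_thread.cpp changes above follow one mechanical pattern, and the header diff below continues it. As an editorial summary, the sketch that follows shows what the commit's "new style" appears to mean, inferred from the diff: `m_`-prefixed members, explicit `this->` on member calls, `1U`-style literals, renamed accessors such as `GetTLSAddress` becoming `GetTlsAddress`, and trivial accessors without `[[nodiscard]]`. The class below is a toy, not `KThread`.

```cpp
// Editorial sketch of the naming/style convention applied by this commit.
#include <cstdint>

class Example {
public:
    // Old style (as removed by this commit):
    //   [[nodiscard]] s32 GetBasePriority() const { return base_priority; }
    // New style:
    std::int32_t GetBasePriority() const {
        return m_base_priority;
    }

    void SetBasePriority(std::int32_t value) {
        m_base_priority = value;
        this->RestorePriority(); // member calls are written through this->
    }

private:
    void RestorePriority() {}

    std::int32_t m_base_priority{}; // members carry an m_ prefix
};

int main() {
    Example e;
    e.SetBasePriority(44);
    return e.GetBasePriority() == 44 ? 0 : 1;
}
```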
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index e541ea079..53fa64369 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h | |||
| @@ -108,11 +108,11 @@ enum class StepState : u32 { | |||
| 108 | }; | 108 | }; |
| 109 | 109 | ||
| 110 | void SetCurrentThread(KernelCore& kernel, KThread* thread); | 110 | void SetCurrentThread(KernelCore& kernel, KThread* thread); |
| 111 | [[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel); | 111 | KThread* GetCurrentThreadPointer(KernelCore& kernel); |
| 112 | [[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel); | 112 | KThread& GetCurrentThread(KernelCore& kernel); |
| 113 | [[nodiscard]] KProcess* GetCurrentProcessPointer(KernelCore& kernel); | 113 | KProcess* GetCurrentProcessPointer(KernelCore& kernel); |
| 114 | [[nodiscard]] KProcess& GetCurrentProcess(KernelCore& kernel); | 114 | KProcess& GetCurrentProcess(KernelCore& kernel); |
| 115 | [[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel); | 115 | s32 GetCurrentCoreId(KernelCore& kernel); |
| 116 | 116 | ||
| 117 | class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KWorkerTask>, | 117 | class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KWorkerTask>, |
| 118 | public boost::intrusive::list_base_hook<>, | 118 | public boost::intrusive::list_base_hook<>, |
| @@ -136,16 +136,12 @@ public: | |||
| 136 | using ThreadContext64 = Core::ARM_Interface::ThreadContext64; | 136 | using ThreadContext64 = Core::ARM_Interface::ThreadContext64; |
| 137 | using WaiterList = boost::intrusive::list<KThread>; | 137 | using WaiterList = boost::intrusive::list<KThread>; |
| 138 | 138 | ||
| 139 | void SetName(std::string new_name) { | ||
| 140 | name = std::move(new_name); | ||
| 141 | } | ||
| 142 | |||
| 143 | /** | 139 | /** |
| 144 | * Gets the thread's current priority | 140 | * Gets the thread's current priority |
| 145 | * @return The current thread's priority | 141 | * @return The current thread's priority |
| 146 | */ | 142 | */ |
| 147 | [[nodiscard]] s32 GetPriority() const { | 143 | s32 GetPriority() const { |
| 148 | return priority; | 144 | return m_priority; |
| 149 | } | 145 | } |
| 150 | 146 | ||
| 151 | /** | 147 | /** |
| @@ -153,23 +149,23 @@ public: | |||
| 153 | * @param priority The new priority. | 149 | * @param priority The new priority. |
| 154 | */ | 150 | */ |
| 155 | void SetPriority(s32 value) { | 151 | void SetPriority(s32 value) { |
| 156 | priority = value; | 152 | m_priority = value; |
| 157 | } | 153 | } |
| 158 | 154 | ||
| 159 | /** | 155 | /** |
| 160 | * Gets the thread's nominal priority. | 156 | * Gets the thread's nominal priority. |
| 161 | * @return The current thread's nominal priority. | 157 | * @return The current thread's nominal priority. |
| 162 | */ | 158 | */ |
| 163 | [[nodiscard]] s32 GetBasePriority() const { | 159 | s32 GetBasePriority() const { |
| 164 | return base_priority; | 160 | return m_base_priority; |
| 165 | } | 161 | } |
| 166 | 162 | ||
| 167 | /** | 163 | /** |
| 168 | * Gets the thread's thread ID | 164 | * Gets the thread's thread ID |
| 169 | * @return The thread's ID | 165 | * @return The thread's ID |
| 170 | */ | 166 | */ |
| 171 | [[nodiscard]] u64 GetThreadID() const { | 167 | u64 GetThreadId() const { |
| 172 | return thread_id; | 168 | return m_thread_id; |
| 173 | } | 169 | } |
| 174 | 170 | ||
| 175 | void ContinueIfHasKernelWaiters() { | 171 | void ContinueIfHasKernelWaiters() { |
| @@ -180,7 +176,7 @@ public: | |||
| 180 | 176 | ||
| 181 | void SetBasePriority(s32 value); | 177 | void SetBasePriority(s32 value); |
| 182 | 178 | ||
| 183 | [[nodiscard]] Result Run(); | 179 | Result Run(); |
| 184 | 180 | ||
| 185 | void Exit(); | 181 | void Exit(); |
| 186 | 182 | ||
| @@ -188,22 +184,22 @@ public: | |||
| 188 | 184 | ||
| 189 | ThreadState RequestTerminate(); | 185 | ThreadState RequestTerminate(); |
| 190 | 186 | ||
| 191 | [[nodiscard]] u32 GetSuspendFlags() const { | 187 | u32 GetSuspendFlags() const { |
| 192 | return suspend_allowed_flags & suspend_request_flags; | 188 | return m_suspend_allowed_flags & m_suspend_request_flags; |
| 193 | } | 189 | } |
| 194 | 190 | ||
| 195 | [[nodiscard]] bool IsSuspended() const { | 191 | bool IsSuspended() const { |
| 196 | return GetSuspendFlags() != 0; | 192 | return GetSuspendFlags() != 0; |
| 197 | } | 193 | } |
| 198 | 194 | ||
| 199 | [[nodiscard]] bool IsSuspendRequested(SuspendType type) const { | 195 | bool IsSuspendRequested(SuspendType type) const { |
| 200 | return (suspend_request_flags & | 196 | return (m_suspend_request_flags & |
| 201 | (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)))) != | 197 | (1U << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)))) != |
| 202 | 0; | 198 | 0; |
| 203 | } | 199 | } |
| 204 | 200 | ||
| 205 | [[nodiscard]] bool IsSuspendRequested() const { | 201 | bool IsSuspendRequested() const { |
| 206 | return suspend_request_flags != 0; | 202 | return m_suspend_request_flags != 0; |
| 207 | } | 203 | } |
| 208 | 204 | ||
| 209 | void RequestSuspend(SuspendType type); | 205 | void RequestSuspend(SuspendType type); |
| @@ -217,124 +213,124 @@ public: | |||
| 217 | void Continue(); | 213 | void Continue(); |
| 218 | 214 | ||
| 219 | constexpr void SetSyncedIndex(s32 index) { | 215 | constexpr void SetSyncedIndex(s32 index) { |
| 220 | synced_index = index; | 216 | m_synced_index = index; |
| 221 | } | 217 | } |
| 222 | 218 | ||
| 223 | [[nodiscard]] constexpr s32 GetSyncedIndex() const { | 219 | constexpr s32 GetSyncedIndex() const { |
| 224 | return synced_index; | 220 | return m_synced_index; |
| 225 | } | 221 | } |
| 226 | 222 | ||
| 227 | constexpr void SetWaitResult(Result wait_res) { | 223 | constexpr void SetWaitResult(Result wait_res) { |
| 228 | wait_result = wait_res; | 224 | m_wait_result = wait_res; |
| 229 | } | 225 | } |
| 230 | 226 | ||
| 231 | [[nodiscard]] constexpr Result GetWaitResult() const { | 227 | constexpr Result GetWaitResult() const { |
| 232 | return wait_result; | 228 | return m_wait_result; |
| 233 | } | 229 | } |
| 234 | 230 | ||
| 235 | /* | 231 | /* |
| 236 | * Returns the Thread Local Storage address of the current thread | 232 | * Returns the Thread Local Storage address of the current thread |
| 237 | * @returns VAddr of the thread's TLS | 233 | * @returns VAddr of the thread's TLS |
| 238 | */ | 234 | */ |
| 239 | [[nodiscard]] VAddr GetTLSAddress() const { | 235 | VAddr GetTlsAddress() const { |
| 240 | return tls_address; | 236 | return m_tls_address; |
| 241 | } | 237 | } |
| 242 | 238 | ||
| 243 | /* | 239 | /* |
| 244 | * Returns the value of the TPIDR_EL0 Read/Write system register for this thread. | 240 | * Returns the value of the TPIDR_EL0 Read/Write system register for this thread. |
| 245 | * @returns The value of the TPIDR_EL0 register. | 241 | * @returns The value of the TPIDR_EL0 register. |
| 246 | */ | 242 | */ |
| 247 | [[nodiscard]] u64 GetTPIDR_EL0() const { | 243 | u64 GetTpidrEl0() const { |
| 248 | return thread_context_64.tpidr; | 244 | return m_thread_context_64.tpidr; |
| 249 | } | 245 | } |
| 250 | 246 | ||
| 251 | /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread. | 247 | /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread. |
| 252 | void SetTPIDR_EL0(u64 value) { | 248 | void SetTpidrEl0(u64 value) { |
| 253 | thread_context_64.tpidr = value; | 249 | m_thread_context_64.tpidr = value; |
| 254 | thread_context_32.tpidr = static_cast<u32>(value); | 250 | m_thread_context_32.tpidr = static_cast<u32>(value); |
| 255 | } | 251 | } |
| 256 | 252 | ||
| 257 | void CloneFpuStatus(); | 253 | void CloneFpuStatus(); |
| 258 | 254 | ||
| 259 | [[nodiscard]] ThreadContext32& GetContext32() { | 255 | ThreadContext32& GetContext32() { |
| 260 | return thread_context_32; | 256 | return m_thread_context_32; |
| 261 | } | 257 | } |
| 262 | 258 | ||
| 263 | [[nodiscard]] const ThreadContext32& GetContext32() const { | 259 | const ThreadContext32& GetContext32() const { |
| 264 | return thread_context_32; | 260 | return m_thread_context_32; |
| 265 | } | 261 | } |
| 266 | 262 | ||
| 267 | [[nodiscard]] ThreadContext64& GetContext64() { | 263 | ThreadContext64& GetContext64() { |
| 268 | return thread_context_64; | 264 | return m_thread_context_64; |
| 269 | } | 265 | } |
| 270 | 266 | ||
| 271 | [[nodiscard]] const ThreadContext64& GetContext64() const { | 267 | const ThreadContext64& GetContext64() const { |
| 272 | return thread_context_64; | 268 | return m_thread_context_64; |
| 273 | } | 269 | } |
| 274 | 270 | ||
| 275 | [[nodiscard]] std::shared_ptr<Common::Fiber>& GetHostContext(); | 271 | std::shared_ptr<Common::Fiber>& GetHostContext(); |
| 276 | 272 | ||
| 277 | [[nodiscard]] ThreadState GetState() const { | 273 | ThreadState GetState() const { |
| 278 | return thread_state.load(std::memory_order_relaxed) & ThreadState::Mask; | 274 | return m_thread_state.load(std::memory_order_relaxed) & ThreadState::Mask; |
| 279 | } | 275 | } |
| 280 | 276 | ||
| 281 | [[nodiscard]] ThreadState GetRawState() const { | 277 | ThreadState GetRawState() const { |
| 282 | return thread_state.load(std::memory_order_relaxed); | 278 | return m_thread_state.load(std::memory_order_relaxed); |
| 283 | } | 279 | } |
| 284 | 280 | ||
| 285 | void SetState(ThreadState state); | 281 | void SetState(ThreadState state); |
| 286 | 282 | ||
| 287 | [[nodiscard]] StepState GetStepState() const { | 283 | StepState GetStepState() const { |
| 288 | return step_state; | 284 | return m_step_state; |
| 289 | } | 285 | } |
| 290 | 286 | ||
| 291 | void SetStepState(StepState state) { | 287 | void SetStepState(StepState state) { |
| 292 | step_state = state; | 288 | m_step_state = state; |
| 293 | } | 289 | } |
| 294 | 290 | ||
| 295 | [[nodiscard]] s64 GetLastScheduledTick() const { | 291 | s64 GetLastScheduledTick() const { |
| 296 | return last_scheduled_tick; | 292 | return m_last_scheduled_tick; |
| 297 | } | 293 | } |
| 298 | 294 | ||
| 299 | void SetLastScheduledTick(s64 tick) { | 295 | void SetLastScheduledTick(s64 tick) { |
| 300 | last_scheduled_tick = tick; | 296 | m_last_scheduled_tick = tick; |
| 301 | } | 297 | } |
| 302 | 298 | ||
| 303 | void AddCpuTime([[maybe_unused]] s32 core_id_, s64 amount) { | 299 | void AddCpuTime(s32 core_id, s64 amount) { |
| 304 | cpu_time += amount; | 300 | m_cpu_time += amount; |
| 305 | // TODO(bunnei): Debug kernels track per-core tick counts. Should we? | 301 | // TODO(bunnei): Debug kernels track per-core tick counts. Should we? |
| 306 | } | 302 | } |
| 307 | 303 | ||
| 308 | [[nodiscard]] s64 GetCpuTime() const { | 304 | s64 GetCpuTime() const { |
| 309 | return cpu_time; | 305 | return m_cpu_time; |
| 310 | } | 306 | } |
| 311 | 307 | ||
| 312 | [[nodiscard]] s32 GetActiveCore() const { | 308 | s32 GetActiveCore() const { |
| 313 | return core_id; | 309 | return m_core_id; |
| 314 | } | 310 | } |
| 315 | 311 | ||
| 316 | void SetActiveCore(s32 core) { | 312 | void SetActiveCore(s32 core) { |
| 317 | core_id = core; | 313 | m_core_id = core; |
| 318 | } | 314 | } |
| 319 | 315 | ||
| 320 | [[nodiscard]] s32 GetCurrentCore() const { | 316 | s32 GetCurrentCore() const { |
| 321 | return current_core_id; | 317 | return m_current_core_id; |
| 322 | } | 318 | } |
| 323 | 319 | ||
| 324 | void SetCurrentCore(s32 core) { | 320 | void SetCurrentCore(s32 core) { |
| 325 | current_core_id = core; | 321 | m_current_core_id = core; |
| 326 | } | 322 | } |
| 327 | 323 | ||
| 328 | [[nodiscard]] KProcess* GetOwnerProcess() { | 324 | KProcess* GetOwnerProcess() { |
| 329 | return parent; | 325 | return m_parent; |
| 330 | } | 326 | } |
| 331 | 327 | ||
| 332 | [[nodiscard]] const KProcess* GetOwnerProcess() const { | 328 | const KProcess* GetOwnerProcess() const { |
| 333 | return parent; | 329 | return m_parent; |
| 334 | } | 330 | } |
| 335 | 331 | ||
| 336 | [[nodiscard]] bool IsUserThread() const { | 332 | bool IsUserThread() const { |
| 337 | return parent != nullptr; | 333 | return m_parent != nullptr; |
| 338 | } | 334 | } |
| 339 | 335 | ||
| 340 | u16 GetUserDisableCount() const; | 336 | u16 GetUserDisableCount() const; |
| @@ -343,69 +339,69 @@ public: | |||
| 343 | 339 | ||
| 344 | KThread* GetLockOwner() const; | 340 | KThread* GetLockOwner() const; |
| 345 | 341 | ||
| 346 | [[nodiscard]] const KAffinityMask& GetAffinityMask() const { | 342 | const KAffinityMask& GetAffinityMask() const { |
| 347 | return physical_affinity_mask; | 343 | return m_physical_affinity_mask; |
| 348 | } | 344 | } |
| 349 | 345 | ||
| 350 | [[nodiscard]] Result GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask); | 346 | Result GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask); |
| 351 | 347 | ||
| 352 | [[nodiscard]] Result GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask); | 348 | Result GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask); |
| 353 | 349 | ||
| 354 | [[nodiscard]] Result SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask); | 350 | Result SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask); |
| 355 | 351 | ||
| 356 | [[nodiscard]] Result SetActivity(Svc::ThreadActivity activity); | 352 | Result SetActivity(Svc::ThreadActivity activity); |
| 357 | 353 | ||
| 358 | [[nodiscard]] Result Sleep(s64 timeout); | 354 | Result Sleep(s64 timeout); |
| 359 | 355 | ||
| 360 | [[nodiscard]] s64 GetYieldScheduleCount() const { | 356 | s64 GetYieldScheduleCount() const { |
| 361 | return schedule_count; | 357 | return m_schedule_count; |
| 362 | } | 358 | } |
| 363 | 359 | ||
| 364 | void SetYieldScheduleCount(s64 count) { | 360 | void SetYieldScheduleCount(s64 count) { |
| 365 | schedule_count = count; | 361 | m_schedule_count = count; |
| 366 | } | 362 | } |
| 367 | 363 | ||
| 368 | void WaitCancel(); | 364 | void WaitCancel(); |
| 369 | 365 | ||
| 370 | [[nodiscard]] bool IsWaitCancelled() const { | 366 | bool IsWaitCancelled() const { |
| 371 | return wait_cancelled; | 367 | return m_wait_cancelled; |
| 372 | } | 368 | } |
| 373 | 369 | ||
| 374 | void ClearWaitCancelled() { | 370 | void ClearWaitCancelled() { |
| 375 | wait_cancelled = false; | 371 | m_wait_cancelled = false; |
| 376 | } | 372 | } |
| 377 | 373 | ||
| 378 | [[nodiscard]] bool IsCancellable() const { | 374 | bool IsCancellable() const { |
| 379 | return cancellable; | 375 | return m_cancellable; |
| 380 | } | 376 | } |
| 381 | 377 | ||
| 382 | void SetCancellable() { | 378 | void SetCancellable() { |
| 383 | cancellable = true; | 379 | m_cancellable = true; |
| 384 | } | 380 | } |
| 385 | 381 | ||
| 386 | void ClearCancellable() { | 382 | void ClearCancellable() { |
| 387 | cancellable = false; | 383 | m_cancellable = false; |
| 388 | } | 384 | } |
| 389 | 385 | ||
| 390 | [[nodiscard]] bool IsTerminationRequested() const { | 386 | bool IsTerminationRequested() const { |
| 391 | return termination_requested || GetRawState() == ThreadState::Terminated; | 387 | return m_termination_requested || GetRawState() == ThreadState::Terminated; |
| 392 | } | 388 | } |
| 393 | 389 | ||
| 394 | [[nodiscard]] u64 GetId() const override { | 390 | u64 GetId() const override { |
| 395 | return this->GetThreadID(); | 391 | return this->GetThreadId(); |
| 396 | } | 392 | } |
| 397 | 393 | ||
| 398 | [[nodiscard]] bool IsInitialized() const override { | 394 | bool IsInitialized() const override { |
| 399 | return initialized; | 395 | return m_initialized; |
| 400 | } | 396 | } |
| 401 | 397 | ||
| 402 | [[nodiscard]] uintptr_t GetPostDestroyArgument() const override { | 398 | uintptr_t GetPostDestroyArgument() const override { |
| 403 | return reinterpret_cast<uintptr_t>(parent) | (resource_limit_release_hint ? 1 : 0); | 399 | return reinterpret_cast<uintptr_t>(m_parent) | (m_resource_limit_release_hint ? 1 : 0); |
| 404 | } | 400 | } |
| 405 | 401 | ||
| 406 | void Finalize() override; | 402 | void Finalize() override; |
| 407 | 403 | ||
| 408 | [[nodiscard]] bool IsSignaled() const override; | 404 | bool IsSignaled() const override; |
| 409 | 405 | ||
| 410 | void OnTimer(); | 406 | void OnTimer(); |
| 411 | 407 | ||
| @@ -413,26 +409,22 @@ public: | |||
| 413 | 409 | ||
| 414 | static void PostDestroy(uintptr_t arg); | 410 | static void PostDestroy(uintptr_t arg); |
| 415 | 411 | ||
| 416 | [[nodiscard]] static Result InitializeDummyThread(KThread* thread, KProcess* owner); | 412 | static Result InitializeDummyThread(KThread* thread, KProcess* owner); |
| 417 | 413 | ||
| 418 | [[nodiscard]] static Result InitializeMainThread(Core::System& system, KThread* thread, | 414 | static Result InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core); |
| 419 | s32 virt_core); | ||
| 420 | 415 | ||
| 421 | [[nodiscard]] static Result InitializeIdleThread(Core::System& system, KThread* thread, | 416 | static Result InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core); |
| 422 | s32 virt_core); | ||
| 423 | 417 | ||
| 424 | [[nodiscard]] static Result InitializeHighPriorityThread(Core::System& system, KThread* thread, | 418 | static Result InitializeHighPriorityThread(Core::System& system, KThread* thread, |
| 425 | KThreadFunction func, uintptr_t arg, | 419 | KThreadFunction func, uintptr_t arg, s32 virt_core); |
| 426 | s32 virt_core); | ||
| 427 | 420 | ||
| 428 | [[nodiscard]] static Result InitializeUserThread(Core::System& system, KThread* thread, | 421 | static Result InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func, |
| 429 | KThreadFunction func, uintptr_t arg, | 422 | uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core, |
| 430 | VAddr user_stack_top, s32 prio, s32 virt_core, | 423 | KProcess* owner); |
| 431 | KProcess* owner); | ||
| 432 | 424 | ||
| 433 | [[nodiscard]] static Result InitializeServiceThread(Core::System& system, KThread* thread, | 425 | static Result InitializeServiceThread(Core::System& system, KThread* thread, |
| 434 | std::function<void()>&& thread_func, | 426 | std::function<void()>&& thread_func, s32 prio, |
| 435 | s32 prio, s32 virt_core, KProcess* owner); | 427 | s32 virt_core, KProcess* owner); |
| 436 | 428 | ||
| 437 | public: | 429 | public: |
| 438 | struct StackParameters { | 430 | struct StackParameters { |
| @@ -446,12 +438,12 @@ public: | |||
| 446 | KThread* cur_thread; | 438 | KThread* cur_thread; |
| 447 | }; | 439 | }; |
| 448 | 440 | ||
| 449 | [[nodiscard]] StackParameters& GetStackParameters() { | 441 | StackParameters& GetStackParameters() { |
| 450 | return stack_parameters; | 442 | return m_stack_parameters; |
| 451 | } | 443 | } |
| 452 | 444 | ||
| 453 | [[nodiscard]] const StackParameters& GetStackParameters() const { | 445 | const StackParameters& GetStackParameters() const { |
| 454 | return stack_parameters; | 446 | return m_stack_parameters; |
| 455 | } | 447 | } |
| 456 | 448 | ||
| 457 | class QueueEntry { | 449 | class QueueEntry { |
| @@ -459,37 +451,37 @@ public: | |||
| 459 | constexpr QueueEntry() = default; | 451 | constexpr QueueEntry() = default; |
| 460 | 452 | ||
| 461 | constexpr void Initialize() { | 453 | constexpr void Initialize() { |
| 462 | prev = nullptr; | 454 | m_prev = nullptr; |
| 463 | next = nullptr; | 455 | m_next = nullptr; |
| 464 | } | 456 | } |
| 465 | 457 | ||
| 466 | constexpr KThread* GetPrev() const { | 458 | constexpr KThread* GetPrev() const { |
| 467 | return prev; | 459 | return m_prev; |
| 468 | } | 460 | } |
| 469 | constexpr KThread* GetNext() const { | 461 | constexpr KThread* GetNext() const { |
| 470 | return next; | 462 | return m_next; |
| 471 | } | 463 | } |
| 472 | constexpr void SetPrev(KThread* thread) { | 464 | constexpr void SetPrev(KThread* thread) { |
| 473 | prev = thread; | 465 | m_prev = thread; |
| 474 | } | 466 | } |
| 475 | constexpr void SetNext(KThread* thread) { | 467 | constexpr void SetNext(KThread* thread) { |
| 476 | next = thread; | 468 | m_next = thread; |
| 477 | } | 469 | } |
| 478 | 470 | ||
| 479 | private: | 471 | private: |
| 480 | KThread* prev{}; | 472 | KThread* m_prev{}; |
| 481 | KThread* next{}; | 473 | KThread* m_next{}; |
| 482 | }; | 474 | }; |
| 483 | 475 | ||
| 484 | [[nodiscard]] QueueEntry& GetPriorityQueueEntry(s32 core) { | 476 | QueueEntry& GetPriorityQueueEntry(s32 core) { |
| 485 | return per_core_priority_queue_entry[core]; | 477 | return m_per_core_priority_queue_entry[core]; |
| 486 | } | 478 | } |
| 487 | 479 | ||
| 488 | [[nodiscard]] const QueueEntry& GetPriorityQueueEntry(s32 core) const { | 480 | const QueueEntry& GetPriorityQueueEntry(s32 core) const { |
| 489 | return per_core_priority_queue_entry[core]; | 481 | return m_per_core_priority_queue_entry[core]; |
| 490 | } | 482 | } |
| 491 | 483 | ||
| 492 | [[nodiscard]] s32 GetDisableDispatchCount() const { | 484 | s32 GetDisableDispatchCount() const { |
| 493 | return this->GetStackParameters().disable_count; | 485 | return this->GetStackParameters().disable_count; |
| 494 | } | 486 | } |
| 495 | 487 | ||
| @@ -515,7 +507,7 @@ public: | |||
| 515 | this->GetStackParameters().is_in_exception_handler = false; | 507 | this->GetStackParameters().is_in_exception_handler = false; |
| 516 | } | 508 | } |
| 517 | 509 | ||
| 518 | [[nodiscard]] bool IsInExceptionHandler() const { | 510 | bool IsInExceptionHandler() const { |
| 519 | return this->GetStackParameters().is_in_exception_handler; | 511 | return this->GetStackParameters().is_in_exception_handler; |
| 520 | } | 512 | } |
| 521 | 513 | ||
| @@ -527,11 +519,11 @@ public: | |||
| 527 | this->GetStackParameters().is_calling_svc = false; | 519 | this->GetStackParameters().is_calling_svc = false; |
| 528 | } | 520 | } |
| 529 | 521 | ||
| 530 | [[nodiscard]] bool IsCallingSvc() const { | 522 | bool IsCallingSvc() const { |
| 531 | return this->GetStackParameters().is_calling_svc; | 523 | return this->GetStackParameters().is_calling_svc; |
| 532 | } | 524 | } |
| 533 | 525 | ||
| 534 | [[nodiscard]] u8 GetSvcId() const { | 526 | u8 GetSvcId() const { |
| 535 | return this->GetStackParameters().current_svc_id; | 527 | return this->GetStackParameters().current_svc_id; |
| 536 | } | 528 | } |
| 537 | 529 | ||
| @@ -543,78 +535,54 @@ public: | |||
| 543 | this->GetStackParameters().dpc_flags &= ~static_cast<u8>(flag); | 535 | this->GetStackParameters().dpc_flags &= ~static_cast<u8>(flag); |
| 544 | } | 536 | } |
| 545 | 537 | ||
| 546 | [[nodiscard]] u8 GetDpc() const { | 538 | u8 GetDpc() const { |
| 547 | return this->GetStackParameters().dpc_flags; | 539 | return this->GetStackParameters().dpc_flags; |
| 548 | } | 540 | } |
| 549 | 541 | ||
| 550 | [[nodiscard]] bool HasDpc() const { | 542 | bool HasDpc() const { |
| 551 | return this->GetDpc() != 0; | 543 | return this->GetDpc() != 0; |
| 552 | } | 544 | } |
| 553 | 545 | ||
| 554 | void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) { | 546 | void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) { |
| 555 | wait_reason_for_debugging = reason; | 547 | m_wait_reason_for_debugging = reason; |
| 556 | } | ||
| 557 | |||
| 558 | [[nodiscard]] ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const { | ||
| 559 | return wait_reason_for_debugging; | ||
| 560 | } | ||
| 561 | |||
| 562 | [[nodiscard]] ThreadType GetThreadType() const { | ||
| 563 | return thread_type; | ||
| 564 | } | ||
| 565 | |||
| 566 | [[nodiscard]] bool IsDummyThread() const { | ||
| 567 | return GetThreadType() == ThreadType::Dummy; | ||
| 568 | } | ||
| 569 | |||
| 570 | void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) { | ||
| 571 | wait_objects_for_debugging.clear(); | ||
| 572 | wait_objects_for_debugging.reserve(objects.size()); | ||
| 573 | for (const auto& object : objects) { | ||
| 574 | wait_objects_for_debugging.emplace_back(object); | ||
| 575 | } | ||
| 576 | } | 548 | } |
| 577 | 549 | ||
| 578 | [[nodiscard]] const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const { | 550 | ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const { |
| 579 | return wait_objects_for_debugging; | 551 | return m_wait_reason_for_debugging; |
| 580 | } | 552 | } |
| 581 | 553 | ||
| 582 | void SetMutexWaitAddressForDebugging(VAddr address) { | 554 | ThreadType GetThreadType() const { |
| 583 | mutex_wait_address_for_debugging = address; | 555 | return m_thread_type; |
| 584 | } | 556 | } |
| 585 | 557 | ||
| 586 | [[nodiscard]] VAddr GetMutexWaitAddressForDebugging() const { | 558 | bool IsDummyThread() const { |
| 587 | return mutex_wait_address_for_debugging; | 559 | return this->GetThreadType() == ThreadType::Dummy; |
| 588 | } | ||
| 589 | |||
| 590 | [[nodiscard]] s32 GetIdealCoreForDebugging() const { | ||
| 591 | return virtual_ideal_core_id; | ||
| 592 | } | 560 | } |
| 593 | 561 | ||
| 594 | void AddWaiter(KThread* thread); | 562 | void AddWaiter(KThread* thread); |
| 595 | 563 | ||
| 596 | void RemoveWaiter(KThread* thread); | 564 | void RemoveWaiter(KThread* thread); |
| 597 | 565 | ||
| 598 | [[nodiscard]] Result GetThreadContext3(std::vector<u8>& out); | 566 | Result GetThreadContext3(std::vector<u8>& out); |
| 599 | 567 | ||
| 600 | [[nodiscard]] KThread* RemoveUserWaiterByKey(bool* out_has_waiters, VAddr key) { | 568 | KThread* RemoveUserWaiterByKey(bool* out_has_waiters, VAddr key) { |
| 601 | return this->RemoveWaiterByKey(out_has_waiters, key, false); | 569 | return this->RemoveWaiterByKey(out_has_waiters, key, false); |
| 602 | } | 570 | } |
| 603 | 571 | ||
| 604 | [[nodiscard]] KThread* RemoveKernelWaiterByKey(bool* out_has_waiters, VAddr key) { | 572 | KThread* RemoveKernelWaiterByKey(bool* out_has_waiters, VAddr key) { |
| 605 | return this->RemoveWaiterByKey(out_has_waiters, key, true); | 573 | return this->RemoveWaiterByKey(out_has_waiters, key, true); |
| 606 | } | 574 | } |
| 607 | 575 | ||
| 608 | [[nodiscard]] VAddr GetAddressKey() const { | 576 | VAddr GetAddressKey() const { |
| 609 | return address_key; | 577 | return m_address_key; |
| 610 | } | 578 | } |
| 611 | 579 | ||
| 612 | [[nodiscard]] u32 GetAddressKeyValue() const { | 580 | u32 GetAddressKeyValue() const { |
| 613 | return address_key_value; | 581 | return m_address_key_value; |
| 614 | } | 582 | } |
| 615 | 583 | ||
| 616 | [[nodiscard]] bool GetIsKernelAddressKey() const { | 584 | bool GetIsKernelAddressKey() const { |
| 617 | return is_kernel_address_key; | 585 | return m_is_kernel_address_key; |
| 618 | } | 586 | } |
| 619 | 587 | ||
| 620 | //! NB: intentional deviation from official kernel. | 588 | //! NB: intentional deviation from official kernel. |
| @@ -624,37 +592,37 @@ public: | |||
| 624 | // into things. | 592 | // into things. |
| 625 | 593 | ||
| 626 | void SetUserAddressKey(VAddr key, u32 val) { | 594 | void SetUserAddressKey(VAddr key, u32 val) { |
| 627 | ASSERT(waiting_lock_info == nullptr); | 595 | ASSERT(m_waiting_lock_info == nullptr); |
| 628 | address_key = key; | 596 | m_address_key = key; |
| 629 | address_key_value = val; | 597 | m_address_key_value = val; |
| 630 | is_kernel_address_key = false; | 598 | m_is_kernel_address_key = false; |
| 631 | } | 599 | } |
| 632 | 600 | ||
| 633 | void SetKernelAddressKey(VAddr key) { | 601 | void SetKernelAddressKey(VAddr key) { |
| 634 | ASSERT(waiting_lock_info == nullptr); | 602 | ASSERT(m_waiting_lock_info == nullptr); |
| 635 | address_key = key; | 603 | m_address_key = key; |
| 636 | is_kernel_address_key = true; | 604 | m_is_kernel_address_key = true; |
| 637 | } | 605 | } |
| 638 | 606 | ||
| 639 | void ClearWaitQueue() { | 607 | void ClearWaitQueue() { |
| 640 | wait_queue = nullptr; | 608 | m_wait_queue = nullptr; |
| 641 | } | 609 | } |
| 642 | 610 | ||
| 643 | void BeginWait(KThreadQueue* queue); | 611 | void BeginWait(KThreadQueue* queue); |
| 644 | void NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_); | 612 | void NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result); |
| 645 | void EndWait(Result wait_result_); | 613 | void EndWait(Result wait_result); |
| 646 | void CancelWait(Result wait_result_, bool cancel_timer_task); | 614 | void CancelWait(Result wait_result, bool cancel_timer_task); |
| 647 | 615 | ||
| 648 | [[nodiscard]] s32 GetNumKernelWaiters() const { | 616 | s32 GetNumKernelWaiters() const { |
| 649 | return num_kernel_waiters; | 617 | return m_num_kernel_waiters; |
| 650 | } | 618 | } |
| 651 | 619 | ||
| 652 | [[nodiscard]] u64 GetConditionVariableKey() const { | 620 | u64 GetConditionVariableKey() const { |
| 653 | return condvar_key; | 621 | return m_condvar_key; |
| 654 | } | 622 | } |
| 655 | 623 | ||
| 656 | [[nodiscard]] u64 GetAddressArbiterKey() const { | 624 | u64 GetAddressArbiterKey() const { |
| 657 | return condvar_key; | 625 | return m_condvar_key; |
| 658 | } | 626 | } |
| 659 | 627 | ||
| 660 | // Dummy threads (used for HLE host threads) cannot wait based on the guest scheduler, and | 628 | // Dummy threads (used for HLE host threads) cannot wait based on the guest scheduler, and |
| @@ -665,17 +633,16 @@ public: | |||
| 665 | void DummyThreadBeginWait(); | 633 | void DummyThreadBeginWait(); |
| 666 | void DummyThreadEndWait(); | 634 | void DummyThreadEndWait(); |
| 667 | 635 | ||
| 668 | [[nodiscard]] uintptr_t GetArgument() const { | 636 | uintptr_t GetArgument() const { |
| 669 | return argument; | 637 | return m_argument; |
| 670 | } | 638 | } |
| 671 | 639 | ||
| 672 | [[nodiscard]] VAddr GetUserStackTop() const { | 640 | VAddr GetUserStackTop() const { |
| 673 | return stack_top; | 641 | return m_stack_top; |
| 674 | } | 642 | } |
| 675 | 643 | ||
| 676 | private: | 644 | private: |
| 677 | [[nodiscard]] KThread* RemoveWaiterByKey(bool* out_has_waiters, VAddr key, | 645 | KThread* RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key); |
| 678 | bool is_kernel_address_key); | ||
| 679 | 646 | ||
| 680 | static constexpr size_t PriorityInheritanceCountMax = 10; | 647 | static constexpr size_t PriorityInheritanceCountMax = 10; |
| 681 | union SyncObjectBuffer { | 648 | union SyncObjectBuffer { |
| @@ -692,11 +659,11 @@ private: | |||
| 692 | u64 cv_key{}; | 659 | u64 cv_key{}; |
| 693 | s32 priority{}; | 660 | s32 priority{}; |
| 694 | 661 | ||
| 695 | [[nodiscard]] constexpr u64 GetConditionVariableKey() const { | 662 | constexpr u64 GetConditionVariableKey() const { |
| 696 | return cv_key; | 663 | return cv_key; |
| 697 | } | 664 | } |
| 698 | 665 | ||
| 699 | [[nodiscard]] constexpr s32 GetPriority() const { | 666 | constexpr s32 GetPriority() const { |
| 700 | return priority; | 667 | return priority; |
| 701 | } | 668 | } |
| 702 | }; | 669 | }; |
| @@ -728,22 +695,21 @@ private: | |||
| 728 | 695 | ||
| 729 | void IncreaseBasePriority(s32 priority); | 696 | void IncreaseBasePriority(s32 priority); |
| 730 | 697 | ||
| 731 | [[nodiscard]] Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, | 698 | Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, |
| 732 | s32 prio, s32 virt_core, KProcess* owner, ThreadType type); | 699 | s32 virt_core, KProcess* owner, ThreadType type); |
| 733 | 700 | ||
| 734 | [[nodiscard]] static Result InitializeThread(KThread* thread, KThreadFunction func, | 701 | static Result InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg, |
| 735 | uintptr_t arg, VAddr user_stack_top, s32 prio, | 702 | VAddr user_stack_top, s32 prio, s32 core, KProcess* owner, |
| 736 | s32 core, KProcess* owner, ThreadType type, | 703 | ThreadType type, std::function<void()>&& init_func); |
| 737 | std::function<void()>&& init_func); | ||
| 738 | 704 | ||
| 739 | // For core KThread implementation | 705 | // For core KThread implementation |
| 740 | ThreadContext32 thread_context_32{}; | 706 | ThreadContext32 m_thread_context_32{}; |
| 741 | ThreadContext64 thread_context_64{}; | 707 | ThreadContext64 m_thread_context_64{}; |
| 742 | Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{}; | 708 | Common::IntrusiveRedBlackTreeNode m_condvar_arbiter_tree_node{}; |
| 743 | s32 priority{}; | 709 | s32 m_priority{}; |
| 744 | using ConditionVariableThreadTreeTraits = | 710 | using ConditionVariableThreadTreeTraits = |
| 745 | Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert< | 711 | Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert< |
| 746 | &KThread::condvar_arbiter_tree_node>; | 712 | &KThread::m_condvar_arbiter_tree_node>; |
| 747 | using ConditionVariableThreadTree = | 713 | using ConditionVariableThreadTree = |
| 748 | ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>; | 714 | ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>; |
| 749 | 715 | ||
| @@ -773,7 +739,7 @@ private: | |||
| 773 | 739 | ||
| 774 | using LockWithPriorityInheritanceThreadTreeTraits = | 740 | using LockWithPriorityInheritanceThreadTreeTraits = |
| 775 | Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert< | 741 | Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert< |
| 776 | &KThread::condvar_arbiter_tree_node>; | 742 | &KThread::m_condvar_arbiter_tree_node>; |
| 777 | using LockWithPriorityInheritanceThreadTree = | 743 | using LockWithPriorityInheritanceThreadTree = |
| 778 | ConditionVariableThreadTreeTraits::TreeType<LockWithPriorityInheritanceComparator>; | 744 | ConditionVariableThreadTreeTraits::TreeType<LockWithPriorityInheritanceComparator>; |
| 779 | 745 | ||
| @@ -809,7 +775,7 @@ public: | |||
| 809 | waiter->SetWaitingLockInfo(this); | 775 | waiter->SetWaitingLockInfo(this); |
| 810 | } | 776 | } |
| 811 | 777 | ||
| 812 | [[nodiscard]] bool RemoveWaiter(KThread* waiter) { | 778 | bool RemoveWaiter(KThread* waiter) { |
| 813 | m_tree.erase(m_tree.iterator_to(*waiter)); | 779 | m_tree.erase(m_tree.iterator_to(*waiter)); |
| 814 | 780 | ||
| 815 | waiter->SetWaitingLockInfo(nullptr); | 781 | waiter->SetWaitingLockInfo(nullptr); |
| @@ -853,11 +819,11 @@ public: | |||
| 853 | }; | 819 | }; |
| 854 | 820 | ||
| 855 | void SetWaitingLockInfo(LockWithPriorityInheritanceInfo* lock) { | 821 | void SetWaitingLockInfo(LockWithPriorityInheritanceInfo* lock) { |
| 856 | waiting_lock_info = lock; | 822 | m_waiting_lock_info = lock; |
| 857 | } | 823 | } |
| 858 | 824 | ||
| 859 | LockWithPriorityInheritanceInfo* GetWaitingLockInfo() { | 825 | LockWithPriorityInheritanceInfo* GetWaitingLockInfo() { |
| 860 | return waiting_lock_info; | 826 | return m_waiting_lock_info; |
| 861 | } | 827 | } |
| 862 | 828 | ||
| 863 | void AddHeldLock(LockWithPriorityInheritanceInfo* lock_info); | 829 | void AddHeldLock(LockWithPriorityInheritanceInfo* lock_info); |
| @@ -867,110 +833,108 @@ private: | |||
| 867 | using LockWithPriorityInheritanceInfoList = | 833 | using LockWithPriorityInheritanceInfoList = |
| 868 | boost::intrusive::list<LockWithPriorityInheritanceInfo>; | 834 | boost::intrusive::list<LockWithPriorityInheritanceInfo>; |
| 869 | 835 | ||
| 870 | ConditionVariableThreadTree* condvar_tree{}; | 836 | ConditionVariableThreadTree* m_condvar_tree{}; |
| 871 | u64 condvar_key{}; | 837 | u64 m_condvar_key{}; |
| 872 | u64 virtual_affinity_mask{}; | 838 | u64 m_virtual_affinity_mask{}; |
| 873 | KAffinityMask physical_affinity_mask{}; | 839 | KAffinityMask m_physical_affinity_mask{}; |
| 874 | u64 thread_id{}; | 840 | u64 m_thread_id{}; |
| 875 | std::atomic<s64> cpu_time{}; | 841 | std::atomic<s64> m_cpu_time{}; |
| 876 | VAddr address_key{}; | 842 | VAddr m_address_key{}; |
| 877 | KProcess* parent{}; | 843 | KProcess* m_parent{}; |
| 878 | VAddr kernel_stack_top{}; | 844 | VAddr m_kernel_stack_top{}; |
| 879 | u32* light_ipc_data{}; | 845 | u32* m_light_ipc_data{}; |
| 880 | VAddr tls_address{}; | 846 | VAddr m_tls_address{}; |
| 881 | KLightLock activity_pause_lock; | 847 | KLightLock m_activity_pause_lock; |
| 882 | s64 schedule_count{}; | 848 | s64 m_schedule_count{}; |
| 883 | s64 last_scheduled_tick{}; | 849 | s64 m_last_scheduled_tick{}; |
| 884 | std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{}; | 850 | std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> m_per_core_priority_queue_entry{}; |
| 885 | KThreadQueue* wait_queue{}; | 851 | KThreadQueue* m_wait_queue{}; |
| 886 | LockWithPriorityInheritanceInfoList held_lock_info_list{}; | 852 | LockWithPriorityInheritanceInfoList m_held_lock_info_list{}; |
| 887 | LockWithPriorityInheritanceInfo* waiting_lock_info{}; | 853 | LockWithPriorityInheritanceInfo* m_waiting_lock_info{}; |
| 888 | WaiterList pinned_waiter_list{}; | 854 | WaiterList m_pinned_waiter_list{}; |
| 889 | u32 address_key_value{}; | 855 | u32 m_address_key_value{}; |
| 890 | u32 suspend_request_flags{}; | 856 | u32 m_suspend_request_flags{}; |
| 891 | u32 suspend_allowed_flags{}; | 857 | u32 m_suspend_allowed_flags{}; |
| 892 | s32 synced_index{}; | 858 | s32 m_synced_index{}; |
| 893 | Result wait_result{ResultSuccess}; | 859 | Result m_wait_result{ResultSuccess}; |
| 894 | s32 base_priority{}; | 860 | s32 m_base_priority{}; |
| 895 | s32 physical_ideal_core_id{}; | 861 | s32 m_physical_ideal_core_id{}; |
| 896 | s32 virtual_ideal_core_id{}; | 862 | s32 m_virtual_ideal_core_id{}; |
| 897 | s32 num_kernel_waiters{}; | 863 | s32 m_num_kernel_waiters{}; |
| 898 | s32 current_core_id{}; | 864 | s32 m_current_core_id{}; |
| 899 | s32 core_id{}; | 865 | s32 m_core_id{}; |
| 900 | KAffinityMask original_physical_affinity_mask{}; | 866 | KAffinityMask m_original_physical_affinity_mask{}; |
| 901 | s32 original_physical_ideal_core_id{}; | 867 | s32 m_original_physical_ideal_core_id{}; |
| 902 | s32 num_core_migration_disables{}; | 868 | s32 m_num_core_migration_disables{}; |
| 903 | std::atomic<ThreadState> thread_state{}; | 869 | std::atomic<ThreadState> m_thread_state{}; |
| 904 | std::atomic<bool> termination_requested{}; | 870 | std::atomic<bool> m_termination_requested{}; |
| 905 | bool wait_cancelled{}; | 871 | bool m_wait_cancelled{}; |
| 906 | bool cancellable{}; | 872 | bool m_cancellable{}; |
| 907 | bool signaled{}; | 873 | bool m_signaled{}; |
| 908 | bool initialized{}; | 874 | bool m_initialized{}; |
| 909 | bool debug_attached{}; | 875 | bool m_debug_attached{}; |
| 910 | s8 priority_inheritance_count{}; | 876 | s8 m_priority_inheritance_count{}; |
| 911 | bool resource_limit_release_hint{}; | 877 | bool m_resource_limit_release_hint{}; |
| 912 | bool is_kernel_address_key{}; | 878 | bool m_is_kernel_address_key{}; |
| 913 | StackParameters stack_parameters{}; | 879 | StackParameters m_stack_parameters{}; |
| 914 | Common::SpinLock context_guard{}; | 880 | Common::SpinLock m_context_guard{}; |
| 915 | 881 | ||
| 916 | // For emulation | 882 | // For emulation |
| 917 | std::shared_ptr<Common::Fiber> host_context{}; | 883 | std::shared_ptr<Common::Fiber> m_host_context{}; |
| 918 | bool is_single_core{}; | 884 | ThreadType m_thread_type{}; |
| 919 | ThreadType thread_type{}; | 885 | StepState m_step_state{}; |
| 920 | StepState step_state{}; | 886 | std::atomic<bool> m_dummy_thread_runnable{true}; |
| 921 | std::atomic<bool> dummy_thread_runnable{true}; | ||
| 922 | 887 | ||
| 923 | // For debugging | 888 | // For debugging |
| 924 | std::vector<KSynchronizationObject*> wait_objects_for_debugging; | 889 | std::vector<KSynchronizationObject*> m_wait_objects_for_debugging{}; |
| 925 | VAddr mutex_wait_address_for_debugging{}; | 890 | VAddr m_mutex_wait_address_for_debugging{}; |
| 926 | ThreadWaitReasonForDebugging wait_reason_for_debugging{}; | 891 | ThreadWaitReasonForDebugging m_wait_reason_for_debugging{}; |
| 927 | uintptr_t argument{}; | 892 | uintptr_t m_argument{}; |
| 928 | VAddr stack_top{}; | 893 | VAddr m_stack_top{}; |
| 929 | std::string name{}; | ||
| 930 | 894 | ||
| 931 | public: | 895 | public: |
| 932 | using ConditionVariableThreadTreeType = ConditionVariableThreadTree; | 896 | using ConditionVariableThreadTreeType = ConditionVariableThreadTree; |
| 933 | 897 | ||
| 934 | void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key, | 898 | void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key, |
| 935 | u32 value) { | 899 | u32 value) { |
| 936 | ASSERT(waiting_lock_info == nullptr); | 900 | ASSERT(m_waiting_lock_info == nullptr); |
| 937 | condvar_tree = tree; | 901 | m_condvar_tree = tree; |
| 938 | condvar_key = cv_key; | 902 | m_condvar_key = cv_key; |
| 939 | address_key = address; | 903 | m_address_key = address; |
| 940 | address_key_value = value; | 904 | m_address_key_value = value; |
| 941 | is_kernel_address_key = false; | 905 | m_is_kernel_address_key = false; |
| 942 | } | 906 | } |
| 943 | 907 | ||
| 944 | void ClearConditionVariable() { | 908 | void ClearConditionVariable() { |
| 945 | condvar_tree = nullptr; | 909 | m_condvar_tree = nullptr; |
| 946 | } | 910 | } |
| 947 | 911 | ||
| 948 | [[nodiscard]] bool IsWaitingForConditionVariable() const { | 912 | bool IsWaitingForConditionVariable() const { |
| 949 | return condvar_tree != nullptr; | 913 | return m_condvar_tree != nullptr; |
| 950 | } | 914 | } |
| 951 | 915 | ||
| 952 | void SetAddressArbiter(ConditionVariableThreadTree* tree, u64 address) { | 916 | void SetAddressArbiter(ConditionVariableThreadTree* tree, u64 address) { |
| 953 | ASSERT(waiting_lock_info == nullptr); | 917 | ASSERT(m_waiting_lock_info == nullptr); |
| 954 | condvar_tree = tree; | 918 | m_condvar_tree = tree; |
| 955 | condvar_key = address; | 919 | m_condvar_key = address; |
| 956 | } | 920 | } |
| 957 | 921 | ||
| 958 | void ClearAddressArbiter() { | 922 | void ClearAddressArbiter() { |
| 959 | condvar_tree = nullptr; | 923 | m_condvar_tree = nullptr; |
| 960 | } | 924 | } |
| 961 | 925 | ||
| 962 | [[nodiscard]] bool IsWaitingForAddressArbiter() const { | 926 | bool IsWaitingForAddressArbiter() const { |
| 963 | return condvar_tree != nullptr; | 927 | return m_condvar_tree != nullptr; |
| 964 | } | 928 | } |
| 965 | 929 | ||
| 966 | [[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const { | 930 | ConditionVariableThreadTree* GetConditionVariableTree() const { |
| 967 | return condvar_tree; | 931 | return m_condvar_tree; |
| 968 | } | 932 | } |
| 969 | }; | 933 | }; |
| 970 | 934 | ||
| 971 | class KScopedDisableDispatch { | 935 | class KScopedDisableDispatch { |
| 972 | public: | 936 | public: |
| 973 | [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel) : m_kernel{kernel} { | 937 | explicit KScopedDisableDispatch(KernelCore& kernel) : m_kernel{kernel} { |
| 974 | // If we are shutting down the kernel, none of this is relevant anymore. | 938 | // If we are shutting down the kernel, none of this is relevant anymore. |
| 975 | if (m_kernel.IsShuttingDown()) { | 939 | if (m_kernel.IsShuttingDown()) { |
| 976 | return; | 940 | return; |
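
The k_thread.h hunks above are the mechanical core of the conversion: data members gain an m_ prefix, [[nodiscard]] is dropped from member functions, trailing-underscore parameter names such as wait_result_ are cleaned up, long declarations are re-wrapped, and the unused std::string name member is removed outright. A minimal before/after sketch of the accessor style, using a placeholder Example class rather than the real KThread:

    #include <cstdint>
    using u64 = std::uint64_t; // stand-in for yuzu's common_types.h alias

    // Hypothetical illustration of the style conversion applied throughout this
    // commit; the class below is a placeholder, not yuzu code.
    class Example {
    public:
        // Old style (removed side of the diff):
        //   [[nodiscard]] u64 GetThreadID() const { return thread_id; }
        // New style: no [[nodiscard]] on accessors, "Id" instead of "ID",
        // and an m_-prefixed data member.
        u64 GetThreadId() const {
            return m_thread_id;
        }

    private:
        u64 m_thread_id{};
    };
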
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index c236e9976..f35fa95b5 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -214,7 +214,6 @@ struct KernelCore::Impl { | |||
| 214 | cores[i] = std::make_unique<Kernel::PhysicalCore>(i, system, *schedulers[i]); | 214 | cores[i] = std::make_unique<Kernel::PhysicalCore>(i, system, *schedulers[i]); |
| 215 | 215 | ||
| 216 | auto* main_thread{Kernel::KThread::Create(system.Kernel())}; | 216 | auto* main_thread{Kernel::KThread::Create(system.Kernel())}; |
| 217 | main_thread->SetName(fmt::format("MainThread:{}", core)); | ||
| 218 | main_thread->SetCurrentCore(core); | 217 | main_thread->SetCurrentCore(core); |
| 219 | ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess()); | 218 | ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess()); |
| 220 | 219 | ||
| @@ -356,7 +355,6 @@ struct KernelCore::Impl { | |||
| 356 | ASSERT(KThread::InitializeHighPriorityThread(system, shutdown_threads[core_id], {}, {}, | 355 | ASSERT(KThread::InitializeHighPriorityThread(system, shutdown_threads[core_id], {}, {}, |
| 357 | core_id) | 356 | core_id) |
| 358 | .IsSuccess()); | 357 | .IsSuccess()); |
| 359 | shutdown_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id)); | ||
| 360 | } | 358 | } |
| 361 | } | 359 | } |
| 362 | 360 | ||
| @@ -390,7 +388,6 @@ struct KernelCore::Impl { | |||
| 390 | KThread* GetHostDummyThread(KThread* existing_thread) { | 388 | KThread* GetHostDummyThread(KThread* existing_thread) { |
| 391 | auto initialize = [this](KThread* thread) { | 389 | auto initialize = [this](KThread* thread) { |
| 392 | ASSERT(KThread::InitializeDummyThread(thread, nullptr).IsSuccess()); | 390 | ASSERT(KThread::InitializeDummyThread(thread, nullptr).IsSuccess()); |
| 393 | thread->SetName(fmt::format("DummyThread:{}", next_host_thread_id++)); | ||
| 394 | return thread; | 391 | return thread; |
| 395 | }; | 392 | }; |
| 396 | 393 | ||
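
With the std::string name member gone from KThread, the kernel.cpp call sites that formatted per-thread names (MainThread:{}, SuspendThread:{}, DummyThread:{}) are simply deleted rather than updated. Where a human-readable label is still wanted for logging, it can be derived on demand; a hypothetical helper, not part of this commit, assuming only fmt and the GetThreadId() accessor used elsewhere in the diff:

    // Hypothetical helper (not in this commit): build a debug label on the fly
    // instead of storing a name on the thread object. Include paths assumed.
    #include <string>
    #include <fmt/format.h>
    #include "core/hle/kernel/k_thread.h"

    std::string MakeDebugLabel(const Kernel::KThread& thread) {
        return fmt::format("Thread[id={}]", thread.GetThreadId());
    }
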
diff --git a/src/core/hle/kernel/svc/svc_thread.cpp b/src/core/hle/kernel/svc/svc_thread.cpp index a16fc7ae3..50991fb62 100644 --- a/src/core/hle/kernel/svc/svc_thread.cpp +++ b/src/core/hle/kernel/svc/svc_thread.cpp | |||
| @@ -59,9 +59,6 @@ Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, | |||
| 59 | priority, core_id, std::addressof(process))); | 59 | priority, core_id, std::addressof(process))); |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | // Set the thread name for debugging purposes. | ||
| 63 | thread->SetName(fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *out_handle)); | ||
| 64 | |||
| 65 | // Commit the thread reservation. | 62 | // Commit the thread reservation. |
| 66 | thread_reservation.Commit(); | 63 | thread_reservation.Commit(); |
| 67 | 64 | ||
| @@ -252,7 +249,7 @@ Result GetThreadList(Core::System& system, s32* out_num_threads, VAddr out_threa | |||
| 252 | 249 | ||
| 253 | auto list_iter = thread_list.cbegin(); | 250 | auto list_iter = thread_list.cbegin(); |
| 254 | for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) { | 251 | for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) { |
| 255 | memory.Write64(out_thread_ids, (*list_iter)->GetThreadID()); | 252 | memory.Write64(out_thread_ids, (*list_iter)->GetThreadId()); |
| 256 | out_thread_ids += sizeof(u64); | 253 | out_thread_ids += sizeof(u64); |
| 257 | } | 254 | } |
| 258 | 255 | ||
diff --git a/src/core/hle/service/hle_ipc.cpp b/src/core/hle/service/hle_ipc.cpp index c221ffe11..cca697c64 100644 --- a/src/core/hle/service/hle_ipc.cpp +++ b/src/core/hle/service/hle_ipc.cpp | |||
| @@ -303,7 +303,7 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(Kernel::KThread& requesti | |||
| 303 | } | 303 | } |
| 304 | 304 | ||
| 305 | // Copy the translated command buffer back into the thread's command buffer area. | 305 | // Copy the translated command buffer back into the thread's command buffer area. |
| 306 | memory.WriteBlock(owner_process, requesting_thread.GetTLSAddress(), cmd_buf.data(), | 306 | memory.WriteBlock(owner_process, requesting_thread.GetTlsAddress(), cmd_buf.data(), |
| 307 | write_size * sizeof(u32)); | 307 | write_size * sizeof(u32)); |
| 308 | 308 | ||
| 309 | return ResultSuccess; | 309 | return ResultSuccess; |
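
The svc_thread.cpp and hle_ipc.cpp hunks are otherwise pure call-site churn from the accessor renames: GetThreadID() becomes GetThreadId() and GetTLSAddress() becomes GetTlsAddress(), so only the spelling of the call changes. A sketch of the post-rename IPC write-back, wrapped as a free function for illustration; the function name, parameter types, and include paths are assumptions mirroring the call shown above, not code from the commit:

    // Sketch (assumed, not verbatim from the commit): copy a translated command
    // buffer into the requesting thread's TLS block via the renamed accessor.
    #include <span>
    #include "common/common_types.h"          // u32
    #include "core/memory.h"                   // Core::Memory::Memory
    #include "core/hle/kernel/k_process.h"
    #include "core/hle/kernel/k_thread.h"

    void WriteBackCommandBuffer(Core::Memory::Memory& memory, Kernel::KProcess& owner_process,
                                const Kernel::KThread& requesting_thread,
                                std::span<const u32> cmd_buf) {
        memory.WriteBlock(owner_process, requesting_thread.GetTlsAddress(), cmd_buf.data(),
                          cmd_buf.size_bytes());
    }
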