| author | 2020-04-01 17:28:49 -0400 |
|---|---|
| committer | 2020-06-27 11:36:05 -0400 |
| commit | 48fa3b7a0f2054a836b0a8061e6b082c246b5ae0 (patch) |
| tree | 37a09cfb55f13ebf2df2b9a71622c599733100b0 /src/core/hle/kernel/kernel.cpp |
| parent | Kernel/svcBreak: Implement CacheInvalidation for Singlecore and correct svcBr... (diff) |
General: Cleanup legacy code.
Diffstat (limited to 'src/core/hle/kernel/kernel.cpp')
| -rw-r--r-- | src/core/hle/kernel/kernel.cpp | 81 |
1 file changed, 2 insertions(+), 79 deletions(-)
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 1f230fc4a..dbb75416d 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -48,72 +48,6 @@ MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));
 
 namespace Kernel {
 
-/**
- * Callback that will wake up the thread it was scheduled for
- * @param thread_handle The handle of the thread that's been awoken
- * @param cycles_late The number of CPU cycles that have passed since the desired wakeup time
- */
-static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
-    UNREACHABLE();
-    const auto proper_handle = static_cast<Handle>(thread_handle);
-    const auto& system = Core::System::GetInstance();
-
-    // Lock the global kernel mutex when we enter the kernel HLE.
-    std::lock_guard lock{HLE::g_hle_lock};
-
-    std::shared_ptr<Thread> thread =
-        system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
-    if (thread == nullptr) {
-        LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", proper_handle);
-        return;
-    }
-
-    bool resume = true;
-
-    if (thread->GetStatus() == ThreadStatus::WaitSynch ||
-        thread->GetStatus() == ThreadStatus::WaitHLEEvent) {
-        // Remove the thread from each of its waiting objects' waitlists
-        for (const auto& object : thread->GetSynchronizationObjects()) {
-            object->RemoveWaitingThread(thread);
-        }
-        thread->ClearSynchronizationObjects();
-
-        // Invoke the wakeup callback before clearing the wait objects
-        if (thread->HasWakeupCallback()) {
-            resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Timeout, thread, nullptr, 0);
-        }
-    } else if (thread->GetStatus() == ThreadStatus::WaitMutex ||
-               thread->GetStatus() == ThreadStatus::WaitCondVar) {
-        thread->SetMutexWaitAddress(0);
-        thread->SetWaitHandle(0);
-        if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
-            thread->GetOwnerProcess()->RemoveConditionVariableThread(thread);
-            thread->SetCondVarWaitAddress(0);
-        }
-
-        auto* const lock_owner = thread->GetLockOwner();
-        // Threads waking up by timeout from WaitProcessWideKey do not perform priority inheritance
-        // and don't have a lock owner unless SignalProcessWideKey was called first and the thread
-        // wasn't awakened due to the mutex already being acquired.
-        if (lock_owner != nullptr) {
-            lock_owner->RemoveMutexWaiter(thread);
-        }
-    }
-
-    if (thread->GetStatus() == ThreadStatus::WaitArb) {
-        auto& address_arbiter = thread->GetOwnerProcess()->GetAddressArbiter();
-        address_arbiter.HandleWakeupThread(thread);
-    }
-
-    if (resume) {
-        if (thread->GetStatus() == ThreadStatus::WaitCondVar ||
-            thread->GetStatus() == ThreadStatus::WaitArb) {
-            thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
-        }
-        thread->ResumeFromWait();
-    }
-}
-
 struct KernelCore::Impl {
     explicit Impl(Core::System& system, KernelCore& kernel)
         : global_scheduler{kernel}, synchronization{system}, time_manager{system}, system{system} {}
@@ -129,7 +63,6 @@ struct KernelCore::Impl {
         InitializePhysicalCores();
         InitializeSystemResourceLimit(kernel);
         InitializeMemoryLayout();
-        InitializeThreads();
         InitializePreemption(kernel);
         InitializeSchedulers();
         InitializeSuspendThreads();
@@ -161,7 +94,6 @@ struct KernelCore::Impl {
         system_resource_limit = nullptr;
 
         global_handle_table.Clear();
-        thread_wakeup_event_type = nullptr;
         preemption_event = nullptr;
 
         global_scheduler.Shutdown();
@@ -210,11 +142,6 @@ struct KernelCore::Impl {
         }
     }
 
-    void InitializeThreads() {
-        thread_wakeup_event_type =
-            Core::Timing::CreateEvent("ThreadWakeupCallback", ThreadWakeupCallback);
-    }
-
     void InitializePreemption(KernelCore& kernel) {
         preemption_event = Core::Timing::CreateEvent(
             "PreemptionCallback", [this, &kernel](u64 userdata, s64 cycles_late) {
@@ -376,7 +303,6 @@ struct KernelCore::Impl {
 
     std::shared_ptr<ResourceLimit> system_resource_limit;
 
-    std::shared_ptr<Core::Timing::EventType> thread_wakeup_event_type;
     std::shared_ptr<Core::Timing::EventType> preemption_event;
 
     // This is the kernel's handle table or supervisor handle table which
@@ -516,7 +442,8 @@ std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore
     return impl->interrupts;
 }
 
-const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() const {
+const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts()
+    const {
     return impl->interrupts;
 }
 
@@ -595,10 +522,6 @@ u64 KernelCore::CreateNewUserProcessID() {
     return impl->next_user_process_id++;
 }
 
-const std::shared_ptr<Core::Timing::EventType>& KernelCore::ThreadWakeupCallbackEventType() const {
-    return impl->thread_wakeup_event_type;
-}
-
 Kernel::HandleTable& KernelCore::GlobalHandleTable() {
     return impl->global_handle_table;
 }
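For context: the deleted code registered a wakeup handler with the core timing subsystem through Core::Timing::CreateEvent, the same pattern the surviving InitializePreemption() still uses (a named event whose callback receives a userdata value and how many cycles late it fired). The standalone C++ sketch below illustrates that scheduled-callback pattern only; EventType, CreateEvent, CoreTiming, ScheduleEvent and Advance here are simplified stand-ins assumed for illustration, not the actual yuzu Core::Timing implementation.

```cpp
// Minimal sketch of a named timed-event callback system, assuming a toy
// CoreTiming class; names mirror Core::Timing but the code is illustrative.
#include <cstdint>
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

using u64 = std::uint64_t;
using s64 = std::int64_t;

struct EventType {
    std::string name;
    std::function<void(u64 userdata, s64 cycles_late)> callback;
};

// Analogue of Core::Timing::CreateEvent: bundle a name with a callback.
std::shared_ptr<EventType> CreateEvent(std::string name,
                                       std::function<void(u64, s64)> callback) {
    return std::make_shared<EventType>(EventType{std::move(name), std::move(callback)});
}

class CoreTiming {
public:
    // Schedule `event` to fire after `cycles_into_future` emulated cycles.
    void ScheduleEvent(s64 cycles_into_future, std::shared_ptr<EventType> event, u64 userdata) {
        queue.emplace(current_cycle + cycles_into_future, Entry{std::move(event), userdata});
    }

    // Advance emulated time and fire every event whose deadline has passed,
    // reporting how many cycles late each callback actually ran.
    void Advance(s64 cycles) {
        current_cycle += cycles;
        while (!queue.empty() && queue.begin()->first <= current_cycle) {
            auto [deadline, entry] = *queue.begin();
            queue.erase(queue.begin());
            entry.event->callback(entry.userdata, current_cycle - deadline);
        }
    }

private:
    struct Entry {
        std::shared_ptr<EventType> event;
        u64 userdata;
    };
    s64 current_cycle = 0;
    std::multimap<s64, Entry> queue;
};

int main() {
    CoreTiming timing;
    // Loosely analogous to the removed thread_wakeup_event_type registration.
    auto wakeup = CreateEvent("ThreadWakeupCallback", [](u64 handle, s64 late) {
        std::cout << "wake thread handle=0x" << std::hex << handle << std::dec
                  << " (" << late << " cycles late)\n";
    });
    timing.ScheduleEvent(100, wakeup, /*thread_handle=*/0xABCD);
    timing.Advance(150); // fires the callback 50 cycles late
}
```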