Diffstat (limited to 'src')
23 files changed, 224 insertions(+), 140 deletions(-)
diff --git a/src/core/core.cpp b/src/core/core.cpp
index d3e84c4ef..5d8a61b3a 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -494,12 +494,6 @@ const ARM_Interface& System::CurrentArmInterface() const {
     return impl->kernel.CurrentPhysicalCore().ArmInterface();
 }
 
-std::size_t System::CurrentCoreIndex() const {
-    std::size_t core = impl->kernel.GetCurrentHostThreadID();
-    ASSERT(core < Core::Hardware::NUM_CPU_CORES);
-    return core;
-}
-
 Kernel::PhysicalCore& System::CurrentPhysicalCore() {
     return impl->kernel.CurrentPhysicalCore();
 }
diff --git a/src/core/core.h b/src/core/core.h
index ea143043c..cd9af0c07 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -205,9 +205,6 @@ public:
     /// Gets an ARM interface to the CPU core that is currently running
     [[nodiscard]] const ARM_Interface& CurrentArmInterface() const;
 
-    /// Gets the index of the currently running CPU core
-    [[nodiscard]] std::size_t CurrentCoreIndex() const;
-
     /// Gets the physical core for the CPU core that is currently running
     [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore();
 
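With System::CurrentCoreIndex() gone, the equivalent lookup now goes through the kernel, as the svc.cpp and k_address_arbiter.cpp hunks below show. A minimal before/after sketch of a call site (illustrative only, not part of this commit):

    // Before: asserted if called from a host thread that is not a core thread.
    const std::size_t core = system.CurrentCoreIndex();

    // After: non-core host threads are clamped to the last core index.
    const std::size_t core = system.Kernel().CurrentPhysicalCoreIndex();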
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 7e195346b..de2e5563e 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -21,34 +21,25 @@ namespace Core {
 CpuManager::CpuManager(System& system_) : system{system_} {}
 CpuManager::~CpuManager() = default;
 
-void CpuManager::ThreadStart(CpuManager& cpu_manager, std::size_t core) {
-    cpu_manager.RunThread(core);
+void CpuManager::ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager,
+                             std::size_t core) {
+    cpu_manager.RunThread(stop_token, core);
 }
 
 void CpuManager::Initialize() {
     running_mode = true;
     if (is_multicore) {
         for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
-            core_data[core].host_thread =
-                std::make_unique<std::thread>(ThreadStart, std::ref(*this), core);
+            core_data[core].host_thread = std::jthread(ThreadStart, std::ref(*this), core);
         }
     } else {
-        core_data[0].host_thread = std::make_unique<std::thread>(ThreadStart, std::ref(*this), 0);
+        core_data[0].host_thread = std::jthread(ThreadStart, std::ref(*this), 0);
     }
 }
 
 void CpuManager::Shutdown() {
     running_mode = false;
     Pause(false);
-    if (is_multicore) {
-        for (auto& data : core_data) {
-            data.host_thread->join();
-            data.host_thread.reset();
-        }
-    } else {
-        core_data[0].host_thread->join();
-        core_data[0].host_thread.reset();
-    }
 }
 
 std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() {
@@ -127,17 +118,18 @@ void CpuManager::MultiCoreRunGuestLoop() {
             physical_core = &kernel.CurrentPhysicalCore();
         }
         system.ExitDynarmicProfile();
-        physical_core->ArmInterface().ClearExclusiveState();
-        kernel.CurrentScheduler()->RescheduleCurrentCore();
+        {
+            Kernel::KScopedDisableDispatch dd(kernel);
+            physical_core->ArmInterface().ClearExclusiveState();
+        }
     }
 }
 
 void CpuManager::MultiCoreRunIdleThread() {
     auto& kernel = system.Kernel();
     while (true) {
-        auto& physical_core = kernel.CurrentPhysicalCore();
-        physical_core.Idle();
-        kernel.CurrentScheduler()->RescheduleCurrentCore();
+        Kernel::KScopedDisableDispatch dd(kernel);
+        kernel.CurrentPhysicalCore().Idle();
     }
 }
 
@@ -145,12 +137,12 @@ void CpuManager::MultiCoreRunSuspendThread() {
     auto& kernel = system.Kernel();
     kernel.CurrentScheduler()->OnThreadStart();
     while (true) {
-        auto core = kernel.GetCurrentHostThreadID();
+        auto core = kernel.CurrentPhysicalCoreIndex();
         auto& scheduler = *kernel.CurrentScheduler();
         Kernel::KThread* current_thread = scheduler.GetCurrentThread();
         Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
         ASSERT(scheduler.ContextSwitchPending());
-        ASSERT(core == kernel.GetCurrentHostThreadID());
+        ASSERT(core == kernel.CurrentPhysicalCoreIndex());
         scheduler.RescheduleCurrentCore();
     }
 }
@@ -317,7 +309,7 @@ void CpuManager::Pause(bool paused) {
     }
 }
 
-void CpuManager::RunThread(std::size_t core) {
+void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
     /// Initialization
     system.RegisterCoreThread(core);
     std::string name;
@@ -356,8 +348,8 @@ void CpuManager::RunThread(std::size_t core) {
         sc_sync_first_use = false;
     }
 
-    // Abort if emulation was killed before the session really starts
-    if (!system.IsPoweredOn()) {
+    // Emulation was stopped
+    if (stop_token.stop_requested()) {
        return;
     }
 
diff --git a/src/core/cpu_manager.h b/src/core/cpu_manager.h
index 140263b09..9d92d4af0 100644
--- a/src/core/cpu_manager.h
+++ b/src/core/cpu_manager.h
@@ -78,9 +78,9 @@ private:
     void SingleCoreRunSuspendThread();
     void SingleCorePause(bool paused);
 
-    static void ThreadStart(CpuManager& cpu_manager, std::size_t core);
+    static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core);
 
-    void RunThread(std::size_t core);
+    void RunThread(std::stop_token stop_token, std::size_t core);
 
     struct CoreData {
         std::shared_ptr<Common::Fiber> host_context;
@@ -89,7 +89,7 @@ private:
         std::atomic<bool> is_running;
         std::atomic<bool> is_paused;
         std::atomic<bool> initialized;
-        std::unique_ptr<std::thread> host_thread;
+        std::jthread host_thread;
     };
 
     std::atomic<bool> running_mode{};
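The std::jthread member is what lets CpuManager::Shutdown() drop its explicit join loop above: ~jthread() requests stop and then joins, and because ThreadStart's first parameter is std::stop_token, the constructor supplies one automatically. A self-contained sketch of that standard C++20 behavior (not part of this change):

    #include <chrono>
    #include <thread>

    int main() {
        std::jthread worker([](std::stop_token stop) {
            // Runs until the owning jthread is destroyed.
            while (!stop.stop_requested()) {
                std::this_thread::sleep_for(std::chrono::milliseconds(10));
            }
        });
    } // ~jthread(): request_stop(), then join()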
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 1b429bc1e..6771ef621 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -28,7 +28,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
 
 bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
@@ -58,7 +58,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
 
 bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index e4fcdbc67..165b76747 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -170,6 +170,10 @@ public:
         }
     }
 
+    const std::string& GetName() const {
+        return name;
+    }
+
 private:
     void RegisterWithKernel();
     void UnregisterWithKernel();
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index ef14ad1d2..4174f35fd 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -35,7 +35,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
 bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
                       u32 new_orr_mask) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.CurrentCoreIndex();
+    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
 
     // Load the value from the address.
     const auto expected = monitor.ExclusiveRead32(current_core, address);
diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp
index 6a420d5b0..d720c2dda 100644
--- a/src/core/hle/kernel/k_handle_table.cpp
+++ b/src/core/hle/kernel/k_handle_table.cpp
@@ -13,6 +13,7 @@ ResultCode KHandleTable::Finalize() {
     // Get the table and clear our record of it.
     u16 saved_table_size = 0;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         std::swap(m_table_size, saved_table_size);
@@ -43,6 +44,7 @@ bool KHandleTable::Remove(Handle handle) {
     // Find the object and free the entry.
     KAutoObject* obj = nullptr;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if (this->IsValidHandle(handle)) {
@@ -61,6 +63,7 @@ bool KHandleTable::Remove(Handle handle) {
 }
 
 ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.
@@ -83,6 +86,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
 }
 
 ResultCode KHandleTable::Reserve(Handle* out_handle) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.
@@ -93,6 +97,7 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) {
 }
 
 void KHandleTable::Unreserve(Handle handle) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.
@@ -111,6 +116,7 @@ void KHandleTable::Unreserve(Handle handle) {
 }
 
 void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index 2ff6aa160..75dcec7df 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
@@ -69,6 +69,7 @@ public:
     template <typename T = KAutoObject>
     KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
         // Lock and look up in table.
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if constexpr (std::is_same_v<T, KAutoObject>) {
@@ -123,6 +124,7 @@ public:
         size_t num_opened;
         {
             // Lock the table.
+            KScopedDisableDispatch dd(kernel);
             KScopedSpinLock lk(m_lock);
             for (num_opened = 0; num_opened < num_handles; num_opened++) {
                 // Get the current handle.
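Every handle-table path now disables dispatch before taking the spin lock, so the lock holder cannot be preempted mid-critical-section; since the guards unwind in reverse order, the spin lock is released before ~KScopedDisableDispatch() can trigger a reschedule. The resulting scope shape, sketched with the names used in this diff:

    {
        KScopedDisableDispatch dd(kernel); // 1. pin the thread to its core
        KScopedSpinLock lk(m_lock);        // 2. then take the spin lock
        // ... critical section ...
    }   // unlock first, then possibly reschedule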
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 8ead1a769..3d7e6707e 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -59,6 +59,7 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
     thread->GetContext64().cpu_registers[0] = 0;
     thread->GetContext32().cpu_registers[1] = thread_handle;
     thread->GetContext64().cpu_registers[1] = thread_handle;
+    thread->DisableDispatch();
 
     auto& kernel = system.Kernel();
     // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 6a7d80d03..6ddbae52c 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -376,20 +376,18 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
 }
 
 void KScheduler::DisableScheduling(KernelCore& kernel) {
-    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
-        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
-        scheduler->GetCurrentThread()->DisableDispatch();
-    }
+    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
+    GetCurrentThreadPointer(kernel)->DisableDispatch();
 }
 
 void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
-    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
-        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
-        if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
-            scheduler->GetCurrentThread()->EnableDispatch();
-        }
+    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 1);
+
+    if (GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() > 1) {
+        GetCurrentThreadPointer(kernel)->EnableDispatch();
+    } else {
+        RescheduleCores(kernel, cores_needing_scheduling);
     }
-    RescheduleCores(kernel, cores_needing_scheduling);
 }
 
 u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -617,13 +615,17 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
     state.highest_priority_thread = nullptr;
 }
 
-KScheduler::~KScheduler() {
+void KScheduler::Finalize() {
     if (idle_thread) {
         idle_thread->Close();
         idle_thread = nullptr;
     }
 }
 
+KScheduler::~KScheduler() {
+    ASSERT(!idle_thread);
+}
+
 KThread* KScheduler::GetCurrentThread() const {
     if (auto result = current_thread.load(); result) {
         return result;
@@ -642,10 +644,12 @@ void KScheduler::RescheduleCurrentCore() {
     if (phys_core.IsInterrupted()) {
         phys_core.ClearInterrupt();
     }
+
     guard.Lock();
     if (state.needs_scheduling.load()) {
         Schedule();
     } else {
+        GetCurrentThread()->EnableDispatch();
         guard.Unlock();
     }
 }
@@ -655,26 +659,33 @@ void KScheduler::OnThreadStart() {
 }
 
 void KScheduler::Unload(KThread* thread) {
+    ASSERT(thread);
+
     LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
 
-    if (thread) {
-        if (thread->IsCallingSvc()) {
-            thread->ClearIsCallingSvc();
-        }
-        if (!thread->IsTerminationRequested()) {
-            prev_thread = thread;
-
-            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
-            cpu_core.SaveContext(thread->GetContext32());
-            cpu_core.SaveContext(thread->GetContext64());
-            // Save the TPIDR_EL0 system register in case it was modified.
-            thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
-            cpu_core.ClearExclusiveState();
-        } else {
-            prev_thread = nullptr;
-        }
-        thread->context_guard.Unlock();
-    }
+    if (thread->IsCallingSvc()) {
+        thread->ClearIsCallingSvc();
+    }
+
+    auto& physical_core = system.Kernel().PhysicalCore(core_id);
+    if (!physical_core.IsInitialized()) {
+        return;
+    }
+
+    Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
+    cpu_core.SaveContext(thread->GetContext32());
+    cpu_core.SaveContext(thread->GetContext64());
+    // Save the TPIDR_EL0 system register in case it was modified.
+    thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+    cpu_core.ClearExclusiveState();
+
+    if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
+        prev_thread = thread;
+    } else {
+        prev_thread = nullptr;
+    }
+
+    thread->context_guard.Unlock();
 }
 
 void KScheduler::Reload(KThread* thread) {
@@ -683,11 +694,6 @@ void KScheduler::Reload(KThread* thread) {
     if (thread) {
         ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
 
-        auto* const thread_owner_process = thread->GetOwnerProcess();
-        if (thread_owner_process != nullptr) {
-            system.Kernel().MakeCurrentProcess(thread_owner_process);
-        }
-
         Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
         cpu_core.LoadContext(thread->GetContext32());
         cpu_core.LoadContext(thread->GetContext64());
@@ -705,7 +711,7 @@ void KScheduler::SwitchContextStep2() {
 }
 
 void KScheduler::ScheduleImpl() {
-    KThread* previous_thread = current_thread.load();
+    KThread* previous_thread = GetCurrentThread();
     KThread* next_thread = state.highest_priority_thread;
 
     state.needs_scheduling = false;
@@ -717,10 +723,15 @@ void KScheduler::ScheduleImpl() {
 
     // If we're not actually switching thread, there's nothing to do.
     if (next_thread == current_thread.load()) {
+        previous_thread->EnableDispatch();
         guard.Unlock();
         return;
     }
 
+    if (next_thread->GetCurrentCore() != core_id) {
+        next_thread->SetCurrentCore(core_id);
+    }
+
     current_thread.store(next_thread);
 
     KProcess* const previous_process = system.Kernel().CurrentProcess();
@@ -731,11 +742,7 @@ void KScheduler::ScheduleImpl() {
     Unload(previous_thread);
 
     std::shared_ptr<Common::Fiber>* old_context;
-    if (previous_thread != nullptr) {
-        old_context = &previous_thread->GetHostContext();
-    } else {
-        old_context = &idle_thread->GetHostContext();
-    }
+    old_context = &previous_thread->GetHostContext();
     guard.Unlock();
 
     Common::Fiber::YieldTo(*old_context, *switch_fiber);
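DisableScheduling()/EnableScheduling() now behave as a recursive no-preemption counter on the current thread: only the outermost enable releases dispatch, and that release happens through the reschedule path rather than a bare decrement. A reduced model of the protocol (hypothetical type, for intuition only):

    struct DispatchGuardModel {
        int disable_count = 0;

        void Disable() {
            ++disable_count;
        }

        // Returns true when the caller must reschedule: inner levels only
        // decrement; the outermost level hands the final decrement to the
        // scheduler (RescheduleCurrentCore in the hunks above).
        bool Enable() {
            if (disable_count > 1) {
                --disable_count;
                return false;
            }
            return true;
        }
    };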
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 12cfae919..516e0cdba 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -33,6 +33,8 @@ public:
     explicit KScheduler(Core::System& system_, s32 core_id_);
     ~KScheduler();
 
+    void Finalize();
+
     /// Reschedules to the next available thread (call after current thread is suspended)
     void RescheduleCurrentCore();
 
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 9f1d3156b..0f6808ade 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -14,6 +14,7 @@
 #include "common/fiber.h"
 #include "common/logging/log.h"
 #include "common/scope_exit.h"
+#include "common/settings.h"
 #include "common/thread_queue_list.h"
 #include "core/core.h"
 #include "core/cpu_manager.h"
@@ -188,7 +189,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
     // Setup the stack parameters.
     StackParameters& sp = GetStackParameters();
     sp.cur_thread = this;
-    sp.disable_count = 1;
+    sp.disable_count = 0;
     SetInExceptionHandler();
 
     // Set thread ID.
@@ -215,9 +216,10 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
     // Initialize the thread.
     R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
 
-    // Initialize host context.
+    // Initialize emulation parameters.
     thread->host_context =
         std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
+    thread->is_single_core = !Settings::values.use_multi_core.GetValue();
 
     return ResultSuccess;
 }
@@ -970,6 +972,9 @@ ResultCode KThread::Run() {
 
         // Set our state and finish.
         SetState(ThreadState::Runnable);
+
+        DisableDispatch();
+
         return ResultSuccess;
     }
 }
@@ -1054,4 +1059,16 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
     return GetCurrentThread(kernel).GetCurrentCore();
 }
 
+KScopedDisableDispatch::~KScopedDisableDispatch() {
+    if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
+        auto scheduler = kernel.CurrentScheduler();
+
+        if (scheduler) {
+            scheduler->RescheduleCurrentCore();
+        }
+    } else {
+        GetCurrentThread(kernel).EnableDispatch();
+    }
+}
+
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index c77f44ad4..e4c4c877d 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -450,16 +450,39 @@ public:
         sleeping_queue = q;
     }
 
+    [[nodiscard]] bool IsKernelThread() const {
+        return GetActiveCore() == 3;
+    }
+
+    [[nodiscard]] bool IsDispatchTrackingDisabled() const {
+        return is_single_core || IsKernelThread();
+    }
+
     [[nodiscard]] s32 GetDisableDispatchCount() const {
+        if (IsDispatchTrackingDisabled()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return 1;
+        }
+
         return this->GetStackParameters().disable_count;
     }
 
     void DisableDispatch() {
+        if (IsDispatchTrackingDisabled()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return;
+        }
+
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
         this->GetStackParameters().disable_count++;
     }
 
     void EnableDispatch() {
+        if (IsDispatchTrackingDisabled()) {
+            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
+            return;
+        }
+
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
         this->GetStackParameters().disable_count--;
     }
@@ -708,6 +731,7 @@ private:
 
     // For emulation
     std::shared_ptr<Common::Fiber> host_context{};
+    bool is_single_core{};
 
     // For debugging
     std::vector<KSynchronizationObject*> wait_objects_for_debugging;
@@ -752,4 +776,16 @@ public:
     }
 };
 
+class KScopedDisableDispatch {
+public:
+    [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
+        GetCurrentThread(kernel).DisableDispatch();
+    }
+
+    ~KScopedDisableDispatch();
+
+private:
+    KernelCore& kernel;
+};
+
 } // namespace Kernel
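KScopedDisableDispatch pairs the constructor's DisableDispatch() with the out-of-line destructor defined in k_thread.cpp above: at the outermost disable level it reschedules the current core, otherwise it simply calls EnableDispatch(). Typical use is the idle-loop hunk from cpu_manager.cpp earlier in this diff:

    while (true) {
        Kernel::KScopedDisableDispatch dd(kernel);
        kernel.CurrentPhysicalCore().Idle();
    } // ~KScopedDisableDispatch() reschedules the current core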
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 92fbc5532..8673384ee 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -85,8 +85,9 @@ struct KernelCore::Impl {
     }
 
     void InitializeCores() {
-        for (auto& core : cores) {
-            core.Initialize(current_process->Is64BitProcess());
+        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            cores[core_id].Initialize(current_process->Is64BitProcess());
+            system.Memory().SetCurrentPageTable(*current_process, core_id);
         }
     }
 
@@ -131,15 +132,6 @@ struct KernelCore::Impl {
         next_user_process_id = KProcess::ProcessIDMin;
         next_thread_id = 1;
 
-        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            if (suspend_threads[core_id]) {
-                suspend_threads[core_id]->Close();
-                suspend_threads[core_id] = nullptr;
-            }
-
-            schedulers[core_id].reset();
-        }
-
         cores.clear();
 
         global_handle_table->Finalize();
@@ -167,6 +159,16 @@ struct KernelCore::Impl {
         CleanupObject(time_shared_mem);
         CleanupObject(system_resource_limit);
 
+        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            if (suspend_threads[core_id]) {
+                suspend_threads[core_id]->Close();
+                suspend_threads[core_id] = nullptr;
+            }
+
+            schedulers[core_id]->Finalize();
+            schedulers[core_id].reset();
+        }
+
         // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others
         next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
 
@@ -257,14 +259,6 @@ struct KernelCore::Impl {
 
     void MakeCurrentProcess(KProcess* process) {
         current_process = process;
-        if (process == nullptr) {
-            return;
-        }
-
-        const u32 core_id = GetCurrentHostThreadID();
-        if (core_id < Core::Hardware::NUM_CPU_CORES) {
-            system.Memory().SetCurrentPageTable(*process, core_id);
-        }
     }
 
     /// Creates a new host thread ID, should only be called by GetHostThreadId
@@ -824,16 +818,20 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
     return impl->cores[id];
 }
 
+size_t KernelCore::CurrentPhysicalCoreIndex() const {
+    const u32 core_id = impl->GetCurrentHostThreadID();
+    if (core_id >= Core::Hardware::NUM_CPU_CORES) {
+        return Core::Hardware::NUM_CPU_CORES - 1;
+    }
+    return core_id;
+}
+
 Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
-    u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return impl->cores[core_id];
+    return impl->cores[CurrentPhysicalCoreIndex()];
 }
 
 const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
-    u32 core_id = impl->GetCurrentHostThreadID();
-    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
-    return impl->cores[core_id];
+    return impl->cores[CurrentPhysicalCoreIndex()];
 }
 
 Kernel::KScheduler* KernelCore::CurrentScheduler() {
@@ -1026,6 +1024,9 @@ void KernelCore::Suspend(bool in_suspention) {
             impl->suspend_threads[core_id]->SetState(state);
             impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
                 ThreadWaitReasonForDebugging::Suspended);
+            if (!should_suspend) {
+                impl->suspend_threads[core_id]->DisableDispatch();
+            }
         }
     }
 }
@@ -1040,13 +1041,11 @@ void KernelCore::ExceptionalExit() {
 }
 
 void KernelCore::EnterSVCProfile() {
-    std::size_t core = impl->GetCurrentHostThreadID();
-    impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
+    impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
}
 
 void KernelCore::ExitSVCProfile() {
-    std::size_t core = impl->GetCurrentHostThreadID();
-    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
+    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
 }
 
 std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
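Unlike the ASSERT it replaces, CurrentPhysicalCoreIndex() tolerates host threads that are not core threads (IDs >= NUM_CPU_CORES, per the comment above) by clamping to the last core. A hypothetical caller showing both cases:

    // Core threads: GetCurrentHostThreadID() is 0..3 and is returned as-is.
    // Any other host thread: the index clamps to NUM_CPU_CORES - 1.
    const std::size_t index = kernel.CurrentPhysicalCoreIndex();
    auto& core = kernel.PhysicalCore(index);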
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 3a6db0b1c..57535433b 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -146,6 +146,9 @@ public:
     /// Gets the an instance of the respective physical CPU core.
     const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
 
+    /// Gets the current physical core index for the running host thread.
+    std::size_t CurrentPhysicalCoreIndex() const;
+
     /// Gets the sole instance of the Scheduler at the current running core.
     Kernel::KScheduler* CurrentScheduler();
 
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 2eb532472..a90b291da 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -877,7 +877,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
         const u64 thread_ticks = current_thread->GetCpuTime();
 
         out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
-    } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
+    } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
         out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
     }
 
diff --git a/src/core/hle/service/nvflinger/buffer_queue.cpp b/src/core/hle/service/nvflinger/buffer_queue.cpp
index 59ddf6298..b4c3a6099 100644
--- a/src/core/hle/service/nvflinger/buffer_queue.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue.cpp
@@ -9,17 +9,20 @@
 #include "core/core.h"
 #include "core/hle/kernel/k_writable_event.h"
 #include "core/hle/kernel/kernel.h"
+#include "core/hle/service/kernel_helpers.h"
 #include "core/hle/service/nvflinger/buffer_queue.h"
 
 namespace Service::NVFlinger {
 
-BufferQueue::BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_)
-    : id(id_), layer_id(layer_id_), buffer_wait_event{kernel} {
-    Kernel::KAutoObject::Create(std::addressof(buffer_wait_event));
-    buffer_wait_event.Initialize("BufferQueue:WaitEvent");
+BufferQueue::BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_,
+                         KernelHelpers::ServiceContext& service_context_)
+    : id(id_), layer_id(layer_id_), service_context{service_context_} {
+    buffer_wait_event = service_context.CreateEvent("BufferQueue:WaitEvent");
 }
 
-BufferQueue::~BufferQueue() = default;
+BufferQueue::~BufferQueue() {
+    service_context.CloseEvent(buffer_wait_event);
+}
 
 void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer) {
     ASSERT(slot < buffer_slots);
@@ -41,7 +44,7 @@ void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer)
         .multi_fence = {},
     };
 
-    buffer_wait_event.GetWritableEvent().Signal();
+    buffer_wait_event->GetWritableEvent().Signal();
 }
 
 std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> BufferQueue::DequeueBuffer(u32 width,
@@ -119,7 +122,7 @@ void BufferQueue::CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& mult
     }
     free_buffers_condition.notify_one();
 
-    buffer_wait_event.GetWritableEvent().Signal();
+    buffer_wait_event->GetWritableEvent().Signal();
 }
 
 std::optional<std::reference_wrapper<const BufferQueue::Buffer>> BufferQueue::AcquireBuffer() {
@@ -154,7 +157,7 @@ void BufferQueue::ReleaseBuffer(u32 slot) {
     }
     free_buffers_condition.notify_one();
 
-    buffer_wait_event.GetWritableEvent().Signal();
+    buffer_wait_event->GetWritableEvent().Signal();
 }
 
 void BufferQueue::Connect() {
@@ -169,7 +172,7 @@ void BufferQueue::Disconnect() {
         std::unique_lock lock{queue_sequence_mutex};
         queue_sequence.clear();
     }
-    buffer_wait_event.GetWritableEvent().Signal();
+    buffer_wait_event->GetWritableEvent().Signal();
     is_connect = false;
     free_buffers_condition.notify_one();
 }
@@ -189,11 +192,11 @@ u32 BufferQueue::Query(QueryType type) {
 }
 
 Kernel::KWritableEvent& BufferQueue::GetWritableBufferWaitEvent() {
-    return buffer_wait_event.GetWritableEvent();
+    return buffer_wait_event->GetWritableEvent();
 }
 
 Kernel::KReadableEvent& BufferQueue::GetBufferWaitEvent() {
-    return buffer_wait_event.GetReadableEvent();
+    return buffer_wait_event->GetReadableEvent();
 }
 
 } // namespace Service::NVFlinger
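BufferQueue now routes event creation through KernelHelpers::ServiceContext: CreateEvent returns a kernel-managed KEvent* that the owner must hand back via CloseEvent, replacing the by-value KEvent member. A minimal owner following the same pattern (hypothetical class, mirroring the hunks above; Display in vi_display.cpp below makes the same change):

    class ExampleOwner {
    public:
        explicit ExampleOwner(Service::KernelHelpers::ServiceContext& context)
            : service_context{context} {
            // Event is created by, and owned through, the service context.
            ready_event = service_context.CreateEvent("ExampleOwner:ReadyEvent");
        }

        ~ExampleOwner() {
            // Must be returned to the context, not deleted directly.
            service_context.CloseEvent(ready_event);
        }

    private:
        Service::KernelHelpers::ServiceContext& service_context;
        Kernel::KEvent* ready_event{};
    };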
diff --git a/src/core/hle/service/nvflinger/buffer_queue.h b/src/core/hle/service/nvflinger/buffer_queue.h
index 61e337ac5..759247eb0 100644
--- a/src/core/hle/service/nvflinger/buffer_queue.h
+++ b/src/core/hle/service/nvflinger/buffer_queue.h
@@ -24,6 +24,10 @@ class KReadableEvent;
 class KWritableEvent;
 } // namespace Kernel
 
+namespace Service::KernelHelpers {
+class ServiceContext;
+} // namespace Service::KernelHelpers
+
 namespace Service::NVFlinger {
 
 constexpr u32 buffer_slots = 0x40;
@@ -54,7 +58,8 @@ public:
         NativeWindowFormat = 2,
     };
 
-    explicit BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_);
+    explicit BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_,
+                         KernelHelpers::ServiceContext& service_context_);
     ~BufferQueue();
 
     enum class BufferTransformFlags : u32 {
@@ -130,12 +135,14 @@ private:
     std::list<u32> free_buffers;
     std::array<Buffer, buffer_slots> buffers;
     std::list<u32> queue_sequence;
-    Kernel::KEvent buffer_wait_event;
+    Kernel::KEvent* buffer_wait_event{};
 
     std::mutex free_buffers_mutex;
     std::condition_variable free_buffers_condition;
 
     std::mutex queue_sequence_mutex;
+
+    KernelHelpers::ServiceContext& service_context;
 };
 
 } // namespace Service::NVFlinger
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index 941748970..00bff8caf 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -61,12 +61,13 @@ void NVFlinger::SplitVSync() {
     }
 }
 
-NVFlinger::NVFlinger(Core::System& system_) : system(system_) {
-    displays.emplace_back(0, "Default", system);
-    displays.emplace_back(1, "External", system);
-    displays.emplace_back(2, "Edid", system);
-    displays.emplace_back(3, "Internal", system);
-    displays.emplace_back(4, "Null", system);
+NVFlinger::NVFlinger(Core::System& system_)
+    : system(system_), service_context(system_, "nvflinger") {
+    displays.emplace_back(0, "Default", service_context, system);
+    displays.emplace_back(1, "External", service_context, system);
+    displays.emplace_back(2, "Edid", service_context, system);
+    displays.emplace_back(3, "Internal", service_context, system);
+    displays.emplace_back(4, "Null", service_context, system);
     guard = std::make_shared<std::mutex>();
 
     // Schedule the screen composition events
@@ -146,7 +147,7 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
 void NVFlinger::CreateLayerAtId(VI::Display& display, u64 layer_id) {
     const u32 buffer_queue_id = next_buffer_queue_id++;
     buffer_queues.emplace_back(
-        std::make_unique<BufferQueue>(system.Kernel(), buffer_queue_id, layer_id));
+        std::make_unique<BufferQueue>(system.Kernel(), buffer_queue_id, layer_id, service_context));
     display.CreateLayer(layer_id, *buffer_queues.back());
 }
 
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h
index d80fd07ef..6d84cafb4 100644
--- a/src/core/hle/service/nvflinger/nvflinger.h
+++ b/src/core/hle/service/nvflinger/nvflinger.h
@@ -15,6 +15,7 @@
 #include <vector>
 
 #include "common/common_types.h"
+#include "core/hle/service/kernel_helpers.h"
 
 namespace Common {
 class Event;
@@ -135,6 +136,8 @@ private:
     std::unique_ptr<std::thread> vsync_thread;
     std::unique_ptr<Common::Event> wait_event;
     std::atomic<bool> is_running{};
+
+    KernelHelpers::ServiceContext service_context;
 };
 
 } // namespace Service::NVFlinger
diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp
index 0dd342dbf..b7705c02a 100644
--- a/src/core/hle/service/vi/display/vi_display.cpp
+++ b/src/core/hle/service/vi/display/vi_display.cpp
@@ -12,18 +12,21 @@
 #include "core/hle/kernel/k_event.h"
 #include "core/hle/kernel/k_readable_event.h"
 #include "core/hle/kernel/k_writable_event.h"
+#include "core/hle/service/kernel_helpers.h"
 #include "core/hle/service/vi/display/vi_display.h"
 #include "core/hle/service/vi/layer/vi_layer.h"
 
 namespace Service::VI {
 
-Display::Display(u64 id, std::string name_, Core::System& system)
-    : display_id{id}, name{std::move(name_)}, vsync_event{system.Kernel()} {
-    Kernel::KAutoObject::Create(std::addressof(vsync_event));
-    vsync_event.Initialize(fmt::format("Display VSync Event {}", id));
+Display::Display(u64 id, std::string name_, KernelHelpers::ServiceContext& service_context_,
+                 Core::System& system_)
+    : display_id{id}, name{std::move(name_)}, service_context{service_context_} {
+    vsync_event = service_context.CreateEvent(fmt::format("Display VSync Event {}", id));
 }
 
-Display::~Display() = default;
+Display::~Display() {
+    service_context.CloseEvent(vsync_event);
+}
 
 Layer& Display::GetLayer(std::size_t index) {
     return *layers.at(index);
@@ -34,11 +37,11 @@ const Layer& Display::GetLayer(std::size_t index) const {
 }
 
 Kernel::KReadableEvent& Display::GetVSyncEvent() {
-    return vsync_event.GetReadableEvent();
+    return vsync_event->GetReadableEvent();
 }
 
 void Display::SignalVSyncEvent() {
-    vsync_event.GetWritableEvent().Signal();
+    vsync_event->GetWritableEvent().Signal();
 }
 
 void Display::CreateLayer(u64 layer_id, NVFlinger::BufferQueue& buffer_queue) {
diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h
index 166f2a4cc..0979fc421 100644
--- a/src/core/hle/service/vi/display/vi_display.h
+++ b/src/core/hle/service/vi/display/vi_display.h
@@ -18,6 +18,9 @@ class KEvent;
 namespace Service::NVFlinger {
 class BufferQueue;
 }
+namespace Service::KernelHelpers {
+class ServiceContext;
+} // namespace Service::KernelHelpers
 
 namespace Service::VI {
 
@@ -31,10 +34,13 @@ class Display {
 public:
     /// Constructs a display with a given unique ID and name.
     ///
     /// @param id The unique ID for this display.
+    /// @param service_context_ The ServiceContext for the owning service.
     /// @param name_ The name for this display.
+    /// @param system_ The global system instance.
     ///
-    Display(u64 id, std::string name_, Core::System& system);
+    Display(u64 id, std::string name_, KernelHelpers::ServiceContext& service_context_,
+            Core::System& system_);
     ~Display();
 
     /// Gets the unique ID assigned to this display.
@@ -98,9 +104,10 @@ public:
 private:
     u64 display_id;
     std::string name;
+    KernelHelpers::ServiceContext& service_context;
 
     std::vector<std::shared_ptr<Layer>> layers;
-    Kernel::KEvent vsync_event;
+    Kernel::KEvent* vsync_event{};
 };
 
 } // namespace Service::VI