diff options
Diffstat (limited to 'src')
35 files changed, 392 insertions, 426 deletions
diff --git a/src/common/logging/filter.cpp b/src/common/logging/filter.cpp index f055f0e11..42744c994 100644 --- a/src/common/logging/filter.cpp +++ b/src/common/logging/filter.cpp | |||
| @@ -111,6 +111,7 @@ bool ParseFilterRule(Filter& instance, Iterator begin, Iterator end) { | |||
| 111 | SUB(Service, NCM) \ | 111 | SUB(Service, NCM) \ |
| 112 | SUB(Service, NFC) \ | 112 | SUB(Service, NFC) \ |
| 113 | SUB(Service, NFP) \ | 113 | SUB(Service, NFP) \ |
| 114 | SUB(Service, NGCT) \ | ||
| 114 | SUB(Service, NIFM) \ | 115 | SUB(Service, NIFM) \ |
| 115 | SUB(Service, NIM) \ | 116 | SUB(Service, NIM) \ |
| 116 | SUB(Service, NPNS) \ | 117 | SUB(Service, NPNS) \ |
diff --git a/src/common/logging/types.h b/src/common/logging/types.h index 7ad0334fc..ddf9d27ca 100644 --- a/src/common/logging/types.h +++ b/src/common/logging/types.h | |||
| @@ -81,6 +81,7 @@ enum class Class : u8 { | |||
| 81 | Service_NCM, ///< The NCM service | 81 | Service_NCM, ///< The NCM service |
| 82 | Service_NFC, ///< The NFC (Near-field communication) service | 82 | Service_NFC, ///< The NFC (Near-field communication) service |
| 83 | Service_NFP, ///< The NFP service | 83 | Service_NFP, ///< The NFP service |
| 84 | Service_NGCT, ///< The NGCT (No Good Content for Terra) service | ||
| 84 | Service_NIFM, ///< The NIFM (Network interface) service | 85 | Service_NIFM, ///< The NIFM (Network interface) service |
| 85 | Service_NIM, ///< The NIM service | 86 | Service_NIM, ///< The NIM service |
| 86 | Service_NPNS, ///< The NPNS service | 87 | Service_NPNS, ///< The NPNS service |
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index f5cf5c16a..87d47e2e5 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt | |||
| @@ -452,6 +452,8 @@ add_library(core STATIC | |||
| 452 | hle/service/nfp/nfp.h | 452 | hle/service/nfp/nfp.h |
| 453 | hle/service/nfp/nfp_user.cpp | 453 | hle/service/nfp/nfp_user.cpp |
| 454 | hle/service/nfp/nfp_user.h | 454 | hle/service/nfp/nfp_user.h |
| 455 | hle/service/ngct/ngct.cpp | ||
| 456 | hle/service/ngct/ngct.h | ||
| 455 | hle/service/nifm/nifm.cpp | 457 | hle/service/nifm/nifm.cpp |
| 456 | hle/service/nifm/nifm.h | 458 | hle/service/nifm/nifm.h |
| 457 | hle/service/nim/nim.cpp | 459 | hle/service/nim/nim.cpp |
diff --git a/src/core/core.cpp b/src/core/core.cpp index 5893a86bf..ba4629993 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp | |||
| @@ -507,6 +507,12 @@ const ARM_Interface& System::CurrentArmInterface() const { | |||
| 507 | return impl->kernel.CurrentPhysicalCore().ArmInterface(); | 507 | return impl->kernel.CurrentPhysicalCore().ArmInterface(); |
| 508 | } | 508 | } |
| 509 | 509 | ||
| 510 | std::size_t System::CurrentCoreIndex() const { | ||
| 511 | std::size_t core = impl->kernel.GetCurrentHostThreadID(); | ||
| 512 | ASSERT(core < Core::Hardware::NUM_CPU_CORES); | ||
| 513 | return core; | ||
| 514 | } | ||
| 515 | |||
| 510 | Kernel::PhysicalCore& System::CurrentPhysicalCore() { | 516 | Kernel::PhysicalCore& System::CurrentPhysicalCore() { |
| 511 | return impl->kernel.CurrentPhysicalCore(); | 517 | return impl->kernel.CurrentPhysicalCore(); |
| 512 | } | 518 | } |
diff --git a/src/core/core.h b/src/core/core.h index f9116ebb6..715ab88e7 100644 --- a/src/core/core.h +++ b/src/core/core.h | |||
| @@ -205,6 +205,9 @@ public: | |||
| 205 | /// Gets an ARM interface to the CPU core that is currently running | 205 | /// Gets an ARM interface to the CPU core that is currently running |
| 206 | [[nodiscard]] const ARM_Interface& CurrentArmInterface() const; | 206 | [[nodiscard]] const ARM_Interface& CurrentArmInterface() const; |
| 207 | 207 | ||
| 208 | /// Gets the index of the currently running CPU core | ||
| 209 | [[nodiscard]] std::size_t CurrentCoreIndex() const; | ||
| 210 | |||
| 208 | /// Gets the physical core for the CPU core that is currently running | 211 | /// Gets the physical core for the CPU core that is currently running |
| 209 | [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore(); | 212 | [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore(); |
| 210 | 213 | ||
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp index de2e5563e..7e195346b 100644 --- a/src/core/cpu_manager.cpp +++ b/src/core/cpu_manager.cpp | |||
| @@ -21,25 +21,34 @@ namespace Core { | |||
| 21 | CpuManager::CpuManager(System& system_) : system{system_} {} | 21 | CpuManager::CpuManager(System& system_) : system{system_} {} |
| 22 | CpuManager::~CpuManager() = default; | 22 | CpuManager::~CpuManager() = default; |
| 23 | 23 | ||
| 24 | void CpuManager::ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, | 24 | void CpuManager::ThreadStart(CpuManager& cpu_manager, std::size_t core) { |
| 25 | std::size_t core) { | 25 | cpu_manager.RunThread(core); |
| 26 | cpu_manager.RunThread(stop_token, core); | ||
| 27 | } | 26 | } |
| 28 | 27 | ||
| 29 | void CpuManager::Initialize() { | 28 | void CpuManager::Initialize() { |
| 30 | running_mode = true; | 29 | running_mode = true; |
| 31 | if (is_multicore) { | 30 | if (is_multicore) { |
| 32 | for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { | 31 | for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { |
| 33 | core_data[core].host_thread = std::jthread(ThreadStart, std::ref(*this), core); | 32 | core_data[core].host_thread = |
| 33 | std::make_unique<std::thread>(ThreadStart, std::ref(*this), core); | ||
| 34 | } | 34 | } |
| 35 | } else { | 35 | } else { |
| 36 | core_data[0].host_thread = std::jthread(ThreadStart, std::ref(*this), 0); | 36 | core_data[0].host_thread = std::make_unique<std::thread>(ThreadStart, std::ref(*this), 0); |
| 37 | } | 37 | } |
| 38 | } | 38 | } |
| 39 | 39 | ||
| 40 | void CpuManager::Shutdown() { | 40 | void CpuManager::Shutdown() { |
| 41 | running_mode = false; | 41 | running_mode = false; |
| 42 | Pause(false); | 42 | Pause(false); |
| 43 | if (is_multicore) { | ||
| 44 | for (auto& data : core_data) { | ||
| 45 | data.host_thread->join(); | ||
| 46 | data.host_thread.reset(); | ||
| 47 | } | ||
| 48 | } else { | ||
| 49 | core_data[0].host_thread->join(); | ||
| 50 | core_data[0].host_thread.reset(); | ||
| 51 | } | ||
| 43 | } | 52 | } |
| 44 | 53 | ||
| 45 | std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() { | 54 | std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() { |
| @@ -118,18 +127,17 @@ void CpuManager::MultiCoreRunGuestLoop() { | |||
| 118 | physical_core = &kernel.CurrentPhysicalCore(); | 127 | physical_core = &kernel.CurrentPhysicalCore(); |
| 119 | } | 128 | } |
| 120 | system.ExitDynarmicProfile(); | 129 | system.ExitDynarmicProfile(); |
| 121 | { | 130 | physical_core->ArmInterface().ClearExclusiveState(); |
| 122 | Kernel::KScopedDisableDispatch dd(kernel); | 131 | kernel.CurrentScheduler()->RescheduleCurrentCore(); |
| 123 | physical_core->ArmInterface().ClearExclusiveState(); | ||
| 124 | } | ||
| 125 | } | 132 | } |
| 126 | } | 133 | } |
| 127 | 134 | ||
| 128 | void CpuManager::MultiCoreRunIdleThread() { | 135 | void CpuManager::MultiCoreRunIdleThread() { |
| 129 | auto& kernel = system.Kernel(); | 136 | auto& kernel = system.Kernel(); |
| 130 | while (true) { | 137 | while (true) { |
| 131 | Kernel::KScopedDisableDispatch dd(kernel); | 138 | auto& physical_core = kernel.CurrentPhysicalCore(); |
| 132 | kernel.CurrentPhysicalCore().Idle(); | 139 | physical_core.Idle(); |
| 140 | kernel.CurrentScheduler()->RescheduleCurrentCore(); | ||
| 133 | } | 141 | } |
| 134 | } | 142 | } |
| 135 | 143 | ||
| @@ -137,12 +145,12 @@ void CpuManager::MultiCoreRunSuspendThread() { | |||
| 137 | auto& kernel = system.Kernel(); | 145 | auto& kernel = system.Kernel(); |
| 138 | kernel.CurrentScheduler()->OnThreadStart(); | 146 | kernel.CurrentScheduler()->OnThreadStart(); |
| 139 | while (true) { | 147 | while (true) { |
| 140 | auto core = kernel.CurrentPhysicalCoreIndex(); | 148 | auto core = kernel.GetCurrentHostThreadID(); |
| 141 | auto& scheduler = *kernel.CurrentScheduler(); | 149 | auto& scheduler = *kernel.CurrentScheduler(); |
| 142 | Kernel::KThread* current_thread = scheduler.GetCurrentThread(); | 150 | Kernel::KThread* current_thread = scheduler.GetCurrentThread(); |
| 143 | Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context); | 151 | Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context); |
| 144 | ASSERT(scheduler.ContextSwitchPending()); | 152 | ASSERT(scheduler.ContextSwitchPending()); |
| 145 | ASSERT(core == kernel.CurrentPhysicalCoreIndex()); | 153 | ASSERT(core == kernel.GetCurrentHostThreadID()); |
| 146 | scheduler.RescheduleCurrentCore(); | 154 | scheduler.RescheduleCurrentCore(); |
| 147 | } | 155 | } |
| 148 | } | 156 | } |
| @@ -309,7 +317,7 @@ void CpuManager::Pause(bool paused) { | |||
| 309 | } | 317 | } |
| 310 | } | 318 | } |
| 311 | 319 | ||
| 312 | void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) { | 320 | void CpuManager::RunThread(std::size_t core) { |
| 313 | /// Initialization | 321 | /// Initialization |
| 314 | system.RegisterCoreThread(core); | 322 | system.RegisterCoreThread(core); |
| 315 | std::string name; | 323 | std::string name; |
| @@ -348,8 +356,8 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) { | |||
| 348 | sc_sync_first_use = false; | 356 | sc_sync_first_use = false; |
| 349 | } | 357 | } |
| 350 | 358 | ||
| 351 | // Emulation was stopped | 359 | // Abort if emulation was killed before the session really starts |
| 352 | if (stop_token.stop_requested()) { | 360 | if (!system.IsPoweredOn()) { |
| 353 | return; | 361 | return; |
| 354 | } | 362 | } |
| 355 | 363 | ||
diff --git a/src/core/cpu_manager.h b/src/core/cpu_manager.h index 9d92d4af0..140263b09 100644 --- a/src/core/cpu_manager.h +++ b/src/core/cpu_manager.h | |||
| @@ -78,9 +78,9 @@ private: | |||
| 78 | void SingleCoreRunSuspendThread(); | 78 | void SingleCoreRunSuspendThread(); |
| 79 | void SingleCorePause(bool paused); | 79 | void SingleCorePause(bool paused); |
| 80 | 80 | ||
| 81 | static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core); | 81 | static void ThreadStart(CpuManager& cpu_manager, std::size_t core); |
| 82 | 82 | ||
| 83 | void RunThread(std::stop_token stop_token, std::size_t core); | 83 | void RunThread(std::size_t core); |
| 84 | 84 | ||
| 85 | struct CoreData { | 85 | struct CoreData { |
| 86 | std::shared_ptr<Common::Fiber> host_context; | 86 | std::shared_ptr<Common::Fiber> host_context; |
| @@ -89,7 +89,7 @@ private: | |||
| 89 | std::atomic<bool> is_running; | 89 | std::atomic<bool> is_running; |
| 90 | std::atomic<bool> is_paused; | 90 | std::atomic<bool> is_paused; |
| 91 | std::atomic<bool> initialized; | 91 | std::atomic<bool> initialized; |
| 92 | std::jthread host_thread; | 92 | std::unique_ptr<std::thread> host_thread; |
| 93 | }; | 93 | }; |
| 94 | 94 | ||
| 95 | std::atomic<bool> running_mode{}; | 95 | std::atomic<bool> running_mode{}; |
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp index 6771ef621..1b429bc1e 100644 --- a/src/core/hle/kernel/k_address_arbiter.cpp +++ b/src/core/hle/kernel/k_address_arbiter.cpp | |||
| @@ -28,7 +28,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) { | |||
| 28 | 28 | ||
| 29 | bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) { | 29 | bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) { |
| 30 | auto& monitor = system.Monitor(); | 30 | auto& monitor = system.Monitor(); |
| 31 | const auto current_core = system.Kernel().CurrentPhysicalCoreIndex(); | 31 | const auto current_core = system.CurrentCoreIndex(); |
| 32 | 32 | ||
| 33 | // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. | 33 | // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. |
| 34 | // TODO(bunnei): We should call CanAccessAtomic(..) here. | 34 | // TODO(bunnei): We should call CanAccessAtomic(..) here. |
| @@ -58,7 +58,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu | |||
| 58 | 58 | ||
| 59 | bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) { | 59 | bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) { |
| 60 | auto& monitor = system.Monitor(); | 60 | auto& monitor = system.Monitor(); |
| 61 | const auto current_core = system.Kernel().CurrentPhysicalCoreIndex(); | 61 | const auto current_core = system.CurrentCoreIndex(); |
| 62 | 62 | ||
| 63 | // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. | 63 | // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. |
| 64 | // TODO(bunnei): We should call CanAccessAtomic(..) here. | 64 | // TODO(bunnei): We should call CanAccessAtomic(..) here. |
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h index 165b76747..e4fcdbc67 100644 --- a/src/core/hle/kernel/k_auto_object.h +++ b/src/core/hle/kernel/k_auto_object.h | |||
| @@ -170,10 +170,6 @@ public: | |||
| 170 | } | 170 | } |
| 171 | } | 171 | } |
| 172 | 172 | ||
| 173 | const std::string& GetName() const { | ||
| 174 | return name; | ||
| 175 | } | ||
| 176 | |||
| 177 | private: | 173 | private: |
| 178 | void RegisterWithKernel(); | 174 | void RegisterWithKernel(); |
| 179 | void UnregisterWithKernel(); | 175 | void UnregisterWithKernel(); |
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp index 4174f35fd..ef14ad1d2 100644 --- a/src/core/hle/kernel/k_condition_variable.cpp +++ b/src/core/hle/kernel/k_condition_variable.cpp | |||
| @@ -35,7 +35,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) { | |||
| 35 | bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero, | 35 | bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero, |
| 36 | u32 new_orr_mask) { | 36 | u32 new_orr_mask) { |
| 37 | auto& monitor = system.Monitor(); | 37 | auto& monitor = system.Monitor(); |
| 38 | const auto current_core = system.Kernel().CurrentPhysicalCoreIndex(); | 38 | const auto current_core = system.CurrentCoreIndex(); |
| 39 | 39 | ||
| 40 | // Load the value from the address. | 40 | // Load the value from the address. |
| 41 | const auto expected = monitor.ExclusiveRead32(current_core, address); | 41 | const auto expected = monitor.ExclusiveRead32(current_core, address); |
diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp index d720c2dda..6a420d5b0 100644 --- a/src/core/hle/kernel/k_handle_table.cpp +++ b/src/core/hle/kernel/k_handle_table.cpp | |||
| @@ -13,7 +13,6 @@ ResultCode KHandleTable::Finalize() { | |||
| 13 | // Get the table and clear our record of it. | 13 | // Get the table and clear our record of it. |
| 14 | u16 saved_table_size = 0; | 14 | u16 saved_table_size = 0; |
| 15 | { | 15 | { |
| 16 | KScopedDisableDispatch dd(kernel); | ||
| 17 | KScopedSpinLock lk(m_lock); | 16 | KScopedSpinLock lk(m_lock); |
| 18 | 17 | ||
| 19 | std::swap(m_table_size, saved_table_size); | 18 | std::swap(m_table_size, saved_table_size); |
| @@ -44,7 +43,6 @@ bool KHandleTable::Remove(Handle handle) { | |||
| 44 | // Find the object and free the entry. | 43 | // Find the object and free the entry. |
| 45 | KAutoObject* obj = nullptr; | 44 | KAutoObject* obj = nullptr; |
| 46 | { | 45 | { |
| 47 | KScopedDisableDispatch dd(kernel); | ||
| 48 | KScopedSpinLock lk(m_lock); | 46 | KScopedSpinLock lk(m_lock); |
| 49 | 47 | ||
| 50 | if (this->IsValidHandle(handle)) { | 48 | if (this->IsValidHandle(handle)) { |
| @@ -63,7 +61,6 @@ bool KHandleTable::Remove(Handle handle) { | |||
| 63 | } | 61 | } |
| 64 | 62 | ||
| 65 | ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) { | 63 | ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) { |
| 66 | KScopedDisableDispatch dd(kernel); | ||
| 67 | KScopedSpinLock lk(m_lock); | 64 | KScopedSpinLock lk(m_lock); |
| 68 | 65 | ||
| 69 | // Never exceed our capacity. | 66 | // Never exceed our capacity. |
| @@ -86,7 +83,6 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) { | |||
| 86 | } | 83 | } |
| 87 | 84 | ||
| 88 | ResultCode KHandleTable::Reserve(Handle* out_handle) { | 85 | ResultCode KHandleTable::Reserve(Handle* out_handle) { |
| 89 | KScopedDisableDispatch dd(kernel); | ||
| 90 | KScopedSpinLock lk(m_lock); | 86 | KScopedSpinLock lk(m_lock); |
| 91 | 87 | ||
| 92 | // Never exceed our capacity. | 88 | // Never exceed our capacity. |
| @@ -97,7 +93,6 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) { | |||
| 97 | } | 93 | } |
| 98 | 94 | ||
| 99 | void KHandleTable::Unreserve(Handle handle) { | 95 | void KHandleTable::Unreserve(Handle handle) { |
| 100 | KScopedDisableDispatch dd(kernel); | ||
| 101 | KScopedSpinLock lk(m_lock); | 96 | KScopedSpinLock lk(m_lock); |
| 102 | 97 | ||
| 103 | // Unpack the handle. | 98 | // Unpack the handle. |
| @@ -116,7 +111,6 @@ void KHandleTable::Unreserve(Handle handle) { | |||
| 116 | } | 111 | } |
| 117 | 112 | ||
| 118 | void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) { | 113 | void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) { |
| 119 | KScopedDisableDispatch dd(kernel); | ||
| 120 | KScopedSpinLock lk(m_lock); | 114 | KScopedSpinLock lk(m_lock); |
| 121 | 115 | ||
| 122 | // Unpack the handle. | 116 | // Unpack the handle. |
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h index 75dcec7df..2ff6aa160 100644 --- a/src/core/hle/kernel/k_handle_table.h +++ b/src/core/hle/kernel/k_handle_table.h | |||
| @@ -69,7 +69,6 @@ public: | |||
| 69 | template <typename T = KAutoObject> | 69 | template <typename T = KAutoObject> |
| 70 | KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const { | 70 | KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const { |
| 71 | // Lock and look up in table. | 71 | // Lock and look up in table. |
| 72 | KScopedDisableDispatch dd(kernel); | ||
| 73 | KScopedSpinLock lk(m_lock); | 72 | KScopedSpinLock lk(m_lock); |
| 74 | 73 | ||
| 75 | if constexpr (std::is_same_v<T, KAutoObject>) { | 74 | if constexpr (std::is_same_v<T, KAutoObject>) { |
| @@ -124,7 +123,6 @@ public: | |||
| 124 | size_t num_opened; | 123 | size_t num_opened; |
| 125 | { | 124 | { |
| 126 | // Lock the table. | 125 | // Lock the table. |
| 127 | KScopedDisableDispatch dd(kernel); | ||
| 128 | KScopedSpinLock lk(m_lock); | 126 | KScopedSpinLock lk(m_lock); |
| 129 | for (num_opened = 0; num_opened < num_handles; num_opened++) { | 127 | for (num_opened = 0; num_opened < num_handles; num_opened++) { |
| 130 | // Get the current handle. | 128 | // Get the current handle. |
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 3d7e6707e..8ead1a769 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp | |||
| @@ -59,7 +59,6 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority | |||
| 59 | thread->GetContext64().cpu_registers[0] = 0; | 59 | thread->GetContext64().cpu_registers[0] = 0; |
| 60 | thread->GetContext32().cpu_registers[1] = thread_handle; | 60 | thread->GetContext32().cpu_registers[1] = thread_handle; |
| 61 | thread->GetContext64().cpu_registers[1] = thread_handle; | 61 | thread->GetContext64().cpu_registers[1] = thread_handle; |
| 62 | thread->DisableDispatch(); | ||
| 63 | 62 | ||
| 64 | auto& kernel = system.Kernel(); | 63 | auto& kernel = system.Kernel(); |
| 65 | // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires | 64 | // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires |
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index 6ddbae52c..6a7d80d03 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp | |||
| @@ -376,18 +376,20 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) { | |||
| 376 | } | 376 | } |
| 377 | 377 | ||
| 378 | void KScheduler::DisableScheduling(KernelCore& kernel) { | 378 | void KScheduler::DisableScheduling(KernelCore& kernel) { |
| 379 | ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0); | 379 | if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { |
| 380 | GetCurrentThreadPointer(kernel)->DisableDispatch(); | 380 | ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0); |
| 381 | scheduler->GetCurrentThread()->DisableDispatch(); | ||
| 382 | } | ||
| 381 | } | 383 | } |
| 382 | 384 | ||
| 383 | void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) { | 385 | void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) { |
| 384 | ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 1); | 386 | if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { |
| 385 | 387 | ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1); | |
| 386 | if (GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() > 1) { | 388 | if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) { |
| 387 | GetCurrentThreadPointer(kernel)->EnableDispatch(); | 389 | scheduler->GetCurrentThread()->EnableDispatch(); |
| 388 | } else { | 390 | } |
| 389 | RescheduleCores(kernel, cores_needing_scheduling); | ||
| 390 | } | 391 | } |
| 392 | RescheduleCores(kernel, cores_needing_scheduling); | ||
| 391 | } | 393 | } |
| 392 | 394 | ||
| 393 | u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { | 395 | u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { |
| @@ -615,17 +617,13 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c | |||
| 615 | state.highest_priority_thread = nullptr; | 617 | state.highest_priority_thread = nullptr; |
| 616 | } | 618 | } |
| 617 | 619 | ||
| 618 | void KScheduler::Finalize() { | 620 | KScheduler::~KScheduler() { |
| 619 | if (idle_thread) { | 621 | if (idle_thread) { |
| 620 | idle_thread->Close(); | 622 | idle_thread->Close(); |
| 621 | idle_thread = nullptr; | 623 | idle_thread = nullptr; |
| 622 | } | 624 | } |
| 623 | } | 625 | } |
| 624 | 626 | ||
| 625 | KScheduler::~KScheduler() { | ||
| 626 | ASSERT(!idle_thread); | ||
| 627 | } | ||
| 628 | |||
| 629 | KThread* KScheduler::GetCurrentThread() const { | 627 | KThread* KScheduler::GetCurrentThread() const { |
| 630 | if (auto result = current_thread.load(); result) { | 628 | if (auto result = current_thread.load(); result) { |
| 631 | return result; | 629 | return result; |
| @@ -644,12 +642,10 @@ void KScheduler::RescheduleCurrentCore() { | |||
| 644 | if (phys_core.IsInterrupted()) { | 642 | if (phys_core.IsInterrupted()) { |
| 645 | phys_core.ClearInterrupt(); | 643 | phys_core.ClearInterrupt(); |
| 646 | } | 644 | } |
| 647 | |||
| 648 | guard.Lock(); | 645 | guard.Lock(); |
| 649 | if (state.needs_scheduling.load()) { | 646 | if (state.needs_scheduling.load()) { |
| 650 | Schedule(); | 647 | Schedule(); |
| 651 | } else { | 648 | } else { |
| 652 | GetCurrentThread()->EnableDispatch(); | ||
| 653 | guard.Unlock(); | 649 | guard.Unlock(); |
| 654 | } | 650 | } |
| 655 | } | 651 | } |
| @@ -659,33 +655,26 @@ void KScheduler::OnThreadStart() { | |||
| 659 | } | 655 | } |
| 660 | 656 | ||
| 661 | void KScheduler::Unload(KThread* thread) { | 657 | void KScheduler::Unload(KThread* thread) { |
| 662 | ASSERT(thread); | ||
| 663 | |||
| 664 | LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr"); | 658 | LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr"); |
| 665 | 659 | ||
| 666 | if (thread->IsCallingSvc()) { | 660 | if (thread) { |
| 667 | thread->ClearIsCallingSvc(); | 661 | if (thread->IsCallingSvc()) { |
| 668 | } | 662 | thread->ClearIsCallingSvc(); |
| 669 | 663 | } | |
| 670 | auto& physical_core = system.Kernel().PhysicalCore(core_id); | 664 | if (!thread->IsTerminationRequested()) { |
| 671 | if (!physical_core.IsInitialized()) { | 665 | prev_thread = thread; |
| 672 | return; | 666 | |
| 673 | } | 667 | Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); |
| 674 | 668 | cpu_core.SaveContext(thread->GetContext32()); | |
| 675 | Core::ARM_Interface& cpu_core = physical_core.ArmInterface(); | 669 | cpu_core.SaveContext(thread->GetContext64()); |
| 676 | cpu_core.SaveContext(thread->GetContext32()); | 670 | // Save the TPIDR_EL0 system register in case it was modified. |
| 677 | cpu_core.SaveContext(thread->GetContext64()); | 671 | thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); |
| 678 | // Save the TPIDR_EL0 system register in case it was modified. | 672 | cpu_core.ClearExclusiveState(); |
| 679 | thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); | 673 | } else { |
| 680 | cpu_core.ClearExclusiveState(); | 674 | prev_thread = nullptr; |
| 681 | 675 | } | |
| 682 | if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) { | 676 | thread->context_guard.Unlock(); |
| 683 | prev_thread = thread; | ||
| 684 | } else { | ||
| 685 | prev_thread = nullptr; | ||
| 686 | } | 677 | } |
| 687 | |||
| 688 | thread->context_guard.Unlock(); | ||
| 689 | } | 678 | } |
| 690 | 679 | ||
| 691 | void KScheduler::Reload(KThread* thread) { | 680 | void KScheduler::Reload(KThread* thread) { |
| @@ -694,6 +683,11 @@ void KScheduler::Reload(KThread* thread) { | |||
| 694 | if (thread) { | 683 | if (thread) { |
| 695 | ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable."); | 684 | ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable."); |
| 696 | 685 | ||
| 686 | auto* const thread_owner_process = thread->GetOwnerProcess(); | ||
| 687 | if (thread_owner_process != nullptr) { | ||
| 688 | system.Kernel().MakeCurrentProcess(thread_owner_process); | ||
| 689 | } | ||
| 690 | |||
| 697 | Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); | 691 | Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); |
| 698 | cpu_core.LoadContext(thread->GetContext32()); | 692 | cpu_core.LoadContext(thread->GetContext32()); |
| 699 | cpu_core.LoadContext(thread->GetContext64()); | 693 | cpu_core.LoadContext(thread->GetContext64()); |
| @@ -711,7 +705,7 @@ void KScheduler::SwitchContextStep2() { | |||
| 711 | } | 705 | } |
| 712 | 706 | ||
| 713 | void KScheduler::ScheduleImpl() { | 707 | void KScheduler::ScheduleImpl() { |
| 714 | KThread* previous_thread = GetCurrentThread(); | 708 | KThread* previous_thread = current_thread.load(); |
| 715 | KThread* next_thread = state.highest_priority_thread; | 709 | KThread* next_thread = state.highest_priority_thread; |
| 716 | 710 | ||
| 717 | state.needs_scheduling = false; | 711 | state.needs_scheduling = false; |
| @@ -723,15 +717,10 @@ void KScheduler::ScheduleImpl() { | |||
| 723 | 717 | ||
| 724 | // If we're not actually switching thread, there's nothing to do. | 718 | // If we're not actually switching thread, there's nothing to do. |
| 725 | if (next_thread == current_thread.load()) { | 719 | if (next_thread == current_thread.load()) { |
| 726 | previous_thread->EnableDispatch(); | ||
| 727 | guard.Unlock(); | 720 | guard.Unlock(); |
| 728 | return; | 721 | return; |
| 729 | } | 722 | } |
| 730 | 723 | ||
| 731 | if (next_thread->GetCurrentCore() != core_id) { | ||
| 732 | next_thread->SetCurrentCore(core_id); | ||
| 733 | } | ||
| 734 | |||
| 735 | current_thread.store(next_thread); | 724 | current_thread.store(next_thread); |
| 736 | 725 | ||
| 737 | KProcess* const previous_process = system.Kernel().CurrentProcess(); | 726 | KProcess* const previous_process = system.Kernel().CurrentProcess(); |
| @@ -742,7 +731,11 @@ void KScheduler::ScheduleImpl() { | |||
| 742 | Unload(previous_thread); | 731 | Unload(previous_thread); |
| 743 | 732 | ||
| 744 | std::shared_ptr<Common::Fiber>* old_context; | 733 | std::shared_ptr<Common::Fiber>* old_context; |
| 745 | old_context = &previous_thread->GetHostContext(); | 734 | if (previous_thread != nullptr) { |
| 735 | old_context = &previous_thread->GetHostContext(); | ||
| 736 | } else { | ||
| 737 | old_context = &idle_thread->GetHostContext(); | ||
| 738 | } | ||
| 746 | guard.Unlock(); | 739 | guard.Unlock(); |
| 747 | 740 | ||
| 748 | Common::Fiber::YieldTo(*old_context, *switch_fiber); | 741 | Common::Fiber::YieldTo(*old_context, *switch_fiber); |
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index 516e0cdba..12cfae919 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h | |||
| @@ -33,8 +33,6 @@ public: | |||
| 33 | explicit KScheduler(Core::System& system_, s32 core_id_); | 33 | explicit KScheduler(Core::System& system_, s32 core_id_); |
| 34 | ~KScheduler(); | 34 | ~KScheduler(); |
| 35 | 35 | ||
| 36 | void Finalize(); | ||
| 37 | |||
| 38 | /// Reschedules to the next available thread (call after current thread is suspended) | 36 | /// Reschedules to the next available thread (call after current thread is suspended) |
| 39 | void RescheduleCurrentCore(); | 37 | void RescheduleCurrentCore(); |
| 40 | 38 | ||
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index 0f6808ade..9f1d3156b 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp | |||
| @@ -14,7 +14,6 @@ | |||
| 14 | #include "common/fiber.h" | 14 | #include "common/fiber.h" |
| 15 | #include "common/logging/log.h" | 15 | #include "common/logging/log.h" |
| 16 | #include "common/scope_exit.h" | 16 | #include "common/scope_exit.h" |
| 17 | #include "common/settings.h" | ||
| 18 | #include "common/thread_queue_list.h" | 17 | #include "common/thread_queue_list.h" |
| 19 | #include "core/core.h" | 18 | #include "core/core.h" |
| 20 | #include "core/cpu_manager.h" | 19 | #include "core/cpu_manager.h" |
| @@ -189,7 +188,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s | |||
| 189 | // Setup the stack parameters. | 188 | // Setup the stack parameters. |
| 190 | StackParameters& sp = GetStackParameters(); | 189 | StackParameters& sp = GetStackParameters(); |
| 191 | sp.cur_thread = this; | 190 | sp.cur_thread = this; |
| 192 | sp.disable_count = 0; | 191 | sp.disable_count = 1; |
| 193 | SetInExceptionHandler(); | 192 | SetInExceptionHandler(); |
| 194 | 193 | ||
| 195 | // Set thread ID. | 194 | // Set thread ID. |
| @@ -216,10 +215,9 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint | |||
| 216 | // Initialize the thread. | 215 | // Initialize the thread. |
| 217 | R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type)); | 216 | R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type)); |
| 218 | 217 | ||
| 219 | // Initialize emulation parameters. | 218 | // Initialize host context. |
| 220 | thread->host_context = | 219 | thread->host_context = |
| 221 | std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter); | 220 | std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter); |
| 222 | thread->is_single_core = !Settings::values.use_multi_core.GetValue(); | ||
| 223 | 221 | ||
| 224 | return ResultSuccess; | 222 | return ResultSuccess; |
| 225 | } | 223 | } |
| @@ -972,9 +970,6 @@ ResultCode KThread::Run() { | |||
| 972 | 970 | ||
| 973 | // Set our state and finish. | 971 | // Set our state and finish. |
| 974 | SetState(ThreadState::Runnable); | 972 | SetState(ThreadState::Runnable); |
| 975 | |||
| 976 | DisableDispatch(); | ||
| 977 | |||
| 978 | return ResultSuccess; | 973 | return ResultSuccess; |
| 979 | } | 974 | } |
| 980 | } | 975 | } |
| @@ -1059,16 +1054,4 @@ s32 GetCurrentCoreId(KernelCore& kernel) { | |||
| 1059 | return GetCurrentThread(kernel).GetCurrentCore(); | 1054 | return GetCurrentThread(kernel).GetCurrentCore(); |
| 1060 | } | 1055 | } |
| 1061 | 1056 | ||
| 1062 | KScopedDisableDispatch::~KScopedDisableDispatch() { | ||
| 1063 | if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) { | ||
| 1064 | auto scheduler = kernel.CurrentScheduler(); | ||
| 1065 | |||
| 1066 | if (scheduler) { | ||
| 1067 | scheduler->RescheduleCurrentCore(); | ||
| 1068 | } | ||
| 1069 | } else { | ||
| 1070 | GetCurrentThread(kernel).EnableDispatch(); | ||
| 1071 | } | ||
| 1072 | } | ||
| 1073 | |||
| 1074 | } // namespace Kernel | 1057 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index e4c4c877d..c77f44ad4 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h | |||
| @@ -450,39 +450,16 @@ public: | |||
| 450 | sleeping_queue = q; | 450 | sleeping_queue = q; |
| 451 | } | 451 | } |
| 452 | 452 | ||
| 453 | [[nodiscard]] bool IsKernelThread() const { | ||
| 454 | return GetActiveCore() == 3; | ||
| 455 | } | ||
| 456 | |||
| 457 | [[nodiscard]] bool IsDispatchTrackingDisabled() const { | ||
| 458 | return is_single_core || IsKernelThread(); | ||
| 459 | } | ||
| 460 | |||
| 461 | [[nodiscard]] s32 GetDisableDispatchCount() const { | 453 | [[nodiscard]] s32 GetDisableDispatchCount() const { |
| 462 | if (IsDispatchTrackingDisabled()) { | ||
| 463 | // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch. | ||
| 464 | return 1; | ||
| 465 | } | ||
| 466 | |||
| 467 | return this->GetStackParameters().disable_count; | 454 | return this->GetStackParameters().disable_count; |
| 468 | } | 455 | } |
| 469 | 456 | ||
| 470 | void DisableDispatch() { | 457 | void DisableDispatch() { |
| 471 | if (IsDispatchTrackingDisabled()) { | ||
| 472 | // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch. | ||
| 473 | return; | ||
| 474 | } | ||
| 475 | |||
| 476 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); | 458 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); |
| 477 | this->GetStackParameters().disable_count++; | 459 | this->GetStackParameters().disable_count++; |
| 478 | } | 460 | } |
| 479 | 461 | ||
| 480 | void EnableDispatch() { | 462 | void EnableDispatch() { |
| 481 | if (IsDispatchTrackingDisabled()) { | ||
| 482 | // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch. | ||
| 483 | return; | ||
| 484 | } | ||
| 485 | |||
| 486 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0); | 463 | ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0); |
| 487 | this->GetStackParameters().disable_count--; | 464 | this->GetStackParameters().disable_count--; |
| 488 | } | 465 | } |
| @@ -731,7 +708,6 @@ private: | |||
| 731 | 708 | ||
| 732 | // For emulation | 709 | // For emulation |
| 733 | std::shared_ptr<Common::Fiber> host_context{}; | 710 | std::shared_ptr<Common::Fiber> host_context{}; |
| 734 | bool is_single_core{}; | ||
| 735 | 711 | ||
| 736 | // For debugging | 712 | // For debugging |
| 737 | std::vector<KSynchronizationObject*> wait_objects_for_debugging; | 713 | std::vector<KSynchronizationObject*> wait_objects_for_debugging; |
| @@ -776,16 +752,4 @@ public: | |||
| 776 | } | 752 | } |
| 777 | }; | 753 | }; |
| 778 | 754 | ||
| 779 | class KScopedDisableDispatch { | ||
| 780 | public: | ||
| 781 | [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} { | ||
| 782 | GetCurrentThread(kernel).DisableDispatch(); | ||
| 783 | } | ||
| 784 | |||
| 785 | ~KScopedDisableDispatch(); | ||
| 786 | |||
| 787 | private: | ||
| 788 | KernelCore& kernel; | ||
| 789 | }; | ||
| 790 | |||
| 791 | } // namespace Kernel | 755 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 8fdab44e4..bea945301 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -85,9 +85,8 @@ struct KernelCore::Impl { | |||
| 85 | } | 85 | } |
| 86 | 86 | ||
| 87 | void InitializeCores() { | 87 | void InitializeCores() { |
| 88 | for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | 88 | for (auto& core : cores) { |
| 89 | cores[core_id].Initialize(current_process->Is64BitProcess()); | 89 | core.Initialize(current_process->Is64BitProcess()); |
| 90 | system.Memory().SetCurrentPageTable(*current_process, core_id); | ||
| 91 | } | 90 | } |
| 92 | } | 91 | } |
| 93 | 92 | ||
| @@ -132,6 +131,15 @@ struct KernelCore::Impl { | |||
| 132 | next_user_process_id = KProcess::ProcessIDMin; | 131 | next_user_process_id = KProcess::ProcessIDMin; |
| 133 | next_thread_id = 1; | 132 | next_thread_id = 1; |
| 134 | 133 | ||
| 134 | for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | ||
| 135 | if (suspend_threads[core_id]) { | ||
| 136 | suspend_threads[core_id]->Close(); | ||
| 137 | suspend_threads[core_id] = nullptr; | ||
| 138 | } | ||
| 139 | |||
| 140 | schedulers[core_id].reset(); | ||
| 141 | } | ||
| 142 | |||
| 135 | cores.clear(); | 143 | cores.clear(); |
| 136 | 144 | ||
| 137 | global_handle_table->Finalize(); | 145 | global_handle_table->Finalize(); |
| @@ -159,16 +167,6 @@ struct KernelCore::Impl { | |||
| 159 | CleanupObject(time_shared_mem); | 167 | CleanupObject(time_shared_mem); |
| 160 | CleanupObject(system_resource_limit); | 168 | CleanupObject(system_resource_limit); |
| 161 | 169 | ||
| 162 | for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | ||
| 163 | if (suspend_threads[core_id]) { | ||
| 164 | suspend_threads[core_id]->Close(); | ||
| 165 | suspend_threads[core_id] = nullptr; | ||
| 166 | } | ||
| 167 | |||
| 168 | schedulers[core_id]->Finalize(); | ||
| 169 | schedulers[core_id].reset(); | ||
| 170 | } | ||
| 171 | |||
| 172 | // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others | 170 | // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others |
| 173 | next_host_thread_id = Core::Hardware::NUM_CPU_CORES; | 171 | next_host_thread_id = Core::Hardware::NUM_CPU_CORES; |
| 174 | 172 | ||
| @@ -259,6 +257,14 @@ struct KernelCore::Impl { | |||
| 259 | 257 | ||
| 260 | void MakeCurrentProcess(KProcess* process) { | 258 | void MakeCurrentProcess(KProcess* process) { |
| 261 | current_process = process; | 259 | current_process = process; |
| 260 | if (process == nullptr) { | ||
| 261 | return; | ||
| 262 | } | ||
| 263 | |||
| 264 | const u32 core_id = GetCurrentHostThreadID(); | ||
| 265 | if (core_id < Core::Hardware::NUM_CPU_CORES) { | ||
| 266 | system.Memory().SetCurrentPageTable(*process, core_id); | ||
| 267 | } | ||
| 262 | } | 268 | } |
| 263 | 269 | ||
| 264 | static inline thread_local u32 host_thread_id = UINT32_MAX; | 270 | static inline thread_local u32 host_thread_id = UINT32_MAX; |
| @@ -821,20 +827,16 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const { | |||
| 821 | return impl->cores[id]; | 827 | return impl->cores[id]; |
| 822 | } | 828 | } |
| 823 | 829 | ||
| 824 | size_t KernelCore::CurrentPhysicalCoreIndex() const { | ||
| 825 | const u32 core_id = impl->GetCurrentHostThreadID(); | ||
| 826 | if (core_id >= Core::Hardware::NUM_CPU_CORES) { | ||
| 827 | return Core::Hardware::NUM_CPU_CORES - 1; | ||
| 828 | } | ||
| 829 | return core_id; | ||
| 830 | } | ||
| 831 | |||
| 832 | Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() { | 830 | Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() { |
| 833 | return impl->cores[CurrentPhysicalCoreIndex()]; | 831 | u32 core_id = impl->GetCurrentHostThreadID(); |
| 832 | ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); | ||
| 833 | return impl->cores[core_id]; | ||
| 834 | } | 834 | } |
| 835 | 835 | ||
| 836 | const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const { | 836 | const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const { |
| 837 | return impl->cores[CurrentPhysicalCoreIndex()]; | 837 | u32 core_id = impl->GetCurrentHostThreadID(); |
| 838 | ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); | ||
| 839 | return impl->cores[core_id]; | ||
| 838 | } | 840 | } |
| 839 | 841 | ||
| 840 | Kernel::KScheduler* KernelCore::CurrentScheduler() { | 842 | Kernel::KScheduler* KernelCore::CurrentScheduler() { |
| @@ -1027,9 +1029,6 @@ void KernelCore::Suspend(bool in_suspention) { | |||
| 1027 | impl->suspend_threads[core_id]->SetState(state); | 1029 | impl->suspend_threads[core_id]->SetState(state); |
| 1028 | impl->suspend_threads[core_id]->SetWaitReasonForDebugging( | 1030 | impl->suspend_threads[core_id]->SetWaitReasonForDebugging( |
| 1029 | ThreadWaitReasonForDebugging::Suspended); | 1031 | ThreadWaitReasonForDebugging::Suspended); |
| 1030 | if (!should_suspend) { | ||
| 1031 | impl->suspend_threads[core_id]->DisableDispatch(); | ||
| 1032 | } | ||
| 1033 | } | 1032 | } |
| 1034 | } | 1033 | } |
| 1035 | } | 1034 | } |
| @@ -1044,11 +1043,13 @@ void KernelCore::ExceptionalExit() { | |||
| 1044 | } | 1043 | } |
| 1045 | 1044 | ||
| 1046 | void KernelCore::EnterSVCProfile() { | 1045 | void KernelCore::EnterSVCProfile() { |
| 1047 | impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC)); | 1046 | std::size_t core = impl->GetCurrentHostThreadID(); |
| 1047 | impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC)); | ||
| 1048 | } | 1048 | } |
| 1049 | 1049 | ||
| 1050 | void KernelCore::ExitSVCProfile() { | 1050 | void KernelCore::ExitSVCProfile() { |
| 1051 | MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]); | 1051 | std::size_t core = impl->GetCurrentHostThreadID(); |
| 1052 | MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]); | ||
| 1052 | } | 1053 | } |
| 1053 | 1054 | ||
| 1054 | std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) { | 1055 | std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) { |
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 57535433b..3a6db0b1c 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h | |||
| @@ -146,9 +146,6 @@ public: | |||
| 146 | /// Gets an instance of the respective physical CPU core. | 146 | /// Gets an instance of the respective physical CPU core. |
| 147 | const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const; | 147 | const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const; |
| 148 | 148 | ||
| 149 | /// Gets the current physical core index for the running host thread. | ||
| 150 | std::size_t CurrentPhysicalCoreIndex() const; | ||
| 151 | |||
| 152 | /// Gets the sole instance of the Scheduler at the current running core. | 149 | /// Gets the sole instance of the Scheduler at the current running core. |
| 153 | Kernel::KScheduler* CurrentScheduler(); | 150 | Kernel::KScheduler* CurrentScheduler(); |
| 154 | 151 | ||
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 890c52198..62fb06c45 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp | |||
| @@ -877,7 +877,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle | |||
| 877 | const u64 thread_ticks = current_thread->GetCpuTime(); | 877 | const u64 thread_ticks = current_thread->GetCpuTime(); |
| 878 | 878 | ||
| 879 | out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks); | 879 | out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks); |
| 880 | } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) { | 880 | } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) { |
| 881 | out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks; | 881 | out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks; |
| 882 | } | 882 | } |
| 883 | 883 | ||
diff --git a/src/core/hle/service/ngct/ngct.cpp b/src/core/hle/service/ngct/ngct.cpp new file mode 100644 index 000000000..deb3abb28 --- /dev/null +++ b/src/core/hle/service/ngct/ngct.cpp | |||
| @@ -0,0 +1,46 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included | ||
| 4 | |||
| 5 | #include "common/string_util.h" | ||
| 6 | #include "core/core.h" | ||
| 7 | #include "core/hle/ipc_helpers.h" | ||
| 8 | #include "core/hle/service/ngct/ngct.h" | ||
| 9 | #include "core/hle/service/service.h" | ||
| 10 | |||
| 11 | namespace Service::NGCT { | ||
| 12 | |||
| 13 | class IService final : public ServiceFramework<IService> { | ||
| 14 | public: | ||
| 15 | explicit IService(Core::System& system_) : ServiceFramework{system_, "ngct:u"} { | ||
| 16 | // clang-format off | ||
| 17 | static const FunctionInfo functions[] = { | ||
| 18 | {0, nullptr, "Match"}, | ||
| 19 | {1, &IService::Filter, "Filter"}, | ||
| 20 | }; | ||
| 21 | // clang-format on | ||
| 22 | |||
| 23 | RegisterHandlers(functions); | ||
| 24 | } | ||
| 25 | |||
| 26 | private: | ||
| 27 | void Filter(Kernel::HLERequestContext& ctx) { | ||
| 28 | const auto buffer = ctx.ReadBuffer(); | ||
| 29 | const auto text = Common::StringFromFixedZeroTerminatedBuffer( | ||
| 30 | reinterpret_cast<const char*>(buffer.data()), buffer.size()); | ||
| 31 | |||
| 32 | LOG_WARNING(Service_NGCT, "(STUBBED) called, text={}", text); | ||
| 33 | |||
| 34 | // Return the same string since we don't censor anything | ||
| 35 | ctx.WriteBuffer(buffer); | ||
| 36 | |||
| 37 | IPC::ResponseBuilder rb{ctx, 2}; | ||
| 38 | rb.Push(ResultSuccess); | ||
| 39 | } | ||
| 40 | }; | ||
| 41 | |||
| 42 | void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system) { | ||
| 43 | std::make_shared<IService>(system)->InstallAsService(system.ServiceManager()); | ||
| 44 | } | ||
| 45 | |||
| 46 | } // namespace Service::NGCT | ||
diff --git a/src/core/hle/service/ngct/ngct.h b/src/core/hle/service/ngct/ngct.h new file mode 100644 index 000000000..1f2a47b78 --- /dev/null +++ b/src/core/hle/service/ngct/ngct.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | namespace Core { | ||
| 8 | class System; | ||
| 9 | } | ||
| 10 | |||
| 11 | namespace Service::SM { | ||
| 12 | class ServiceManager; | ||
| 13 | } | ||
| 14 | |||
| 15 | namespace Service::NGCT { | ||
| 16 | |||
| 17 | /// Registers all NGCT services with the specified service manager. | ||
| 18 | void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system); | ||
| 19 | |||
| 20 | } // namespace Service::NGCT | ||
diff --git a/src/core/hle/service/nifm/nifm.cpp b/src/core/hle/service/nifm/nifm.cpp index 0a53c0c81..9decb9290 100644 --- a/src/core/hle/service/nifm/nifm.cpp +++ b/src/core/hle/service/nifm/nifm.cpp | |||
| @@ -277,37 +277,45 @@ private: | |||
| 277 | void GetCurrentNetworkProfile(Kernel::HLERequestContext& ctx) { | 277 | void GetCurrentNetworkProfile(Kernel::HLERequestContext& ctx) { |
| 278 | LOG_WARNING(Service_NIFM, "(STUBBED) called"); | 278 | LOG_WARNING(Service_NIFM, "(STUBBED) called"); |
| 279 | 279 | ||
| 280 | const SfNetworkProfileData network_profile_data{ | 280 | const auto net_iface = Network::GetSelectedNetworkInterface(); |
| 281 | .ip_setting_data{ | 281 | |
| 282 | .ip_address_setting{ | 282 | const SfNetworkProfileData network_profile_data = [&net_iface] { |
| 283 | .is_automatic{true}, | 283 | if (!net_iface) { |
| 284 | .current_address{192, 168, 1, 100}, | 284 | return SfNetworkProfileData{}; |
| 285 | .subnet_mask{255, 255, 255, 0}, | 285 | } |
| 286 | .gateway{192, 168, 1, 1}, | 286 | |
| 287 | }, | 287 | return SfNetworkProfileData{ |
| 288 | .dns_setting{ | 288 | .ip_setting_data{ |
| 289 | .is_automatic{true}, | 289 | .ip_address_setting{ |
| 290 | .primary_dns{1, 1, 1, 1}, | 290 | .is_automatic{true}, |
| 291 | .secondary_dns{1, 0, 0, 1}, | 291 | .current_address{Network::TranslateIPv4(net_iface->ip_address)}, |
| 292 | .subnet_mask{Network::TranslateIPv4(net_iface->subnet_mask)}, | ||
| 293 | .gateway{Network::TranslateIPv4(net_iface->gateway)}, | ||
| 294 | }, | ||
| 295 | .dns_setting{ | ||
| 296 | .is_automatic{true}, | ||
| 297 | .primary_dns{1, 1, 1, 1}, | ||
| 298 | .secondary_dns{1, 0, 0, 1}, | ||
| 299 | }, | ||
| 300 | .proxy_setting{ | ||
| 301 | .enabled{false}, | ||
| 302 | .port{}, | ||
| 303 | .proxy_server{}, | ||
| 304 | .automatic_auth_enabled{}, | ||
| 305 | .user{}, | ||
| 306 | .password{}, | ||
| 307 | }, | ||
| 308 | .mtu{1500}, | ||
| 292 | }, | 309 | }, |
| 293 | .proxy_setting{ | 310 | .uuid{0xdeadbeef, 0xdeadbeef}, |
| 294 | .enabled{false}, | 311 | .network_name{"yuzu Network"}, |
| 295 | .port{}, | 312 | .wireless_setting_data{ |
| 296 | .proxy_server{}, | 313 | .ssid_length{12}, |
| 297 | .automatic_auth_enabled{}, | 314 | .ssid{"yuzu Network"}, |
| 298 | .user{}, | 315 | .passphrase{"yuzupassword"}, |
| 299 | .password{}, | ||
| 300 | }, | 316 | }, |
| 301 | .mtu{1500}, | 317 | }; |
| 302 | }, | 318 | }(); |
| 303 | .uuid{0xdeadbeef, 0xdeadbeef}, | ||
| 304 | .network_name{"yuzu Network"}, | ||
| 305 | .wireless_setting_data{ | ||
| 306 | .ssid_length{12}, | ||
| 307 | .ssid{"yuzu Network"}, | ||
| 308 | .passphrase{"yuzupassword"}, | ||
| 309 | }, | ||
| 310 | }; | ||
| 311 | 319 | ||
| 312 | ctx.WriteBuffer(network_profile_data); | 320 | ctx.WriteBuffer(network_profile_data); |
| 313 | 321 | ||
| @@ -352,38 +360,33 @@ private: | |||
| 352 | LOG_WARNING(Service_NIFM, "(STUBBED) called"); | 360 | LOG_WARNING(Service_NIFM, "(STUBBED) called"); |
| 353 | 361 | ||
| 354 | struct IpConfigInfo { | 362 | struct IpConfigInfo { |
| 355 | IpAddressSetting ip_address_setting; | 363 | IpAddressSetting ip_address_setting{}; |
| 356 | DnsSetting dns_setting; | 364 | DnsSetting dns_setting{}; |
| 357 | }; | 365 | }; |
| 358 | static_assert(sizeof(IpConfigInfo) == sizeof(IpAddressSetting) + sizeof(DnsSetting), | 366 | static_assert(sizeof(IpConfigInfo) == sizeof(IpAddressSetting) + sizeof(DnsSetting), |
| 359 | "IpConfigInfo has incorrect size."); | 367 | "IpConfigInfo has incorrect size."); |
| 360 | 368 | ||
| 361 | IpConfigInfo ip_config_info{ | 369 | const auto net_iface = Network::GetSelectedNetworkInterface(); |
| 362 | .ip_address_setting{ | ||
| 363 | .is_automatic{true}, | ||
| 364 | .current_address{0, 0, 0, 0}, | ||
| 365 | .subnet_mask{255, 255, 255, 0}, | ||
| 366 | .gateway{192, 168, 1, 1}, | ||
| 367 | }, | ||
| 368 | .dns_setting{ | ||
| 369 | .is_automatic{true}, | ||
| 370 | .primary_dns{1, 1, 1, 1}, | ||
| 371 | .secondary_dns{1, 0, 0, 1}, | ||
| 372 | }, | ||
| 373 | }; | ||
| 374 | 370 | ||
| 375 | const auto iface = Network::GetSelectedNetworkInterface(); | 371 | const IpConfigInfo ip_config_info = [&net_iface] { |
| 376 | if (iface) { | 372 | if (!net_iface) { |
| 377 | ip_config_info.ip_address_setting = | 373 | return IpConfigInfo{}; |
| 378 | IpAddressSetting{.is_automatic{true}, | 374 | } |
| 379 | .current_address{Network::TranslateIPv4(iface->ip_address)}, | ||
| 380 | .subnet_mask{Network::TranslateIPv4(iface->subnet_mask)}, | ||
| 381 | .gateway{Network::TranslateIPv4(iface->gateway)}}; | ||
| 382 | 375 | ||
| 383 | } else { | 376 | return IpConfigInfo{ |
| 384 | LOG_ERROR(Service_NIFM, | 377 | .ip_address_setting{ |
| 385 | "Couldn't get host network configuration info, using default values"); | 378 | .is_automatic{true}, |
| 386 | } | 379 | .current_address{Network::TranslateIPv4(net_iface->ip_address)}, |
| 380 | .subnet_mask{Network::TranslateIPv4(net_iface->subnet_mask)}, | ||
| 381 | .gateway{Network::TranslateIPv4(net_iface->gateway)}, | ||
| 382 | }, | ||
| 383 | .dns_setting{ | ||
| 384 | .is_automatic{true}, | ||
| 385 | .primary_dns{1, 1, 1, 1}, | ||
| 386 | .secondary_dns{1, 0, 0, 1}, | ||
| 387 | }, | ||
| 388 | }; | ||
| 389 | }(); | ||
| 387 | 390 | ||
| 388 | IPC::ResponseBuilder rb{ctx, 2 + (sizeof(IpConfigInfo) + 3) / sizeof(u32)}; | 391 | IPC::ResponseBuilder rb{ctx, 2 + (sizeof(IpConfigInfo) + 3) / sizeof(u32)}; |
| 389 | rb.Push(ResultSuccess); | 392 | rb.Push(ResultSuccess); |
diff --git a/src/core/hle/service/nvflinger/buffer_queue.cpp b/src/core/hle/service/nvflinger/buffer_queue.cpp index b4c3a6099..59ddf6298 100644 --- a/src/core/hle/service/nvflinger/buffer_queue.cpp +++ b/src/core/hle/service/nvflinger/buffer_queue.cpp | |||
| @@ -9,20 +9,17 @@ | |||
| 9 | #include "core/core.h" | 9 | #include "core/core.h" |
| 10 | #include "core/hle/kernel/k_writable_event.h" | 10 | #include "core/hle/kernel/k_writable_event.h" |
| 11 | #include "core/hle/kernel/kernel.h" | 11 | #include "core/hle/kernel/kernel.h" |
| 12 | #include "core/hle/service/kernel_helpers.h" | ||
| 13 | #include "core/hle/service/nvflinger/buffer_queue.h" | 12 | #include "core/hle/service/nvflinger/buffer_queue.h" |
| 14 | 13 | ||
| 15 | namespace Service::NVFlinger { | 14 | namespace Service::NVFlinger { |
| 16 | 15 | ||
| 17 | BufferQueue::BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_, | 16 | BufferQueue::BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_) |
| 18 | KernelHelpers::ServiceContext& service_context_) | 17 | : id(id_), layer_id(layer_id_), buffer_wait_event{kernel} { |
| 19 | : id(id_), layer_id(layer_id_), service_context{service_context_} { | 18 | Kernel::KAutoObject::Create(std::addressof(buffer_wait_event)); |
| 20 | buffer_wait_event = service_context.CreateEvent("BufferQueue:WaitEvent"); | 19 | buffer_wait_event.Initialize("BufferQueue:WaitEvent"); |
| 21 | } | 20 | } |
| 22 | 21 | ||
| 23 | BufferQueue::~BufferQueue() { | 22 | BufferQueue::~BufferQueue() = default; |
| 24 | service_context.CloseEvent(buffer_wait_event); | ||
| 25 | } | ||
| 26 | 23 | ||
| 27 | void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer) { | 24 | void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer) { |
| 28 | ASSERT(slot < buffer_slots); | 25 | ASSERT(slot < buffer_slots); |
| @@ -44,7 +41,7 @@ void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer) | |||
| 44 | .multi_fence = {}, | 41 | .multi_fence = {}, |
| 45 | }; | 42 | }; |
| 46 | 43 | ||
| 47 | buffer_wait_event->GetWritableEvent().Signal(); | 44 | buffer_wait_event.GetWritableEvent().Signal(); |
| 48 | } | 45 | } |
| 49 | 46 | ||
| 50 | std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> BufferQueue::DequeueBuffer(u32 width, | 47 | std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> BufferQueue::DequeueBuffer(u32 width, |
| @@ -122,7 +119,7 @@ void BufferQueue::CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& mult | |||
| 122 | } | 119 | } |
| 123 | free_buffers_condition.notify_one(); | 120 | free_buffers_condition.notify_one(); |
| 124 | 121 | ||
| 125 | buffer_wait_event->GetWritableEvent().Signal(); | 122 | buffer_wait_event.GetWritableEvent().Signal(); |
| 126 | } | 123 | } |
| 127 | 124 | ||
| 128 | std::optional<std::reference_wrapper<const BufferQueue::Buffer>> BufferQueue::AcquireBuffer() { | 125 | std::optional<std::reference_wrapper<const BufferQueue::Buffer>> BufferQueue::AcquireBuffer() { |
| @@ -157,7 +154,7 @@ void BufferQueue::ReleaseBuffer(u32 slot) { | |||
| 157 | } | 154 | } |
| 158 | free_buffers_condition.notify_one(); | 155 | free_buffers_condition.notify_one(); |
| 159 | 156 | ||
| 160 | buffer_wait_event->GetWritableEvent().Signal(); | 157 | buffer_wait_event.GetWritableEvent().Signal(); |
| 161 | } | 158 | } |
| 162 | 159 | ||
| 163 | void BufferQueue::Connect() { | 160 | void BufferQueue::Connect() { |
| @@ -172,7 +169,7 @@ void BufferQueue::Disconnect() { | |||
| 172 | std::unique_lock lock{queue_sequence_mutex}; | 169 | std::unique_lock lock{queue_sequence_mutex}; |
| 173 | queue_sequence.clear(); | 170 | queue_sequence.clear(); |
| 174 | } | 171 | } |
| 175 | buffer_wait_event->GetWritableEvent().Signal(); | 172 | buffer_wait_event.GetWritableEvent().Signal(); |
| 176 | is_connect = false; | 173 | is_connect = false; |
| 177 | free_buffers_condition.notify_one(); | 174 | free_buffers_condition.notify_one(); |
| 178 | } | 175 | } |
| @@ -192,11 +189,11 @@ u32 BufferQueue::Query(QueryType type) { | |||
| 192 | } | 189 | } |
| 193 | 190 | ||
| 194 | Kernel::KWritableEvent& BufferQueue::GetWritableBufferWaitEvent() { | 191 | Kernel::KWritableEvent& BufferQueue::GetWritableBufferWaitEvent() { |
| 195 | return buffer_wait_event->GetWritableEvent(); | 192 | return buffer_wait_event.GetWritableEvent(); |
| 196 | } | 193 | } |
| 197 | 194 | ||
| 198 | Kernel::KReadableEvent& BufferQueue::GetBufferWaitEvent() { | 195 | Kernel::KReadableEvent& BufferQueue::GetBufferWaitEvent() { |
| 199 | return buffer_wait_event->GetReadableEvent(); | 196 | return buffer_wait_event.GetReadableEvent(); |
| 200 | } | 197 | } |
| 201 | 198 | ||
| 202 | } // namespace Service::NVFlinger | 199 | } // namespace Service::NVFlinger |
diff --git a/src/core/hle/service/nvflinger/buffer_queue.h b/src/core/hle/service/nvflinger/buffer_queue.h index 759247eb0..61e337ac5 100644 --- a/src/core/hle/service/nvflinger/buffer_queue.h +++ b/src/core/hle/service/nvflinger/buffer_queue.h | |||
| @@ -24,10 +24,6 @@ class KReadableEvent; | |||
| 24 | class KWritableEvent; | 24 | class KWritableEvent; |
| 25 | } // namespace Kernel | 25 | } // namespace Kernel |
| 26 | 26 | ||
| 27 | namespace Service::KernelHelpers { | ||
| 28 | class ServiceContext; | ||
| 29 | } // namespace Service::KernelHelpers | ||
| 30 | |||
| 31 | namespace Service::NVFlinger { | 27 | namespace Service::NVFlinger { |
| 32 | 28 | ||
| 33 | constexpr u32 buffer_slots = 0x40; | 29 | constexpr u32 buffer_slots = 0x40; |
| @@ -58,8 +54,7 @@ public: | |||
| 58 | NativeWindowFormat = 2, | 54 | NativeWindowFormat = 2, |
| 59 | }; | 55 | }; |
| 60 | 56 | ||
| 61 | explicit BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_, | 57 | explicit BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_); |
| 62 | KernelHelpers::ServiceContext& service_context_); | ||
| 63 | ~BufferQueue(); | 58 | ~BufferQueue(); |
| 64 | 59 | ||
| 65 | enum class BufferTransformFlags : u32 { | 60 | enum class BufferTransformFlags : u32 { |
| @@ -135,14 +130,12 @@ private: | |||
| 135 | std::list<u32> free_buffers; | 130 | std::list<u32> free_buffers; |
| 136 | std::array<Buffer, buffer_slots> buffers; | 131 | std::array<Buffer, buffer_slots> buffers; |
| 137 | std::list<u32> queue_sequence; | 132 | std::list<u32> queue_sequence; |
| 138 | Kernel::KEvent* buffer_wait_event{}; | 133 | Kernel::KEvent buffer_wait_event; |
| 139 | 134 | ||
| 140 | std::mutex free_buffers_mutex; | 135 | std::mutex free_buffers_mutex; |
| 141 | std::condition_variable free_buffers_condition; | 136 | std::condition_variable free_buffers_condition; |
| 142 | 137 | ||
| 143 | std::mutex queue_sequence_mutex; | 138 | std::mutex queue_sequence_mutex; |
| 144 | |||
| 145 | KernelHelpers::ServiceContext& service_context; | ||
| 146 | }; | 139 | }; |
| 147 | 140 | ||
| 148 | } // namespace Service::NVFlinger | 141 | } // namespace Service::NVFlinger |
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp index 00bff8caf..941748970 100644 --- a/src/core/hle/service/nvflinger/nvflinger.cpp +++ b/src/core/hle/service/nvflinger/nvflinger.cpp | |||
| @@ -61,13 +61,12 @@ void NVFlinger::SplitVSync() { | |||
| 61 | } | 61 | } |
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | NVFlinger::NVFlinger(Core::System& system_) | 64 | NVFlinger::NVFlinger(Core::System& system_) : system(system_) { |
| 65 | : system(system_), service_context(system_, "nvflinger") { | 65 | displays.emplace_back(0, "Default", system); |
| 66 | displays.emplace_back(0, "Default", service_context, system); | 66 | displays.emplace_back(1, "External", system); |
| 67 | displays.emplace_back(1, "External", service_context, system); | 67 | displays.emplace_back(2, "Edid", system); |
| 68 | displays.emplace_back(2, "Edid", service_context, system); | 68 | displays.emplace_back(3, "Internal", system); |
| 69 | displays.emplace_back(3, "Internal", service_context, system); | 69 | displays.emplace_back(4, "Null", system); |
| 70 | displays.emplace_back(4, "Null", service_context, system); | ||
| 71 | guard = std::make_shared<std::mutex>(); | 70 | guard = std::make_shared<std::mutex>(); |
| 72 | 71 | ||
| 73 | // Schedule the screen composition events | 72 | // Schedule the screen composition events |
| @@ -147,7 +146,7 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) { | |||
| 147 | void NVFlinger::CreateLayerAtId(VI::Display& display, u64 layer_id) { | 146 | void NVFlinger::CreateLayerAtId(VI::Display& display, u64 layer_id) { |
| 148 | const u32 buffer_queue_id = next_buffer_queue_id++; | 147 | const u32 buffer_queue_id = next_buffer_queue_id++; |
| 149 | buffer_queues.emplace_back( | 148 | buffer_queues.emplace_back( |
| 150 | std::make_unique<BufferQueue>(system.Kernel(), buffer_queue_id, layer_id, service_context)); | 149 | std::make_unique<BufferQueue>(system.Kernel(), buffer_queue_id, layer_id)); |
| 151 | display.CreateLayer(layer_id, *buffer_queues.back()); | 150 | display.CreateLayer(layer_id, *buffer_queues.back()); |
| 152 | } | 151 | } |
| 153 | 152 | ||
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h index 6d84cafb4..d80fd07ef 100644 --- a/src/core/hle/service/nvflinger/nvflinger.h +++ b/src/core/hle/service/nvflinger/nvflinger.h | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | #include <vector> | 15 | #include <vector> |
| 16 | 16 | ||
| 17 | #include "common/common_types.h" | 17 | #include "common/common_types.h" |
| 18 | #include "core/hle/service/kernel_helpers.h" | ||
| 19 | 18 | ||
| 20 | namespace Common { | 19 | namespace Common { |
| 21 | class Event; | 20 | class Event; |
| @@ -136,8 +135,6 @@ private: | |||
| 136 | std::unique_ptr<std::thread> vsync_thread; | 135 | std::unique_ptr<std::thread> vsync_thread; |
| 137 | std::unique_ptr<Common::Event> wait_event; | 136 | std::unique_ptr<Common::Event> wait_event; |
| 138 | std::atomic<bool> is_running{}; | 137 | std::atomic<bool> is_running{}; |
| 139 | |||
| 140 | KernelHelpers::ServiceContext service_context; | ||
| 141 | }; | 138 | }; |
| 142 | 139 | ||
| 143 | } // namespace Service::NVFlinger | 140 | } // namespace Service::NVFlinger |
diff --git a/src/core/hle/service/service.cpp b/src/core/hle/service/service.cpp index b3e50433b..065133166 100644 --- a/src/core/hle/service/service.cpp +++ b/src/core/hle/service/service.cpp | |||
| @@ -46,6 +46,7 @@ | |||
| 46 | #include "core/hle/service/ncm/ncm.h" | 46 | #include "core/hle/service/ncm/ncm.h" |
| 47 | #include "core/hle/service/nfc/nfc.h" | 47 | #include "core/hle/service/nfc/nfc.h" |
| 48 | #include "core/hle/service/nfp/nfp.h" | 48 | #include "core/hle/service/nfp/nfp.h" |
| 49 | #include "core/hle/service/ngct/ngct.h" | ||
| 49 | #include "core/hle/service/nifm/nifm.h" | 50 | #include "core/hle/service/nifm/nifm.h" |
| 50 | #include "core/hle/service/nim/nim.h" | 51 | #include "core/hle/service/nim/nim.h" |
| 51 | #include "core/hle/service/npns/npns.h" | 52 | #include "core/hle/service/npns/npns.h" |
| @@ -271,6 +272,7 @@ Services::Services(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system | |||
| 271 | NCM::InstallInterfaces(*sm, system); | 272 | NCM::InstallInterfaces(*sm, system); |
| 272 | NFC::InstallInterfaces(*sm, system); | 273 | NFC::InstallInterfaces(*sm, system); |
| 273 | NFP::InstallInterfaces(*sm, system); | 274 | NFP::InstallInterfaces(*sm, system); |
| 275 | NGCT::InstallInterfaces(*sm, system); | ||
| 274 | NIFM::InstallInterfaces(*sm, system); | 276 | NIFM::InstallInterfaces(*sm, system); |
| 275 | NIM::InstallInterfaces(*sm, system); | 277 | NIM::InstallInterfaces(*sm, system); |
| 276 | NPNS::InstallInterfaces(*sm, system); | 278 | NPNS::InstallInterfaces(*sm, system); |
diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp index b7705c02a..0dd342dbf 100644 --- a/src/core/hle/service/vi/display/vi_display.cpp +++ b/src/core/hle/service/vi/display/vi_display.cpp | |||
| @@ -12,21 +12,18 @@ | |||
| 12 | #include "core/hle/kernel/k_event.h" | 12 | #include "core/hle/kernel/k_event.h" |
| 13 | #include "core/hle/kernel/k_readable_event.h" | 13 | #include "core/hle/kernel/k_readable_event.h" |
| 14 | #include "core/hle/kernel/k_writable_event.h" | 14 | #include "core/hle/kernel/k_writable_event.h" |
| 15 | #include "core/hle/service/kernel_helpers.h" | ||
| 16 | #include "core/hle/service/vi/display/vi_display.h" | 15 | #include "core/hle/service/vi/display/vi_display.h" |
| 17 | #include "core/hle/service/vi/layer/vi_layer.h" | 16 | #include "core/hle/service/vi/layer/vi_layer.h" |
| 18 | 17 | ||
| 19 | namespace Service::VI { | 18 | namespace Service::VI { |
| 20 | 19 | ||
| 21 | Display::Display(u64 id, std::string name_, KernelHelpers::ServiceContext& service_context_, | 20 | Display::Display(u64 id, std::string name_, Core::System& system) |
| 22 | Core::System& system_) | 21 | : display_id{id}, name{std::move(name_)}, vsync_event{system.Kernel()} { |
| 23 | : display_id{id}, name{std::move(name_)}, service_context{service_context_} { | 22 | Kernel::KAutoObject::Create(std::addressof(vsync_event)); |
| 24 | vsync_event = service_context.CreateEvent(fmt::format("Display VSync Event {}", id)); | 23 | vsync_event.Initialize(fmt::format("Display VSync Event {}", id)); |
| 25 | } | 24 | } |
| 26 | 25 | ||
| 27 | Display::~Display() { | 26 | Display::~Display() = default; |
| 28 | service_context.CloseEvent(vsync_event); | ||
| 29 | } | ||
| 30 | 27 | ||
| 31 | Layer& Display::GetLayer(std::size_t index) { | 28 | Layer& Display::GetLayer(std::size_t index) { |
| 32 | return *layers.at(index); | 29 | return *layers.at(index); |
| @@ -37,11 +34,11 @@ const Layer& Display::GetLayer(std::size_t index) const { | |||
| 37 | } | 34 | } |
| 38 | 35 | ||
| 39 | Kernel::KReadableEvent& Display::GetVSyncEvent() { | 36 | Kernel::KReadableEvent& Display::GetVSyncEvent() { |
| 40 | return vsync_event->GetReadableEvent(); | 37 | return vsync_event.GetReadableEvent(); |
| 41 | } | 38 | } |
| 42 | 39 | ||
| 43 | void Display::SignalVSyncEvent() { | 40 | void Display::SignalVSyncEvent() { |
| 44 | vsync_event->GetWritableEvent().Signal(); | 41 | vsync_event.GetWritableEvent().Signal(); |
| 45 | } | 42 | } |
| 46 | 43 | ||
| 47 | void Display::CreateLayer(u64 layer_id, NVFlinger::BufferQueue& buffer_queue) { | 44 | void Display::CreateLayer(u64 layer_id, NVFlinger::BufferQueue& buffer_queue) { |
diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h index 0979fc421..166f2a4cc 100644 --- a/src/core/hle/service/vi/display/vi_display.h +++ b/src/core/hle/service/vi/display/vi_display.h | |||
| @@ -18,9 +18,6 @@ class KEvent; | |||
| 18 | namespace Service::NVFlinger { | 18 | namespace Service::NVFlinger { |
| 19 | class BufferQueue; | 19 | class BufferQueue; |
| 20 | } | 20 | } |
| 21 | namespace Service::KernelHelpers { | ||
| 22 | class ServiceContext; | ||
| 23 | } // namespace Service::KernelHelpers | ||
| 24 | 21 | ||
| 25 | namespace Service::VI { | 22 | namespace Service::VI { |
| 26 | 23 | ||
| @@ -34,13 +31,10 @@ class Display { | |||
| 34 | public: | 31 | public: |
| 35 | /// Constructs a display with a given unique ID and name. | 32 | /// Constructs a display with a given unique ID and name. |
| 36 | /// | 33 | /// |
| 37 | /// @param id The unique ID for this display. | 34 | /// @param id The unique ID for this display. |
| 38 | /// @param service_context_ The ServiceContext for the owning service. | ||
| 39 | /// @param name_ The name for this display. | 35 | /// @param name_ The name for this display. |
| 40 | /// @param system_ The global system instance. | ||
| 41 | /// | 36 | /// |
| 42 | Display(u64 id, std::string name_, KernelHelpers::ServiceContext& service_context_, | 37 | Display(u64 id, std::string name_, Core::System& system); |
| 43 | Core::System& system_); | ||
| 44 | ~Display(); | 38 | ~Display(); |
| 45 | 39 | ||
| 46 | /// Gets the unique ID assigned to this display. | 40 | /// Gets the unique ID assigned to this display. |
| @@ -104,10 +98,9 @@ public: | |||
| 104 | private: | 98 | private: |
| 105 | u64 display_id; | 99 | u64 display_id; |
| 106 | std::string name; | 100 | std::string name; |
| 107 | KernelHelpers::ServiceContext& service_context; | ||
| 108 | 101 | ||
| 109 | std::vector<std::shared_ptr<Layer>> layers; | 102 | std::vector<std::shared_ptr<Layer>> layers; |
| 110 | Kernel::KEvent* vsync_event{}; | 103 | Kernel::KEvent vsync_event; |
| 111 | }; | 104 | }; |
| 112 | 105 | ||
| 113 | } // namespace Service::VI | 106 | } // namespace Service::VI |
diff --git a/src/core/network/network_interface.cpp b/src/core/network/network_interface.cpp index cecc9aa11..6811f21b1 100644 --- a/src/core/network/network_interface.cpp +++ b/src/core/network/network_interface.cpp | |||
| @@ -37,73 +37,73 @@ std::vector<NetworkInterface> GetAvailableNetworkInterfaces() { | |||
| 37 | AF_INET, GAA_FLAG_SKIP_MULTICAST | GAA_FLAG_SKIP_DNS_SERVER | GAA_FLAG_INCLUDE_GATEWAYS, | 37 | AF_INET, GAA_FLAG_SKIP_MULTICAST | GAA_FLAG_SKIP_DNS_SERVER | GAA_FLAG_INCLUDE_GATEWAYS, |
| 38 | nullptr, adapter_addresses.data(), &buf_size); | 38 | nullptr, adapter_addresses.data(), &buf_size); |
| 39 | 39 | ||
| 40 | if (ret == ERROR_BUFFER_OVERFLOW) { | 40 | if (ret != ERROR_BUFFER_OVERFLOW) { |
| 41 | adapter_addresses.resize((buf_size / sizeof(IP_ADAPTER_ADDRESSES)) + 1); | ||
| 42 | } else { | ||
| 43 | break; | 41 | break; |
| 44 | } | 42 | } |
| 43 | |||
| 44 | adapter_addresses.resize((buf_size / sizeof(IP_ADAPTER_ADDRESSES)) + 1); | ||
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | if (ret == NO_ERROR) { | 47 | if (ret != NO_ERROR) { |
| 48 | std::vector<NetworkInterface> result; | 48 | LOG_ERROR(Network, "Failed to get network interfaces with GetAdaptersAddresses"); |
| 49 | return {}; | ||
| 50 | } | ||
| 49 | 51 | ||
| 50 | for (auto current_address = adapter_addresses.data(); current_address != nullptr; | 52 | std::vector<NetworkInterface> result; |
| 51 | current_address = current_address->Next) { | ||
| 52 | if (current_address->FirstUnicastAddress == nullptr || | ||
| 53 | current_address->FirstUnicastAddress->Address.lpSockaddr == nullptr) { | ||
| 54 | continue; | ||
| 55 | } | ||
| 56 | 53 | ||
| 57 | if (current_address->OperStatus != IfOperStatusUp) { | 54 | for (auto current_address = adapter_addresses.data(); current_address != nullptr; |
| 58 | continue; | 55 | current_address = current_address->Next) { |
| 59 | } | 56 | if (current_address->FirstUnicastAddress == nullptr || |
| 57 | current_address->FirstUnicastAddress->Address.lpSockaddr == nullptr) { | ||
| 58 | continue; | ||
| 59 | } | ||
| 60 | 60 | ||
| 61 | const auto ip_addr = Common::BitCast<struct sockaddr_in>( | 61 | if (current_address->OperStatus != IfOperStatusUp) { |
| 62 | *current_address->FirstUnicastAddress->Address.lpSockaddr) | 62 | continue; |
| 63 | .sin_addr; | 63 | } |
| 64 | 64 | ||
| 65 | ULONG mask = 0; | 65 | const auto ip_addr = Common::BitCast<struct sockaddr_in>( |
| 66 | if (ConvertLengthToIpv4Mask(current_address->FirstUnicastAddress->OnLinkPrefixLength, | 66 | *current_address->FirstUnicastAddress->Address.lpSockaddr) |
| 67 | &mask) != NO_ERROR) { | 67 | .sin_addr; |
| 68 | LOG_ERROR(Network, "Failed to convert IPv4 prefix length to subnet mask"); | ||
| 69 | continue; | ||
| 70 | } | ||
| 71 | 68 | ||
| 72 | struct in_addr gateway = {.S_un{.S_addr{0}}}; | 69 | ULONG mask = 0; |
| 73 | if (current_address->FirstGatewayAddress != nullptr && | 70 | if (ConvertLengthToIpv4Mask(current_address->FirstUnicastAddress->OnLinkPrefixLength, |
| 74 | current_address->FirstGatewayAddress->Address.lpSockaddr != nullptr) { | 71 | &mask) != NO_ERROR) { |
| 75 | gateway = Common::BitCast<struct sockaddr_in>( | 72 | LOG_ERROR(Network, "Failed to convert IPv4 prefix length to subnet mask"); |
| 76 | *current_address->FirstGatewayAddress->Address.lpSockaddr) | 73 | continue; |
| 77 | .sin_addr; | 74 | } |
| 78 | } | ||
| 79 | 75 | ||
| 80 | result.push_back(NetworkInterface{ | 76 | struct in_addr gateway = {.S_un{.S_addr{0}}}; |
| 81 | .name{Common::UTF16ToUTF8(std::wstring{current_address->FriendlyName})}, | 77 | if (current_address->FirstGatewayAddress != nullptr && |
| 82 | .ip_address{ip_addr}, | 78 | current_address->FirstGatewayAddress->Address.lpSockaddr != nullptr) { |
| 83 | .subnet_mask = in_addr{.S_un{.S_addr{mask}}}, | 79 | gateway = Common::BitCast<struct sockaddr_in>( |
| 84 | .gateway = gateway}); | 80 | *current_address->FirstGatewayAddress->Address.lpSockaddr) |
| 81 | .sin_addr; | ||
| 85 | } | 82 | } |
| 86 | 83 | ||
| 87 | return result; | 84 | result.emplace_back(NetworkInterface{ |
| 88 | } else { | 85 | .name{Common::UTF16ToUTF8(std::wstring{current_address->FriendlyName})}, |
| 89 | LOG_ERROR(Network, "Failed to get network interfaces with GetAdaptersAddresses"); | 86 | .ip_address{ip_addr}, |
| 90 | return {}; | 87 | .subnet_mask = in_addr{.S_un{.S_addr{mask}}}, |
| 88 | .gateway = gateway}); | ||
| 91 | } | 89 | } |
| 90 | |||
| 91 | return result; | ||
| 92 | } | 92 | } |
| 93 | 93 | ||
| 94 | #else | 94 | #else |
| 95 | 95 | ||
| 96 | std::vector<NetworkInterface> GetAvailableNetworkInterfaces() { | 96 | std::vector<NetworkInterface> GetAvailableNetworkInterfaces() { |
| 97 | std::vector<NetworkInterface> result; | ||
| 98 | |||
| 99 | struct ifaddrs* ifaddr = nullptr; | 97 | struct ifaddrs* ifaddr = nullptr; |
| 100 | 98 | ||
| 101 | if (getifaddrs(&ifaddr) != 0) { | 99 | if (getifaddrs(&ifaddr) != 0) { |
| 102 | LOG_ERROR(Network, "Failed to get network interfaces with getifaddrs: {}", | 100 | LOG_ERROR(Network, "Failed to get network interfaces with getifaddrs: {}", |
| 103 | std::strerror(errno)); | 101 | std::strerror(errno)); |
| 104 | return result; | 102 | return {}; |
| 105 | } | 103 | } |
| 106 | 104 | ||
| 105 | std::vector<NetworkInterface> result; | ||
| 106 | |||
| 107 | for (auto ifa = ifaddr; ifa != nullptr; ifa = ifa->ifa_next) { | 107 | for (auto ifa = ifaddr; ifa != nullptr; ifa = ifa->ifa_next) { |
| 108 | if (ifa->ifa_addr == nullptr || ifa->ifa_netmask == nullptr) { | 108 | if (ifa->ifa_addr == nullptr || ifa->ifa_netmask == nullptr) { |
| 109 | continue; | 109 | continue; |
| @@ -117,55 +117,62 @@ std::vector<NetworkInterface> GetAvailableNetworkInterfaces() { | |||
| 117 | continue; | 117 | continue; |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | std::uint32_t gateway{0}; | 120 | u32 gateway{}; |
| 121 | |||
| 121 | std::ifstream file{"/proc/net/route"}; | 122 | std::ifstream file{"/proc/net/route"}; |
| 122 | if (file.is_open()) { | 123 | if (!file.is_open()) { |
| 124 | LOG_ERROR(Network, "Failed to open \"/proc/net/route\""); | ||
| 123 | 125 | ||
| 124 | // ignore header | 126 | result.emplace_back(NetworkInterface{ |
| 125 | file.ignore(std::numeric_limits<std::streamsize>::max(), '\n'); | 127 | .name{ifa->ifa_name}, |
| 128 | .ip_address{Common::BitCast<struct sockaddr_in>(*ifa->ifa_addr).sin_addr}, | ||
| 129 | .subnet_mask{Common::BitCast<struct sockaddr_in>(*ifa->ifa_netmask).sin_addr}, | ||
| 130 | .gateway{in_addr{.s_addr = gateway}}}); | ||
| 131 | continue; | ||
| 132 | } | ||
| 126 | 133 | ||
| 127 | bool gateway_found = false; | 134 | // ignore header |
| 135 | file.ignore(std::numeric_limits<std::streamsize>::max(), '\n'); | ||
| 128 | 136 | ||
| 129 | for (std::string line; std::getline(file, line);) { | 137 | bool gateway_found = false; |
| 130 | std::istringstream iss{line}; | ||
| 131 | 138 | ||
| 132 | std::string iface_name{}; | 139 | for (std::string line; std::getline(file, line);) { |
| 133 | iss >> iface_name; | 140 | std::istringstream iss{line}; |
| 134 | if (iface_name != ifa->ifa_name) { | ||
| 135 | continue; | ||
| 136 | } | ||
| 137 | 141 | ||
| 138 | iss >> std::hex; | 142 | std::string iface_name; |
| 143 | iss >> iface_name; | ||
| 144 | if (iface_name != ifa->ifa_name) { | ||
| 145 | continue; | ||
| 146 | } | ||
| 139 | 147 | ||
| 140 | std::uint32_t dest{0}; | 148 | iss >> std::hex; |
| 141 | iss >> dest; | ||
| 142 | if (dest != 0) { | ||
| 143 | // not the default route | ||
| 144 | continue; | ||
| 145 | } | ||
| 146 | 149 | ||
| 147 | iss >> gateway; | 150 | u32 dest{}; |
| 151 | iss >> dest; | ||
| 152 | if (dest != 0) { | ||
| 153 | // not the default route | ||
| 154 | continue; | ||
| 155 | } | ||
| 148 | 156 | ||
| 149 | std::uint16_t flags{0}; | 157 | iss >> gateway; |
| 150 | iss >> flags; | ||
| 151 | 158 | ||
| 152 | // flag RTF_GATEWAY (defined in <linux/route.h>) | 159 | u16 flags{}; |
| 153 | if ((flags & 0x2) == 0) { | 160 | iss >> flags; |
| 154 | continue; | ||
| 155 | } | ||
| 156 | 161 | ||
| 157 | gateway_found = true; | 162 | // flag RTF_GATEWAY (defined in <linux/route.h>) |
| 158 | break; | 163 | if ((flags & 0x2) == 0) { |
| 164 | continue; | ||
| 159 | } | 165 | } |
| 160 | 166 | ||
| 161 | if (!gateway_found) { | 167 | gateway_found = true; |
| 162 | gateway = 0; | 168 | break; |
| 163 | } | ||
| 164 | } else { | ||
| 165 | LOG_ERROR(Network, "Failed to open \"/proc/net/route\""); | ||
| 166 | } | 169 | } |
| 167 | 170 | ||
| 168 | result.push_back(NetworkInterface{ | 171 | if (!gateway_found) { |
| 172 | gateway = 0; | ||
| 173 | } | ||
| 174 | |||
| 175 | result.emplace_back(NetworkInterface{ | ||
| 169 | .name{ifa->ifa_name}, | 176 | .name{ifa->ifa_name}, |
| 170 | .ip_address{Common::BitCast<struct sockaddr_in>(*ifa->ifa_addr).sin_addr}, | 177 | .ip_address{Common::BitCast<struct sockaddr_in>(*ifa->ifa_addr).sin_addr}, |
| 171 | .subnet_mask{Common::BitCast<struct sockaddr_in>(*ifa->ifa_netmask).sin_addr}, | 178 | .subnet_mask{Common::BitCast<struct sockaddr_in>(*ifa->ifa_netmask).sin_addr}, |
| @@ -180,11 +187,11 @@ std::vector<NetworkInterface> GetAvailableNetworkInterfaces() { | |||
| 180 | #endif | 187 | #endif |
| 181 | 188 | ||
| 182 | std::optional<NetworkInterface> GetSelectedNetworkInterface() { | 189 | std::optional<NetworkInterface> GetSelectedNetworkInterface() { |
| 183 | const std::string& selected_network_interface = Settings::values.network_interface.GetValue(); | 190 | const auto& selected_network_interface = Settings::values.network_interface.GetValue(); |
| 184 | const auto network_interfaces = Network::GetAvailableNetworkInterfaces(); | 191 | const auto network_interfaces = Network::GetAvailableNetworkInterfaces(); |
| 185 | if (network_interfaces.size() == 0) { | 192 | if (network_interfaces.size() == 0) { |
| 186 | LOG_ERROR(Network, "GetAvailableNetworkInterfaces returned no interfaces"); | 193 | LOG_ERROR(Network, "GetAvailableNetworkInterfaces returned no interfaces"); |
| 187 | return {}; | 194 | return std::nullopt; |
| 188 | } | 195 | } |
| 189 | 196 | ||
| 190 | const auto res = | 197 | const auto res = |
| @@ -192,12 +199,12 @@ std::optional<NetworkInterface> GetSelectedNetworkInterface() { | |||
| 192 | return iface.name == selected_network_interface; | 199 | return iface.name == selected_network_interface; |
| 193 | }); | 200 | }); |
| 194 | 201 | ||
| 195 | if (res != network_interfaces.end()) { | 202 | if (res == network_interfaces.end()) { |
| 196 | return *res; | ||
| 197 | } else { | ||
| 198 | LOG_ERROR(Network, "Couldn't find selected interface \"{}\"", selected_network_interface); | 203 | LOG_ERROR(Network, "Couldn't find selected interface \"{}\"", selected_network_interface); |
| 199 | return {}; | 204 | return std::nullopt; |
| 200 | } | 205 | } |
| 206 | |||
| 207 | return *res; | ||
| 201 | } | 208 | } |
| 202 | 209 | ||
| 203 | } // namespace Network | 210 | } // namespace Network |
diff --git a/src/video_core/command_classes/codecs/vp9.cpp b/src/video_core/command_classes/codecs/vp9.cpp index 70030066a..d7e749485 100644 --- a/src/video_core/command_classes/codecs/vp9.cpp +++ b/src/video_core/command_classes/codecs/vp9.cpp | |||
| @@ -742,6 +742,7 @@ VpxBitStreamWriter VP9::ComposeUncompressedHeader() { | |||
| 742 | uncomp_writer.WriteDeltaQ(current_frame_info.uv_dc_delta_q); | 742 | uncomp_writer.WriteDeltaQ(current_frame_info.uv_dc_delta_q); |
| 743 | uncomp_writer.WriteDeltaQ(current_frame_info.uv_ac_delta_q); | 743 | uncomp_writer.WriteDeltaQ(current_frame_info.uv_ac_delta_q); |
| 744 | 744 | ||
| 745 | ASSERT(!current_frame_info.segment_enabled); | ||
| 745 | uncomp_writer.WriteBit(false); // Segmentation enabled (TODO). | 746 | uncomp_writer.WriteBit(false); // Segmentation enabled (TODO). |
| 746 | 747 | ||
| 747 | const s32 min_tile_cols_log2 = CalcMinLog2TileCols(current_frame_info.frame_size.width); | 748 | const s32 min_tile_cols_log2 = CalcMinLog2TileCols(current_frame_info.frame_size.width); |
diff --git a/src/video_core/command_classes/codecs/vp9_types.h b/src/video_core/command_classes/codecs/vp9_types.h index 87eafdb03..3b1ed4b3a 100644 --- a/src/video_core/command_classes/codecs/vp9_types.h +++ b/src/video_core/command_classes/codecs/vp9_types.h | |||
| @@ -22,7 +22,7 @@ struct Vp9FrameDimensions { | |||
| 22 | }; | 22 | }; |
| 23 | static_assert(sizeof(Vp9FrameDimensions) == 0x8, "Vp9 Vp9FrameDimensions is an invalid size"); | 23 | static_assert(sizeof(Vp9FrameDimensions) == 0x8, "Vp9 Vp9FrameDimensions is an invalid size"); |
| 24 | 24 | ||
| 25 | enum FrameFlags : u32 { | 25 | enum class FrameFlags : u32 { |
| 26 | IsKeyFrame = 1 << 0, | 26 | IsKeyFrame = 1 << 0, |
| 27 | LastFrameIsKeyFrame = 1 << 1, | 27 | LastFrameIsKeyFrame = 1 << 1, |
| 28 | FrameSizeChanged = 1 << 2, | 28 | FrameSizeChanged = 1 << 2, |
| @@ -30,6 +30,7 @@ enum FrameFlags : u32 { | |||
| 30 | LastShowFrame = 1 << 4, | 30 | LastShowFrame = 1 << 4, |
| 31 | IntraOnly = 1 << 5, | 31 | IntraOnly = 1 << 5, |
| 32 | }; | 32 | }; |
| 33 | DECLARE_ENUM_FLAG_OPERATORS(FrameFlags) | ||
| 33 | 34 | ||
| 34 | enum class TxSize { | 35 | enum class TxSize { |
| 35 | Tx4x4 = 0, // 4x4 transform | 36 | Tx4x4 = 0, // 4x4 transform |
| @@ -92,44 +93,34 @@ struct Vp9EntropyProbs { | |||
| 92 | static_assert(sizeof(Vp9EntropyProbs) == 0x7B4, "Vp9EntropyProbs is an invalid size"); | 93 | static_assert(sizeof(Vp9EntropyProbs) == 0x7B4, "Vp9EntropyProbs is an invalid size"); |
| 93 | 94 | ||
| 94 | struct Vp9PictureInfo { | 95 | struct Vp9PictureInfo { |
| 95 | bool is_key_frame; | 96 | u32 bitstream_size; |
| 96 | bool intra_only; | 97 | std::array<u64, 4> frame_offsets; |
| 97 | bool last_frame_was_key; | ||
| 98 | bool frame_size_changed; | ||
| 99 | bool error_resilient_mode; | ||
| 100 | bool last_frame_shown; | ||
| 101 | bool show_frame; | ||
| 102 | std::array<s8, 4> ref_frame_sign_bias; | 98 | std::array<s8, 4> ref_frame_sign_bias; |
| 103 | s32 base_q_index; | 99 | s32 base_q_index; |
| 104 | s32 y_dc_delta_q; | 100 | s32 y_dc_delta_q; |
| 105 | s32 uv_dc_delta_q; | 101 | s32 uv_dc_delta_q; |
| 106 | s32 uv_ac_delta_q; | 102 | s32 uv_ac_delta_q; |
| 107 | bool lossless; | ||
| 108 | s32 transform_mode; | 103 | s32 transform_mode; |
| 109 | bool allow_high_precision_mv; | ||
| 110 | s32 interp_filter; | 104 | s32 interp_filter; |
| 111 | s32 reference_mode; | 105 | s32 reference_mode; |
| 112 | s8 comp_fixed_ref; | ||
| 113 | std::array<s8, 2> comp_var_ref; | ||
| 114 | s32 log2_tile_cols; | 106 | s32 log2_tile_cols; |
| 115 | s32 log2_tile_rows; | 107 | s32 log2_tile_rows; |
| 116 | bool segment_enabled; | ||
| 117 | bool segment_map_update; | ||
| 118 | bool segment_map_temporal_update; | ||
| 119 | s32 segment_abs_delta; | ||
| 120 | std::array<u32, 8> segment_feature_enable; | ||
| 121 | std::array<std::array<s16, 4>, 8> segment_feature_data; | ||
| 122 | bool mode_ref_delta_enabled; | ||
| 123 | bool use_prev_in_find_mv_refs; | ||
| 124 | std::array<s8, 4> ref_deltas; | 108 | std::array<s8, 4> ref_deltas; |
| 125 | std::array<s8, 2> mode_deltas; | 109 | std::array<s8, 2> mode_deltas; |
| 126 | Vp9EntropyProbs entropy; | 110 | Vp9EntropyProbs entropy; |
| 127 | Vp9FrameDimensions frame_size; | 111 | Vp9FrameDimensions frame_size; |
| 128 | u8 first_level; | 112 | u8 first_level; |
| 129 | u8 sharpness_level; | 113 | u8 sharpness_level; |
| 130 | u32 bitstream_size; | 114 | bool is_key_frame; |
| 131 | std::array<u64, 4> frame_offsets; | 115 | bool intra_only; |
| 132 | std::array<bool, 4> refresh_frame; | 116 | bool last_frame_was_key; |
| 117 | bool error_resilient_mode; | ||
| 118 | bool last_frame_shown; | ||
| 119 | bool show_frame; | ||
| 120 | bool lossless; | ||
| 121 | bool allow_high_precision_mv; | ||
| 122 | bool segment_enabled; | ||
| 123 | bool mode_ref_delta_enabled; | ||
| 133 | }; | 124 | }; |
| 134 | 125 | ||
| 135 | struct Vp9FrameContainer { | 126 | struct Vp9FrameContainer { |
| @@ -145,7 +136,7 @@ struct PictureInfo { | |||
| 145 | Vp9FrameDimensions golden_frame_size; ///< 0x50 | 136 | Vp9FrameDimensions golden_frame_size; ///< 0x50 |
| 146 | Vp9FrameDimensions alt_frame_size; ///< 0x58 | 137 | Vp9FrameDimensions alt_frame_size; ///< 0x58 |
| 147 | Vp9FrameDimensions current_frame_size; ///< 0x60 | 138 | Vp9FrameDimensions current_frame_size; ///< 0x60 |
| 148 | u32 vp9_flags; ///< 0x68 | 139 | FrameFlags vp9_flags; ///< 0x68 |
| 149 | std::array<s8, 4> ref_frame_sign_bias; ///< 0x6C | 140 | std::array<s8, 4> ref_frame_sign_bias; ///< 0x6C |
| 150 | u8 first_level; ///< 0x70 | 141 | u8 first_level; ///< 0x70 |
| 151 | u8 sharpness_level; ///< 0x71 | 142 | u8 sharpness_level; ///< 0x71 |
| @@ -158,60 +149,43 @@ struct PictureInfo { | |||
| 158 | u8 allow_high_precision_mv; ///< 0x78 | 149 | u8 allow_high_precision_mv; ///< 0x78 |
| 159 | u8 interp_filter; ///< 0x79 | 150 | u8 interp_filter; ///< 0x79 |
| 160 | u8 reference_mode; ///< 0x7A | 151 | u8 reference_mode; ///< 0x7A |
| 161 | s8 comp_fixed_ref; ///< 0x7B | 152 | INSERT_PADDING_BYTES_NOINIT(3); ///< 0x7B |
| 162 | std::array<s8, 2> comp_var_ref; ///< 0x7C | ||
| 163 | u8 log2_tile_cols; ///< 0x7E | 153 | u8 log2_tile_cols; ///< 0x7E |
| 164 | u8 log2_tile_rows; ///< 0x7F | 154 | u8 log2_tile_rows; ///< 0x7F |
| 165 | Segmentation segmentation; ///< 0x80 | 155 | Segmentation segmentation; ///< 0x80 |
| 166 | LoopFilter loop_filter; ///< 0xE4 | 156 | LoopFilter loop_filter; ///< 0xE4 |
| 167 | INSERT_PADDING_BYTES_NOINIT(5); ///< 0xEB | 157 | INSERT_PADDING_BYTES_NOINIT(21); ///< 0xEB |
| 168 | u32 surface_params; ///< 0xF0 | ||
| 169 | INSERT_PADDING_WORDS_NOINIT(3); ///< 0xF4 | ||
| 170 | 158 | ||
| 171 | [[nodiscard]] Vp9PictureInfo Convert() const { | 159 | [[nodiscard]] Vp9PictureInfo Convert() const { |
| 172 | return { | 160 | return { |
| 173 | .is_key_frame = (vp9_flags & FrameFlags::IsKeyFrame) != 0, | 161 | .bitstream_size = bitstream_size, |
| 174 | .intra_only = (vp9_flags & FrameFlags::IntraOnly) != 0, | 162 | .frame_offsets{}, |
| 175 | .last_frame_was_key = (vp9_flags & FrameFlags::LastFrameIsKeyFrame) != 0, | ||
| 176 | .frame_size_changed = (vp9_flags & FrameFlags::FrameSizeChanged) != 0, | ||
| 177 | .error_resilient_mode = (vp9_flags & FrameFlags::ErrorResilientMode) != 0, | ||
| 178 | .last_frame_shown = (vp9_flags & FrameFlags::LastShowFrame) != 0, | ||
| 179 | .show_frame = true, | ||
| 180 | .ref_frame_sign_bias = ref_frame_sign_bias, | 163 | .ref_frame_sign_bias = ref_frame_sign_bias, |
| 181 | .base_q_index = base_q_index, | 164 | .base_q_index = base_q_index, |
| 182 | .y_dc_delta_q = y_dc_delta_q, | 165 | .y_dc_delta_q = y_dc_delta_q, |
| 183 | .uv_dc_delta_q = uv_dc_delta_q, | 166 | .uv_dc_delta_q = uv_dc_delta_q, |
| 184 | .uv_ac_delta_q = uv_ac_delta_q, | 167 | .uv_ac_delta_q = uv_ac_delta_q, |
| 185 | .lossless = lossless != 0, | ||
| 186 | .transform_mode = tx_mode, | 168 | .transform_mode = tx_mode, |
| 187 | .allow_high_precision_mv = allow_high_precision_mv != 0, | ||
| 188 | .interp_filter = interp_filter, | 169 | .interp_filter = interp_filter, |
| 189 | .reference_mode = reference_mode, | 170 | .reference_mode = reference_mode, |
| 190 | .comp_fixed_ref = comp_fixed_ref, | ||
| 191 | .comp_var_ref = comp_var_ref, | ||
| 192 | .log2_tile_cols = log2_tile_cols, | 171 | .log2_tile_cols = log2_tile_cols, |
| 193 | .log2_tile_rows = log2_tile_rows, | 172 | .log2_tile_rows = log2_tile_rows, |
| 194 | .segment_enabled = segmentation.enabled != 0, | ||
| 195 | .segment_map_update = segmentation.update_map != 0, | ||
| 196 | .segment_map_temporal_update = segmentation.temporal_update != 0, | ||
| 197 | .segment_abs_delta = segmentation.abs_delta, | ||
| 198 | .segment_feature_enable = segmentation.feature_mask, | ||
| 199 | .segment_feature_data = segmentation.feature_data, | ||
| 200 | .mode_ref_delta_enabled = loop_filter.mode_ref_delta_enabled != 0, | ||
| 201 | .use_prev_in_find_mv_refs = !(vp9_flags == (FrameFlags::ErrorResilientMode)) && | ||
| 202 | !(vp9_flags == (FrameFlags::FrameSizeChanged)) && | ||
| 203 | !(vp9_flags == (FrameFlags::IntraOnly)) && | ||
| 204 | (vp9_flags == (FrameFlags::LastShowFrame)) && | ||
| 205 | !(vp9_flags == (FrameFlags::LastFrameIsKeyFrame)), | ||
| 206 | .ref_deltas = loop_filter.ref_deltas, | 173 | .ref_deltas = loop_filter.ref_deltas, |
| 207 | .mode_deltas = loop_filter.mode_deltas, | 174 | .mode_deltas = loop_filter.mode_deltas, |
| 208 | .entropy{}, | 175 | .entropy{}, |
| 209 | .frame_size = current_frame_size, | 176 | .frame_size = current_frame_size, |
| 210 | .first_level = first_level, | 177 | .first_level = first_level, |
| 211 | .sharpness_level = sharpness_level, | 178 | .sharpness_level = sharpness_level, |
| 212 | .bitstream_size = bitstream_size, | 179 | .is_key_frame = True(vp9_flags & FrameFlags::IsKeyFrame), |
| 213 | .frame_offsets{}, | 180 | .intra_only = True(vp9_flags & FrameFlags::IntraOnly), |
| 214 | .refresh_frame{}, | 181 | .last_frame_was_key = True(vp9_flags & FrameFlags::LastFrameIsKeyFrame), |
| 182 | .error_resilient_mode = True(vp9_flags & FrameFlags::ErrorResilientMode), | ||
| 183 | .last_frame_shown = True(vp9_flags & FrameFlags::LastShowFrame), | ||
| 184 | .show_frame = true, | ||
| 185 | .lossless = lossless != 0, | ||
| 186 | .allow_high_precision_mv = allow_high_precision_mv != 0, | ||
| 187 | .segment_enabled = segmentation.enabled != 0, | ||
| 188 | .mode_ref_delta_enabled = loop_filter.mode_ref_delta_enabled != 0, | ||
| 215 | }; | 189 | }; |
| 216 | } | 190 | } |
| 217 | }; | 191 | }; |
| @@ -316,7 +290,6 @@ ASSERT_POSITION(last_frame_size, 0x48); | |||
| 316 | ASSERT_POSITION(first_level, 0x70); | 290 | ASSERT_POSITION(first_level, 0x70); |
| 317 | ASSERT_POSITION(segmentation, 0x80); | 291 | ASSERT_POSITION(segmentation, 0x80); |
| 318 | ASSERT_POSITION(loop_filter, 0xE4); | 292 | ASSERT_POSITION(loop_filter, 0xE4); |
| 319 | ASSERT_POSITION(surface_params, 0xF0); | ||
| 320 | #undef ASSERT_POSITION | 293 | #undef ASSERT_POSITION |
| 321 | 294 | ||
| 322 | #define ASSERT_POSITION(field_name, position) \ | 295 | #define ASSERT_POSITION(field_name, position) \ |
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp index 5c43b8acf..cb0580182 100644 --- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp +++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp | |||
| @@ -159,11 +159,13 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, | |||
| 159 | 159 | ||
| 160 | const VAddr framebuffer_addr = framebuffer.address + framebuffer.offset; | 160 | const VAddr framebuffer_addr = framebuffer.address + framebuffer.offset; |
| 161 | const u8* const host_ptr = cpu_memory.GetPointer(framebuffer_addr); | 161 | const u8* const host_ptr = cpu_memory.GetPointer(framebuffer_addr); |
| 162 | const size_t size_bytes = GetSizeInBytes(framebuffer); | ||
| 163 | 162 | ||
| 164 | // TODO(Rodrigo): Read this from HLE | 163 | // TODO(Rodrigo): Read this from HLE |
| 165 | constexpr u32 block_height_log2 = 4; | 164 | constexpr u32 block_height_log2 = 4; |
| 166 | const u32 bytes_per_pixel = GetBytesPerPixel(framebuffer); | 165 | const u32 bytes_per_pixel = GetBytesPerPixel(framebuffer); |
| 166 | const u64 size_bytes{Tegra::Texture::CalculateSize(true, bytes_per_pixel, | ||
| 167 | framebuffer.stride, framebuffer.height, | ||
| 168 | 1, block_height_log2, 0)}; | ||
| 167 | Tegra::Texture::UnswizzleTexture( | 169 | Tegra::Texture::UnswizzleTexture( |
| 168 | mapped_span.subspan(image_offset, size_bytes), std::span(host_ptr, size_bytes), | 170 | mapped_span.subspan(image_offset, size_bytes), std::span(host_ptr, size_bytes), |
| 169 | bytes_per_pixel, framebuffer.width, framebuffer.height, 1, block_height_log2, 0); | 171 | bytes_per_pixel, framebuffer.width, framebuffer.height, 1, block_height_log2, 0); |
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp index c010b9353..24e943e4c 100644 --- a/src/video_core/textures/decoders.cpp +++ b/src/video_core/textures/decoders.cpp | |||
| @@ -63,14 +63,6 @@ void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32 | |||
| 63 | const u32 unswizzled_offset = | 63 | const u32 unswizzled_offset = |
| 64 | slice * pitch * height + line * pitch + column * BYTES_PER_PIXEL; | 64 | slice * pitch * height + line * pitch + column * BYTES_PER_PIXEL; |
| 65 | 65 | ||
| 66 | if (const auto offset = (TO_LINEAR ? unswizzled_offset : swizzled_offset); | ||
| 67 | offset >= input.size()) { | ||
| 68 | // TODO(Rodrigo): This is an out of bounds access that should never happen. To | ||
| 69 | // avoid crashing the emulator, break. | ||
| 70 | ASSERT_MSG(false, "offset {} exceeds input size {}!", offset, input.size()); | ||
| 71 | break; | ||
| 72 | } | ||
| 73 | |||
| 74 | u8* const dst = &output[TO_LINEAR ? swizzled_offset : unswizzled_offset]; | 66 | u8* const dst = &output[TO_LINEAR ? swizzled_offset : unswizzled_offset]; |
| 75 | const u8* const src = &input[TO_LINEAR ? unswizzled_offset : swizzled_offset]; | 67 | const u8* const src = &input[TO_LINEAR ? unswizzled_offset : swizzled_offset]; |
| 76 | 68 | ||