| author | 2021-01-19 21:05:24 -0800 |
|---|---|
| committer | 2021-01-28 21:42:26 -0800 |
| commit | c0f5830323ca5d5bdc2e5e494fcaeaf27fffeb6b (patch) |
| tree | e138e7d0ecb6a306261e2871fd0da405571deaab /src |
| parent | common: common_funcs: Add useful kernel macro R_SUCCEED_IF. (diff) |
hle: kernel: TimeManager: Simplify to not rely on previous EmuThreadHandle implementation.
Diffstat (limited to 'src')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/core/hle/kernel/k_address_arbiter.cpp | 16 |
| -rw-r--r-- | src/core/hle/kernel/k_condition_variable.cpp | 8 |
| -rw-r--r-- | src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h | 14 |
| -rw-r--r-- | src/core/hle/kernel/k_synchronization_object.cpp | 8 |
| -rw-r--r-- | src/core/hle/kernel/time_manager.cpp | 40 |
| -rw-r--r-- | src/core/hle/kernel/time_manager.h | 8 |
6 files changed, 25 insertions, 69 deletions

    diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
    index 282f02257..1685d25bb 100644
    --- a/src/core/hle/kernel/k_address_arbiter.cpp
    +++ b/src/core/hle/kernel/k_address_arbiter.cpp
    @@ -232,10 +232,9 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
     ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
         // Prepare to wait.
         KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
    -    Handle timer = InvalidHandle;
     
         {
    -        KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
    +        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
     
             // Check that the thread isn't terminating.
             if (cur_thread->IsTerminationRequested()) {
    @@ -280,10 +279,7 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
         }
     
         // Cancel the timer wait.
    -    if (timer != InvalidHandle) {
    -        auto& time_manager = kernel.TimeManager();
    -        time_manager.UnscheduleTimeEvent(timer);
    -    }
    +    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
     
         // Remove from the address arbiter.
         {
    @@ -303,10 +299,9 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
     ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
         // Prepare to wait.
         KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
    -    Handle timer = InvalidHandle;
     
         {
    -        KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
    +        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
     
             // Check that the thread isn't terminating.
             if (cur_thread->IsTerminationRequested()) {
    @@ -344,10 +339,7 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
         }
     
         // Cancel the timer wait.
    -    if (timer != InvalidHandle) {
    -        auto& time_manager = kernel.TimeManager();
    -        time_manager.UnscheduleTimeEvent(timer);
    -    }
    +    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
     
         // Remove from the address arbiter.
         {
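
The two hunks above establish the call-site pattern that the condition-variable and synchronization-object diffs below repeat: the `Handle timer` out-parameter disappears, the scoped sleep is constructed from the thread alone, and the cancel path becomes a single unconditional `UnscheduleTimeEvent(cur_thread)` call. A minimal, self-contained sketch of that contract, using toy stand-ins rather than yuzu's real types (the real `TimeManager` defers to `Core::Timing` instead of a local set):

```cpp
// Toy illustration of the new call-site contract: timeouts are keyed by the
// thread pointer itself, so cancelling never needs an InvalidHandle check.
#include <cstdint>
#include <unordered_set>

struct ToyThread {};

class ToyTimeManager {
public:
    // A non-positive timeout schedules nothing, mirroring the `nanoseconds > 0` guard.
    void ScheduleTimeEvent(ToyThread* thread, std::int64_t nanoseconds) {
        if (nanoseconds > 0) {
            pending.insert(thread);
        }
    }

    // Unconditional cancel: erasing a thread that was never scheduled is a no-op.
    void UnscheduleTimeEvent(ToyThread* thread) {
        pending.erase(thread);
    }

private:
    std::unordered_set<ToyThread*> pending;
};

int main() {
    ToyTimeManager time_manager;
    ToyThread waiter;

    time_manager.ScheduleTimeEvent(&waiter, 1'000'000); // arm a 1 ms timeout
    // ... the thread would block here ...
    time_manager.UnscheduleTimeEvent(&waiter);          // always called, no handle bookkeeping
}
```

In the actual patch the scheduling side of this pairing is driven by the destructor of `KScopedSchedulerLockAndSleep`, shown in the header diff further below.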

    diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
    index 2fa2d5289..f0ad8b390 100644
    --- a/src/core/hle/kernel/k_condition_variable.cpp
    +++ b/src/core/hle/kernel/k_condition_variable.cpp
    @@ -258,10 +258,9 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
     ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
         // Prepare to wait.
         KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
    -    Handle timer = InvalidHandle;
     
         {
    -        KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
    +        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
     
             // Set the synced object.
             cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
    @@ -322,10 +321,7 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
         }
     
         // Cancel the timer wait.
    -    if (timer != InvalidHandle) {
    -        auto& time_manager = kernel.TimeManager();
    -        time_manager.UnscheduleTimeEvent(timer);
    -    }
    +    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
     
         // Remove from the condition variable.
         {

    diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
    index fac39aeb7..f8189e107 100644
    --- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
    +++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
    @@ -17,19 +17,16 @@ namespace Kernel {
     
     class KScopedSchedulerLockAndSleep {
     public:
    -    explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, KThread* t,
    -                                          s64 timeout)
    -        : kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) {
    -        event_handle = InvalidHandle;
    -
    +    explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, KThread* t, s64 timeout)
    +        : kernel(kernel), thread(t), timeout_tick(timeout) {
             // Lock the scheduler.
             kernel.GlobalSchedulerContext().scheduler_lock.Lock();
         }
     
         ~KScopedSchedulerLockAndSleep() {
             // Register the sleep.
    -        if (this->timeout_tick > 0) {
    -            kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick);
    +        if (timeout_tick > 0) {
    +            kernel.TimeManager().ScheduleTimeEvent(thread, timeout_tick);
             }
     
             // Unlock the scheduler.
    @@ -37,12 +34,11 @@ public:
         }
     
         void CancelSleep() {
    -        this->timeout_tick = 0;
    +        timeout_tick = 0;
         }
     
     private:
         KernelCore& kernel;
    -    Handle& event_handle;
         KThread* thread{};
         s64 timeout_tick{};
     };
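
For reference, here is a stand-alone toy model of the RAII flow this header keeps after the cleanup: take the lock on construction, and register the sleep from the destructor unless `CancelSleep()` zeroed the timeout first. The kernel, scheduler lock, and time manager are replaced with trivial stand-ins (the real class locks `kernel.GlobalSchedulerContext().scheduler_lock` explicitly rather than using a `lock_guard`).

```cpp
// Toy model (not yuzu code) of the scoped lock-and-sleep flow: lock on
// construction, register the sleep on destruction unless it was cancelled.
#include <cstdint>
#include <iostream>
#include <mutex>

struct ToyThread {
    const char* name;
};

struct ToyTimeManager {
    void ScheduleTimeEvent(ToyThread* thread, std::int64_t nanoseconds) {
        std::cout << thread->name << ": sleep registered for " << nanoseconds << " ns\n";
    }
};

class ScopedLockAndSleep {
public:
    ScopedLockAndSleep(std::mutex& scheduler_lock, ToyTimeManager& tm, ToyThread* t,
                       std::int64_t timeout)
        : lock{scheduler_lock}, time_manager{tm}, thread{t}, timeout_tick{timeout} {}

    ~ScopedLockAndSleep() {
        // Register the sleep only if it was not cancelled (timeout still positive).
        if (timeout_tick > 0) {
            time_manager.ScheduleTimeEvent(thread, timeout_tick);
        }
        // The lock is released by ~lock_guard after this body runs.
    }

    void CancelSleep() {
        timeout_tick = 0;
    }

private:
    std::lock_guard<std::mutex> lock;
    ToyTimeManager& time_manager;
    ToyThread* thread{};
    std::int64_t timeout_tick{};
};

int main() {
    std::mutex scheduler_lock;
    ToyTimeManager tm;
    ToyThread waiter{"waiter"};
    {
        ScopedLockAndSleep slp{scheduler_lock, tm, &waiter, 500};
        // slp.CancelSleep();  // uncomment to skip registering the sleep
    }  // destructor fires here: sleep registered, then lock released
}
```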

    diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
    index 18e7026f5..a3b34f82f 100644
    --- a/src/core/hle/kernel/k_synchronization_object.cpp
    +++ b/src/core/hle/kernel/k_synchronization_object.cpp
    @@ -21,11 +21,10 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
     
         // Prepare for wait.
         KThread* thread = kernel.CurrentScheduler()->GetCurrentThread();
    -    Handle timer = InvalidHandle;
     
         {
             // Setup the scheduling lock and sleep.
    -        KScopedSchedulerLockAndSleep slp(kernel, timer, thread, timeout);
    +        KScopedSchedulerLockAndSleep slp{kernel, thread, timeout};
     
             // Check if any of the objects are already signaled.
             for (auto i = 0; i < num_objects; ++i) {
    @@ -90,10 +89,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
         thread->SetWaitObjectsForDebugging({});
     
         // Cancel the timer as needed.
    -    if (timer != InvalidHandle) {
    -        auto& time_manager = kernel.TimeManager();
    -        time_manager.UnscheduleTimeEvent(timer);
    -    }
    +    kernel.TimeManager().UnscheduleTimeEvent(thread);
     
         // Get the wait result.
         ResultCode wait_result{RESULT_SUCCESS};

    diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
    index aaeef3033..fd0630019 100644
    --- a/src/core/hle/kernel/time_manager.cpp
    +++ b/src/core/hle/kernel/time_manager.cpp
    @@ -21,47 +21,27 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
                 std::shared_ptr<KThread> thread;
                 {
                     std::lock_guard lock{mutex};
    -                const auto proper_handle = static_cast<Handle>(thread_handle);
    -                if (cancelled_events[proper_handle]) {
    -                    return;
    -                }
    -                thread = system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
    -            }
    -
    -            if (thread) {
    -                // Thread can be null if process has exited
    -                thread->Wakeup();
    +                thread = SharedFrom<KThread>(reinterpret_cast<KThread*>(thread_handle));
                 }
    +            thread->Wakeup();
             });
     }
     
    -void TimeManager::ScheduleTimeEvent(Handle& event_handle, KThread* timetask, s64 nanoseconds) {
    +void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
         std::lock_guard lock{mutex};
    -    event_handle = timetask->GetGlobalHandle();
         if (nanoseconds > 0) {
    -        ASSERT(timetask);
    -        ASSERT(timetask->GetState() != ThreadState::Runnable);
    +        ASSERT(thread);
    +        ASSERT(thread->GetState() != ThreadState::Runnable);
             system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{nanoseconds},
    -                                          time_manager_event_type, event_handle);
    -    } else {
    -        event_handle = InvalidHandle;
    -    }
    -    cancelled_events[event_handle] = false;
    -}
    -
    -void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
    -    std::lock_guard lock{mutex};
    -    if (event_handle == InvalidHandle) {
    -        return;
    +                                          time_manager_event_type,
    +                                          reinterpret_cast<uintptr_t>(thread));
         }
    -    system.CoreTiming().UnscheduleEvent(time_manager_event_type, event_handle);
    -    cancelled_events[event_handle] = true;
     }
     
    -void TimeManager::CancelTimeEvent(KThread* time_task) {
    +void TimeManager::UnscheduleTimeEvent(KThread* thread) {
         std::lock_guard lock{mutex};
    -    const Handle event_handle = time_task->GetGlobalHandle();
    -    UnscheduleTimeEvent(event_handle);
    +    system.CoreTiming().UnscheduleEvent(time_manager_event_type,
    +                                        reinterpret_cast<uintptr_t>(thread));
     }
     
     } // namespace Kernel
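
With the global handle table and the `cancelled_events` map gone, the only payload that reaches the callback is the integer user-data value the event was scheduled with, into which this diff packs the `KThread*` via `reinterpret_cast`. A minimal, self-contained sketch of that round trip, with a stand-in callback signature rather than the real `Core::Timing` one:

```cpp
// Toy demonstration of passing a pointer through an integer user-data slot,
// as the new ScheduleTimeEvent/UnscheduleTimeEvent pair does with KThread*.
#include <cstdint>
#include <iostream>

struct ToyThread {
    const char* name;
    void Wakeup() { std::cout << name << " woken up\n"; }
};

// Stand-in for a timing callback that only receives an integer payload.
void TimerFired(std::uintptr_t user_data) {
    auto* thread = reinterpret_cast<ToyThread*>(user_data);
    thread->Wakeup();
}

int main() {
    ToyThread waiter{"waiter"};
    // Pack the pointer when scheduling the event ...
    const auto payload = reinterpret_cast<std::uintptr_t>(&waiter);
    // ... and recover it when the event fires.
    TimerFired(payload);
}
```

Because `UnscheduleEvent` is keyed on the same payload, a cancelled event is removed from the timing queue and never fires, which is presumably what makes the old `cancelled_events` bookkeeping and the null-thread check in the callback unnecessary.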

    diff --git a/src/core/hle/kernel/time_manager.h b/src/core/hle/kernel/time_manager.h
    index 7cc702bec..0d7f05f30 100644
    --- a/src/core/hle/kernel/time_manager.h
    +++ b/src/core/hle/kernel/time_manager.h
    @@ -31,18 +31,14 @@ public:
         explicit TimeManager(Core::System& system);
     
         /// Schedule a time event on `timetask` thread that will expire in 'nanoseconds'
    -    /// returns a non-invalid handle in `event_handle` if correctly scheduled
    -    void ScheduleTimeEvent(Handle& event_handle, KThread* timetask, s64 nanoseconds);
    +    void ScheduleTimeEvent(KThread* time_task, s64 nanoseconds);
     
         /// Unschedule an existing time event
    -    void UnscheduleTimeEvent(Handle event_handle);
    -
    -    void CancelTimeEvent(KThread* time_task);
    +    void UnscheduleTimeEvent(KThread* thread);
     
     private:
         Core::System& system;
         std::shared_ptr<Core::Timing::EventType> time_manager_event_type;
    -    std::unordered_map<Handle, bool> cancelled_events;
         std::mutex mutex;
     };
     