diff options
| author | 2020-03-03 15:50:38 -0400 | |
|---|---|---|
| committer | 2020-06-27 11:35:23 -0400 | |
| commit | 07993ac8c8c66bbf638dddc7750106f6dfb0e09b (patch) | |
| tree | dae8c8249f66657e232dc2620489d237e17459c8 /src | |
| parent | Kernel: Correct Signal on Thread Death and Setup Sync Objects on Thread for D... (diff) | |
| download | yuzu-07993ac8c8c66bbf638dddc7750106f6dfb0e09b.tar.gz yuzu-07993ac8c8c66bbf638dddc7750106f6dfb0e09b.tar.xz yuzu-07993ac8c8c66bbf638dddc7750106f6dfb0e09b.zip | |
Kernel: Corrections to Scheduling.
Diffstat (limited to 'src')
| -rw-r--r-- | src/core/core_timing.cpp | 11 | ||||
| -rw-r--r-- | src/core/core_timing.h | 2 | ||||
| -rw-r--r-- | src/core/hle/kernel/scheduler.cpp | 26 | ||||
| -rw-r--r-- | src/core/hle/kernel/scheduler.h | 2 | ||||
| -rw-r--r-- | src/core/hle/kernel/svc.cpp | 1 |
5 files changed, 23 insertions(+), 19 deletions(-)
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp index 5a7abcfca..c91ae9975 100644 --- a/src/core/core_timing.cpp +++ b/src/core/core_timing.cpp | |||
| @@ -154,7 +154,7 @@ void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) { | |||
| 154 | basic_lock.unlock(); | 154 | basic_lock.unlock(); |
| 155 | } | 155 | } |
| 156 | 156 | ||
| 157 | std::optional<u64> CoreTiming::Advance() { | 157 | std::optional<s64> CoreTiming::Advance() { |
| 158 | advance_lock.lock(); | 158 | advance_lock.lock(); |
| 159 | basic_lock.lock(); | 159 | basic_lock.lock(); |
| 160 | global_timer = GetGlobalTimeNs().count(); | 160 | global_timer = GetGlobalTimeNs().count(); |
| @@ -170,10 +170,11 @@ std::optional<u64> CoreTiming::Advance() { | |||
| 170 | } | 170 | } |
| 171 | 171 | ||
| 172 | basic_lock.lock(); | 172 | basic_lock.lock(); |
| 173 | global_timer = GetGlobalTimeNs().count(); | ||
| 173 | } | 174 | } |
| 174 | 175 | ||
| 175 | if (!event_queue.empty()) { | 176 | if (!event_queue.empty()) { |
| 176 | const u64 next_time = event_queue.front().time - global_timer; | 177 | const s64 next_time = event_queue.front().time - global_timer; |
| 177 | basic_lock.unlock(); | 178 | basic_lock.unlock(); |
| 178 | advance_lock.unlock(); | 179 | advance_lock.unlock(); |
| 179 | return next_time; | 180 | return next_time; |
| @@ -191,8 +192,10 @@ void CoreTiming::ThreadLoop() { | |||
| 191 | paused_set = false; | 192 | paused_set = false; |
| 192 | const auto next_time = Advance(); | 193 | const auto next_time = Advance(); |
| 193 | if (next_time) { | 194 | if (next_time) { |
| 194 | std::chrono::nanoseconds next_time_ns = std::chrono::nanoseconds(*next_time); | 195 | if (*next_time > 0) { |
| 195 | event.WaitFor(next_time_ns); | 196 | std::chrono::nanoseconds next_time_ns = std::chrono::nanoseconds(*next_time); |
| 197 | event.WaitFor(next_time_ns); | ||
| 198 | } | ||
| 196 | } else { | 199 | } else { |
| 197 | wait_set = true; | 200 | wait_set = true; |
| 198 | event.Wait(); | 201 | event.Wait(); |
diff --git a/src/core/core_timing.h b/src/core/core_timing.h index c70b605c8..032eb08aa 100644 --- a/src/core/core_timing.h +++ b/src/core/core_timing.h | |||
| @@ -110,7 +110,7 @@ public: | |||
| 110 | std::chrono::nanoseconds GetGlobalTimeNs() const; | 110 | std::chrono::nanoseconds GetGlobalTimeNs() const; |
| 111 | 111 | ||
| 112 | /// Checks for events manually and returns time in nanoseconds for next event, threadsafe. | 112 | /// Checks for events manually and returns time in nanoseconds for next event, threadsafe. |
| 113 | std::optional<u64> Advance(); | 113 | std::optional<s64> Advance(); |
| 114 | 114 | ||
| 115 | private: | 115 | private: |
| 116 | struct Event; | 116 | struct Event; |
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index d67d3c5cd..da77967dd 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp | |||
| @@ -47,13 +47,13 @@ u32 GlobalScheduler::SelectThreads() { | |||
| 47 | ASSERT(is_locked); | 47 | ASSERT(is_locked); |
| 48 | const auto update_thread = [](Thread* thread, Scheduler& sched) { | 48 | const auto update_thread = [](Thread* thread, Scheduler& sched) { |
| 49 | sched.guard.lock(); | 49 | sched.guard.lock(); |
| 50 | if (thread != sched.selected_thread.get()) { | 50 | if (thread != sched.selected_thread_set.get()) { |
| 51 | if (thread == nullptr) { | 51 | if (thread == nullptr) { |
| 52 | ++sched.idle_selection_count; | 52 | ++sched.idle_selection_count; |
| 53 | } | 53 | } |
| 54 | sched.selected_thread = SharedFrom(thread); | 54 | sched.selected_thread_set = SharedFrom(thread); |
| 55 | } | 55 | } |
| 56 | const bool reschedule_pending = sched.selected_thread != sched.current_thread; | 56 | const bool reschedule_pending = sched.selected_thread_set != sched.current_thread; |
| 57 | sched.is_context_switch_pending = reschedule_pending; | 57 | sched.is_context_switch_pending = reschedule_pending; |
| 58 | std::atomic_thread_fence(std::memory_order_seq_cst); | 58 | std::atomic_thread_fence(std::memory_order_seq_cst); |
| 59 | sched.guard.unlock(); | 59 | sched.guard.unlock(); |
| @@ -118,6 +118,8 @@ u32 GlobalScheduler::SelectThreads() { | |||
| 118 | suggested); | 118 | suggested); |
| 119 | top_threads[candidate_core] = next; | 119 | top_threads[candidate_core] = next; |
| 120 | break; | 120 | break; |
| 121 | } else { | ||
| 122 | suggested = nullptr; | ||
| 121 | } | 123 | } |
| 122 | } | 124 | } |
| 123 | } | 125 | } |
| @@ -590,7 +592,7 @@ void Scheduler::OnThreadStart() { | |||
| 590 | } | 592 | } |
| 591 | 593 | ||
| 592 | void Scheduler::SwitchContextStep2() { | 594 | void Scheduler::SwitchContextStep2() { |
| 593 | Thread* previous_thread = current_thread.get(); | 595 | Thread* previous_thread = current_thread_prev.get(); |
| 594 | Thread* new_thread = selected_thread.get(); | 596 | Thread* new_thread = selected_thread.get(); |
| 595 | 597 | ||
| 596 | // Load context of new thread | 598 | // Load context of new thread |
| @@ -606,8 +608,6 @@ void Scheduler::SwitchContextStep2() { | |||
| 606 | "Thread must be ready to become running."); | 608 | "Thread must be ready to become running."); |
| 607 | 609 | ||
| 608 | // Cancel any outstanding wakeup events for this thread | 610 | // Cancel any outstanding wakeup events for this thread |
| 609 | current_thread = SharedFrom(new_thread); | ||
| 610 | new_thread->SetStatus(ThreadStatus::Running); | ||
| 611 | new_thread->SetIsRunning(true); | 611 | new_thread->SetIsRunning(true); |
| 612 | 612 | ||
| 613 | auto* const thread_owner_process = current_thread->GetOwnerProcess(); | 613 | auto* const thread_owner_process = current_thread->GetOwnerProcess(); |
| @@ -622,21 +622,21 @@ void Scheduler::SwitchContextStep2() { | |||
| 622 | cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0()); | 622 | cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0()); |
| 623 | cpu_core.ClearExclusiveState(); | 623 | cpu_core.ClearExclusiveState(); |
| 624 | } | 624 | } |
| 625 | } else { | ||
| 626 | current_thread = nullptr; | ||
| 627 | // Note: We do not reset the current process and current page table when idling because | ||
| 628 | // technically we haven't changed processes, our threads are just paused. | ||
| 629 | } | 625 | } |
| 630 | guard.unlock(); | 626 | |
| 627 | TryDoContextSwitch(); | ||
| 631 | } | 628 | } |
| 632 | 629 | ||
| 633 | void Scheduler::SwitchContext() { | 630 | void Scheduler::SwitchContext() { |
| 634 | Thread* previous_thread = current_thread.get(); | 631 | current_thread_prev = current_thread; |
| 632 | selected_thread = selected_thread_set; | ||
| 633 | Thread* previous_thread = current_thread_prev.get(); | ||
| 635 | Thread* new_thread = selected_thread.get(); | 634 | Thread* new_thread = selected_thread.get(); |
| 635 | current_thread = selected_thread; | ||
| 636 | 636 | ||
| 637 | is_context_switch_pending = false; | 637 | is_context_switch_pending = false; |
| 638 | guard.unlock(); | ||
| 638 | if (new_thread == previous_thread) { | 639 | if (new_thread == previous_thread) { |
| 639 | guard.unlock(); | ||
| 640 | return; | 640 | return; |
| 641 | } | 641 | } |
| 642 | 642 | ||
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h index f26a554f5..f73ca777e 100644 --- a/src/core/hle/kernel/scheduler.h +++ b/src/core/hle/kernel/scheduler.h | |||
| @@ -249,6 +249,8 @@ private: | |||
| 249 | 249 | ||
| 250 | std::shared_ptr<Thread> current_thread = nullptr; | 250 | std::shared_ptr<Thread> current_thread = nullptr; |
| 251 | std::shared_ptr<Thread> selected_thread = nullptr; | 251 | std::shared_ptr<Thread> selected_thread = nullptr; |
| 252 | std::shared_ptr<Thread> current_thread_prev = nullptr; | ||
| 253 | std::shared_ptr<Thread> selected_thread_set = nullptr; | ||
| 252 | std::shared_ptr<Thread> idle_thread = nullptr; | 254 | std::shared_ptr<Thread> idle_thread = nullptr; |
| 253 | 255 | ||
| 254 | Core::System& system; | 256 | Core::System& system; |
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 9f46a1758..5e9dd43bf 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp | |||
| @@ -316,7 +316,6 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle, | |||
| 316 | 316 | ||
| 317 | /// Makes a blocking IPC call to an OS service. | 317 | /// Makes a blocking IPC call to an OS service. |
| 318 | static ResultCode SendSyncRequest(Core::System& system, Handle handle) { | 318 | static ResultCode SendSyncRequest(Core::System& system, Handle handle) { |
| 319 | std::lock_guard lock{HLE::g_hle_lock}; | ||
| 320 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | 319 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); |
| 321 | std::shared_ptr<ClientSession> session = handle_table.Get<ClientSession>(handle); | 320 | std::shared_ptr<ClientSession> session = handle_table.Get<ClientSession>(handle); |
| 322 | if (!session) { | 321 | if (!session) { |