summaryrefslogtreecommitdiff
path: root/src/core/hle/kernel/scheduler.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  21
1 files changed, 10 insertions, 11 deletions
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 2b12c0dbf..7b929781c 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -6,6 +6,7 @@
 // licensed under GPLv2 or later under exception provided by the author.

 #include &lt;algorithm&gt;
+#include &lt;mutex&gt;
 #include &lt;set&gt;
 #include &lt;unordered_set&gt;
 #include &lt;utility&gt;
@@ -31,22 +32,20 @@ GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {}
 GlobalScheduler::~GlobalScheduler() = default;

 void GlobalScheduler::AddThread(std::shared_ptr&lt;Thread&gt; thread) {
-    global_list_guard.lock();
+    std::scoped_lock lock{global_list_guard};
     thread_list.push_back(std::move(thread));
-    global_list_guard.unlock();
 }

 void GlobalScheduler::RemoveThread(std::shared_ptr&lt;Thread&gt; thread) {
-    global_list_guard.lock();
+    std::scoped_lock lock{global_list_guard};
     thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
                       thread_list.end());
-    global_list_guard.unlock();
 }

 u32 GlobalScheduler::SelectThreads() {
     ASSERT(is_locked);
     const auto update_thread = [](Thread* thread, Scheduler& sched) {
-        sched.guard.lock();
+        std::scoped_lock lock{sched.guard};
         if (thread != sched.selected_thread_set.get()) {
             if (thread == nullptr) {
                 ++sched.idle_selection_count;
@@ -57,7 +56,6 @@ u32 GlobalScheduler::SelectThreads() {
             sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread);
         sched.is_context_switch_pending = reschedule_pending;
         std::atomic_thread_fence(std::memory_order_seq_cst);
-        sched.guard.unlock();
         return reschedule_pending;
     };
     if (!is_reselection_pending.load()) {
@@ -757,11 +755,12 @@ void Scheduler::OnSwitch(void* this_scheduler) {

 void Scheduler::SwitchToCurrent() {
     while (true) {
-        guard.lock();
-        selected_thread = selected_thread_set;
-        current_thread = selected_thread;
-        is_context_switch_pending = false;
-        guard.unlock();
+        {
+            std::scoped_lock lock{guard};
+            selected_thread = selected_thread_set;
+            current_thread = selected_thread;
+            is_context_switch_pending = false;
+        }
         while (!is_context_switch_pending) {
             if (current_thread != nullptr && !current_thread->IsHLEThread()) {
                 current_thread->context_guard.lock();