author    Fernando Sahmkow 2019-04-02 08:03:44 -0400
committer FernandoS27 2019-10-15 11:55:11 -0400
commit    3a94e7ea3386cbd14e74255e0a4c7f8615a396c9 (patch)
tree      1b1956b4439e2f8ab56811db38771d001296cda0 /src/core/hle/kernel/scheduler.cpp
parent    Add PrepareReschedule where required. (diff)
Comment and reorganize the scheduler
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp | 164
1 file changed, 71 insertions(+), 93 deletions(-)
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 878aeed6d..537640152 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -19,6 +19,11 @@
 
 namespace Kernel {
 
+/*
+ * SelectThreads, Yield functions originally by TuxSH.
+ * licensed under GPLv2 or later under exception provided by the author.
+ */
+
 void GlobalScheduler::AddThread(SharedPtr<Thread> thread) {
     thread_list.push_back(std::move(thread));
 }
@@ -29,15 +34,23 @@ void GlobalScheduler::RemoveThread(Thread* thread) {
 }
 
 /*
- * SelectThreads, Yield functions originally by TuxSH.
- * licensed under GPLv2 or later under exception provided by the author.
+ * UnloadThread selects a core and forces it to unload its current thread's context
  */
-
 void GlobalScheduler::UnloadThread(s32 core) {
     Scheduler& sched = Core::System::GetInstance().Scheduler(core);
     sched.UnloadThread();
 }
 
+/*
+ * SelectThread takes care of selecting the new scheduled thread.
+ * It does it in 3 steps:
+ * - First, a thread is selected from the top of the priority queue. If no thread
+ *   is obtained then we move to step two, else we are done.
+ * - Second, we try to get a suggested thread that's not assigned to any core or
+ *   that is not the top thread in that core.
+ * - Third, if no suggested thread is found, we do a second pass and pick a running
+ *   thread in another core and swap it with its current thread.
+ */
 void GlobalScheduler::SelectThread(u32 core) {
     auto update_thread = [](Thread* thread, Scheduler& sched) {
         if (thread != sched.selected_thread) {
@@ -51,105 +64,58 @@ void GlobalScheduler::SelectThread(u32 core)
     };
     Scheduler& sched = Core::System::GetInstance().Scheduler(core);
     Thread* current_thread = nullptr;
+    // Step 1: Get top thread in schedule queue.
     current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
-    if (!current_thread) {
-        Thread* winner = nullptr;
-        std::set<s32> sug_cores;
-        for (auto thread : suggested_queue[core]) {
-            s32 this_core = thread->GetProcessorID();
-            Thread* thread_on_core = nullptr;
-            if (this_core >= 0) {
-                thread_on_core = scheduled_queue[this_core].front();
-            }
-            if (this_core < 0 || thread != thread_on_core) {
-                winner = thread;
-                break;
-            }
-            sug_cores.insert(this_core);
-        }
-        if (winner && winner->GetPriority() > 2) {
-            if (winner->IsRunning()) {
-                UnloadThread(winner->GetProcessorID());
-            }
-            TransferToCore(winner->GetPriority(), core, winner);
-            current_thread = winner;
-        } else {
-            for (auto& src_core : sug_cores) {
-                auto it = scheduled_queue[src_core].begin();
-                it++;
-                if (it != scheduled_queue[src_core].end()) {
-                    Thread* thread_on_core = scheduled_queue[src_core].front();
-                    Thread* to_change = *it;
-                    if (thread_on_core->IsRunning() || to_change->IsRunning()) {
-                        UnloadThread(src_core);
-                    }
-                    TransferToCore(thread_on_core->GetPriority(), core, thread_on_core);
-                    current_thread = thread_on_core;
-                }
-            }
-        }
-    }
-    update_thread(current_thread, sched);
-}
-
-void GlobalScheduler::SelectThreads() {
-    auto update_thread = [](Thread* thread, Scheduler& sched) {
-        if (thread != sched.selected_thread) {
-            if (thread == nullptr) {
-                ++sched.idle_selection_count;
-            }
-            sched.selected_thread = thread;
-        }
-        sched.context_switch_pending = sched.selected_thread != sched.current_thread;
-        std::atomic_thread_fence(std::memory_order_seq_cst);
-    };
-
-    auto& system = Core::System::GetInstance();
-
-    std::unordered_set<Thread*> picked_threads;
-    // This maintain the "current thread is on front of queue" invariant
-    std::array<Thread*, NUM_CPU_CORES> current_threads;
-    for (u32 i = 0; i < NUM_CPU_CORES; i++) {
-        Scheduler& sched = system.Scheduler(i);
-        current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
-        if (current_threads[i])
-            picked_threads.insert(current_threads[i]);
-        update_thread(current_threads[i], sched);
-    }
-
-    // Do some load-balancing. Allow second pass.
-    std::array<Thread*, NUM_CPU_CORES> current_threads_2 = current_threads;
-    for (u32 i = 0; i < NUM_CPU_CORES; i++) {
-        if (!scheduled_queue[i].empty()) {
-            continue;
-        }
-        Thread* winner = nullptr;
-        for (auto thread : suggested_queue[i]) {
-            if (thread->GetProcessorID() < 0 || thread != current_threads[i]) {
-                if (picked_threads.count(thread) == 0 && !thread->IsRunning()) {
-                    winner = thread;
-                    break;
-                }
-            }
-        }
-        if (winner) {
-            TransferToCore(winner->GetPriority(), i, winner);
-            current_threads_2[i] = winner;
-            picked_threads.insert(winner);
-        }
-    }
-
-    // See which to-be-current threads have changed & update accordingly
-    for (u32 i = 0; i < NUM_CPU_CORES; i++) {
-        Scheduler& sched = system.Scheduler(i);
-        if (current_threads_2[i] != current_threads[i]) {
-            update_thread(current_threads_2[i], sched);
-        }
-    }
-
-    reselection_pending.store(false, std::memory_order_release);
-}
-
+    if (current_thread) {
+        update_thread(current_thread, sched);
+        return;
+    }
+    // Step 2: Try selecting a suggested thread.
+    Thread* winner = nullptr;
+    std::set<s32> sug_cores;
+    for (auto thread : suggested_queue[core]) {
+        s32 this_core = thread->GetProcessorID();
+        Thread* thread_on_core = nullptr;
+        if (this_core >= 0) {
+            thread_on_core = scheduled_queue[this_core].front();
+        }
+        if (this_core < 0 || thread != thread_on_core) {
+            winner = thread;
+            break;
+        }
+        sug_cores.insert(this_core);
+    }
+    // If we got a suggested thread, select it; else do a second pass.
+    if (winner && winner->GetPriority() > 2) {
+        if (winner->IsRunning()) {
+            UnloadThread(winner->GetProcessorID());
+        }
+        TransferToCore(winner->GetPriority(), core, winner);
+        update_thread(winner, sched);
+        return;
+    }
+    // Step 3: Select a suggested thread from another core.
+    for (auto& src_core : sug_cores) {
+        auto it = scheduled_queue[src_core].begin();
+        it++;
+        if (it != scheduled_queue[src_core].end()) {
+            Thread* thread_on_core = scheduled_queue[src_core].front();
+            Thread* to_change = *it;
+            if (thread_on_core->IsRunning() || to_change->IsRunning()) {
+                UnloadThread(src_core);
+            }
+            TransferToCore(thread_on_core->GetPriority(), core, thread_on_core);
+            current_thread = thread_on_core;
+            break;
+        }
+    }
+    update_thread(current_thread, sched);
+}
 
+/*
+ * YieldThread takes a thread and moves it to the back of its priority list.
+ * This operation can be redundant, and no scheduling is changed if marked as so.
+ */
 void GlobalScheduler::YieldThread(Thread* yielding_thread) {
     // Note: caller should use critical section, etc.
     u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
@@ -164,6 +130,12 @@ void GlobalScheduler::YieldThread(Thread* yielding_thread) {
     AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
 
+/*
+ * YieldThreadAndBalanceLoad takes a thread and moves it to the back of its priority list.
+ * Afterwards, it tries to pick a suggested thread from the suggested queue that has worse time or
+ * a better priority than the next thread in the core.
+ * This operation can be redundant, and no scheduling is changed if marked as so.
+ */
 void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
     // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
     // etc.
@@ -213,6 +185,12 @@ void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
     AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
 
+/*
+ * YieldThreadAndWaitForLoadBalancing takes a thread and moves it out of the scheduling queue
+ * and into the suggested queue. If no thread can be scheduled afterwards in that core,
+ * a suggested thread is obtained instead.
+ * This operation can be redundant, and no scheduling is changed if marked as so.
+ */
 void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
     // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
     // etc.
@@ -256,8 +234,8 @@ void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
 
 void GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
     if (current_thread == winner) {
-        // Nintendo (not us) has a nullderef bug on current_thread->owner, but which is never
-        // triggered.
+        // TODO(blinkhawk): manage redundant operations; this is not implemented,
+        // as it's mostly an optimization.
        // current_thread->SetRedundantSchedulerOperation();
     } else {
        reselection_pending.store(true, std::memory_order_release);
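
The three-step selection documented in the new SelectThread comment can be illustrated outside the kernel. Below is a minimal, self-contained C++ sketch of the same decision order; ToyThread, SelectThreadSketch, kCores and the toy queues are illustrative stand-ins, not yuzu's real Thread/Scheduler API, and the sketch omits the actual migration (TransferToCore/UnloadThread) and the priority guard on step 2.

// Sketch of the three-step selection, using toy types. All names here are
// illustrative; they are not part of yuzu.
#include <array>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <iostream>
#include <set>

struct ToyThread {
    int id;
    std::int32_t processor_id; // -1 means "not assigned to any core"
    std::uint32_t priority;    // lower value = higher priority
    bool running = false;
};

constexpr std::size_t kCores = 4;
std::array<std::deque<ToyThread*>, kCores> scheduled; // models scheduled_queue
std::array<std::deque<ToyThread*>, kCores> suggested; // models suggested_queue

ToyThread* SelectThreadSketch(std::size_t core) {
    // Step 1: take the top thread of this core's own priority queue.
    if (!scheduled[core].empty()) {
        return scheduled[core].front();
    }
    // Step 2: look for a suggested thread that is unassigned, or that is not
    // the currently selected (front) thread of its own core.
    std::set<std::size_t> sug_cores;
    for (ToyThread* t : suggested[core]) {
        ToyThread* top = nullptr;
        if (t->processor_id >= 0) {
            const auto& q = scheduled[static_cast<std::size_t>(t->processor_id)];
            top = q.empty() ? nullptr : q.front();
        }
        if (t->processor_id < 0 || t != top) {
            return t;
        }
        sug_cores.insert(static_cast<std::size_t>(t->processor_id));
    }
    // Step 3: second pass - steal the front thread of a core noted in step 2,
    // provided that core has a replacement thread queued behind it.
    for (std::size_t src : sug_cores) {
        if (scheduled[src].size() > 1) {
            return scheduled[src].front();
        }
    }
    return nullptr; // nothing found: the core idles
}

int main() {
    ToyThread a{0, 1, 30, true}; // running as core 1's front thread
    ToyThread b{1, 1, 40, false}; // queued behind it, so a can be stolen
    scheduled[1] = {&a, &b};
    suggested[0] = {&a}; // core 0's only suggestion is core 1's front thread
    ToyThread* picked = SelectThreadSketch(0);
    std::cout << "core 0 picked thread " << (picked ? picked->id : -1) << '\n'; // 0, via step 3
}

In the real function the chosen thread is additionally moved to the target core with TransferToCore and unloaded from its source core if running, and a step-2 winner is only migrated when its priority value is greater than 2 (i.e. it is not among the very highest priorities); otherwise control falls through to step 3.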
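
The yield family of functions all start from the primitive named in the YieldThread comment: moving the yielding thread to the back of the list for its own priority, so equal-priority peers run before it again. A minimal sketch of just that primitive, again with illustrative names (ToyYield, prio_queue) rather than yuzu's real multilevel queue:

#include <deque>
#include <map>

using ThreadId = int;
std::map<int, std::deque<ThreadId>> prio_queue; // priority -> FIFO of thread ids

void ToyYield(ThreadId tid, int priority) {
    auto& q = prio_queue[priority];
    if (!q.empty() && q.front() == tid) {
        q.pop_front();
        q.push_back(tid); // same priority, but now behind its peers
    }
}

int main() {
    prio_queue[28] = {1, 2, 3};
    ToyYield(1, 28); // queue for priority 28 becomes 2, 3, 1
}

YieldThreadAndBalanceLoad and YieldThreadAndWaitForLoadBalancing build on this by then consulting the suggested queues, which is why both end in AskForReselectionOrMarkRedundant.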