Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index df4e9b799..451fd8077 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -118,7 +118,7 @@ void GlobalScheduler::SelectThread(u32 core) {
  * YieldThread takes a thread and moves it to the back of its priority list.
  * This operation can be redundant, and no scheduling is changed if marked as such.
  */
-void GlobalScheduler::YieldThread(Thread* yielding_thread) {
+bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
     // Note: caller should use critical section, etc.
     const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
     const u32 priority = yielding_thread->GetPriority();
@@ -129,7 +129,7 @@ void GlobalScheduler::YieldThread(Thread* yielding_thread) {
     scheduled_queue[core_id].yield(priority);
 
     Thread* winner = scheduled_queue[core_id].front(priority);
-    AskForReselectionOrMarkRedundant(yielding_thread, winner);
+    return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
 
 /*
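For context, the scheduled_queue[core_id].yield(priority) call above rotates the thread at the front of the given priority level to the back of that level, so the following front(priority) call names the next candidate. A minimal sketch of that rotation, assuming a plain per-priority deque rather than yuzu's actual multi-level queue (SimpleRunQueue and its members are illustrative, not from this codebase):

#include <array>
#include <cstddef>
#include <deque>

struct Thread;

// Illustrative only: a per-priority run queue where yield() moves the
// front thread of a given priority level to the back of the same level.
struct SimpleRunQueue {
    static constexpr std::size_t NumPriorities = 64;
    std::array<std::deque<Thread*>, NumPriorities> levels;

    // Rotate the front thread to the back so the next thread at the same
    // priority gets a turn; a single-thread level is left unchanged.
    void yield(std::size_t priority) {
        auto& level = levels[priority];
        if (level.size() > 1) {
            Thread* front_thread = level.front();
            level.pop_front();
            level.push_back(front_thread);
        }
    }

    Thread* front(std::size_t priority) const {
        const auto& level = levels[priority];
        return level.empty() ? nullptr : level.front();
    }
};

If the yielding thread is the only one at its priority, the rotation changes nothing and front() returns the yielding thread itself; that is exactly the redundant case AskForReselectionOrMarkRedundant now reports as true.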
@@ -138,7 +138,7 @@ void GlobalScheduler::YieldThread(Thread* yielding_thread) {
  * a better priority than the next thread in the core.
  * This operation can be redundant, and no scheduling is changed if marked as such.
  */
-void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
+bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
     // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
     // etc.
     const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
@@ -186,7 +186,7 @@ void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
         winner = next_thread;
     }
 
-    AskForReselectionOrMarkRedundant(yielding_thread, winner);
+    return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
 
 /*
@@ -195,7 +195,7 @@ void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
  * a suggested thread is obtained instead.
  * This operation can be redundant, and no scheduling is changed if marked as such.
  */
-void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
+bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
     // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
     // etc.
     Thread* winner = nullptr;
@@ -235,7 +235,7 @@ void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
         }
     }
 
-    AskForReselectionOrMarkRedundant(yielding_thread, winner);
+    return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
 
 void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) {
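With this hunk, all three yield variants return the redundancy result instead of discarding it. A hedged sketch of how a caller might select a variant and propagate that result; the YieldType enum and DispatchYield helper are hypothetical, and the GlobalScheduler declaration is a minimal stand-in for the interface this diff produces:

struct Thread;

// Stand-in for the yield interface after this diff; only the three
// entry points changed here are assumed.
class GlobalScheduler {
public:
    bool YieldThread(Thread* yielding_thread);
    bool YieldThreadAndBalanceLoad(Thread* yielding_thread);
    bool YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread);
};

// Hypothetical yield kinds; the names are illustrative assumptions.
enum class YieldType {
    Simple,               // rotate within the current core's priority level
    WithLoadBalancing,    // also consider migrating threads between cores
    WaitForLoadBalancing, // give up the slot and take a suggested thread
};

// Returns true when the yield was redundant (no reselection requested).
bool DispatchYield(GlobalScheduler& scheduler, Thread* thread, YieldType type) {
    switch (type) {
    case YieldType::Simple:
        return scheduler.YieldThread(thread);
    case YieldType::WithLoadBalancing:
        return scheduler.YieldThreadAndBalanceLoad(thread);
    case YieldType::WaitForLoadBalancing:
        return scheduler.YieldThreadAndWaitForLoadBalancing(thread);
    }
    return true; // unreachable with a valid YieldType
}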
@@ -248,13 +248,15 @@ void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) {
     scheduled_queue[core].add(thread, priority, false);
 }
 
-void GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
+bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
     if (current_thread == winner) {
         // TODO(blinkhawk): manage redundant operations; this is not implemented,
         // as it's mostly an optimization.
         // current_thread->SetRedundantSchedulerOperation();
+        return true;
     } else {
         reselection_pending.store(true, std::memory_order_release);
+        return false;
     }
 }
 
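The new bool surfaces what was previously only observable through reselection_pending: true means the yield was redundant (the yielding thread was already the best candidate), false means a reselection was requested. Reusing the stand-in GlobalScheduler interface from the sketch above, a caller could then skip unnecessary context switches; the handler below is illustrative, not part of this diff:

// Hypothetical SVC-level yield handler: only ask for a context switch
// when the yield actually changed the schedule.
void HandleYieldSvc(GlobalScheduler& global_scheduler, Thread* current_thread) {
    const bool redundant = global_scheduler.YieldThread(current_thread);
    if (!redundant) {
        // Reselection is pending on this core; a real caller would now
        // trigger a context switch through its per-core scheduler.
    }
}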