Diffstat (limited to 'src')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  16
-rw-r--r--  src/core/hle/kernel/scheduler.h     8
-rw-r--r--  src/core/hle/kernel/svc.cpp        13
-rw-r--r--  src/core/hle/kernel/thread.cpp     12
-rw-r--r--  src/core/hle/kernel/thread.h        6
5 files changed, 31 insertions, 24 deletions
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index df4e9b799..451fd8077 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -118,7 +118,7 @@ void GlobalScheduler::SelectThread(u32 core) {
  * YieldThread takes a thread and moves it to the back of the it's priority list
  * This operation can be redundant and no scheduling is changed if marked as so.
  */
-void GlobalScheduler::YieldThread(Thread* yielding_thread) {
+bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
     // Note: caller should use critical section, etc.
     const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
     const u32 priority = yielding_thread->GetPriority();
@@ -129,7 +129,7 @@ void GlobalScheduler::YieldThread(Thread* yielding_thread) {
     scheduled_queue[core_id].yield(priority);
 
     Thread* winner = scheduled_queue[core_id].front(priority);
-    AskForReselectionOrMarkRedundant(yielding_thread, winner);
+    return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
 
 /*
@@ -138,7 +138,7 @@ void GlobalScheduler::YieldThread(Thread* yielding_thread) {
  * a better priority than the next thread in the core.
  * This operation can be redundant and no scheduling is changed if marked as so.
  */
-void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
+bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
     // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
     // etc.
     const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
@@ -186,7 +186,7 @@ void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
         winner = next_thread;
     }
 
-    AskForReselectionOrMarkRedundant(yielding_thread, winner);
+    return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
 
 /*
@@ -195,7 +195,7 @@ void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
  * a suggested thread is obtained instead.
  * This operation can be redundant and no scheduling is changed if marked as so.
  */
-void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
+bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
     // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
     // etc.
     Thread* winner = nullptr;
@@ -235,7 +235,7 @@ void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
         }
     }
 
-    AskForReselectionOrMarkRedundant(yielding_thread, winner);
+    return AskForReselectionOrMarkRedundant(yielding_thread, winner);
 }
 
 void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) {
@@ -248,13 +248,15 @@ void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) {
     scheduled_queue[core].add(thread, priority, false);
 }
 
-void GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
+bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
     if (current_thread == winner) {
         // TODO(blinkhawk): manage redundant operations, this is not implemented.
         // as its mostly an optimization.
         // current_thread->SetRedundantSchedulerOperation();
+        return true;
     } else {
         reselection_pending.store(true, std::memory_order_release);
+        return false;
     }
 }
 
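For reference, a minimal self-contained sketch of the pattern scheduler.cpp adopts above, using an invented stand-in type (MiniScheduler, no real yuzu headers): AskForReselectionOrMarkRedundant now reports whether the yield was redundant, i.e. the yielding thread is still the best candidate, so callers can decide whether a reschedule is actually needed.

// Hedged sketch only: MiniScheduler is a stand-in, not yuzu's GlobalScheduler.
#include <atomic>

struct Thread {}; // placeholder type for the sketch

struct MiniScheduler {
    std::atomic<bool> reselection_pending{false};

    // Returns true when the yield changed nothing (the yielding thread is
    // still the winner); returns false after flagging a core reselection.
    bool AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
        if (current_thread == winner) {
            return true;
        }
        reselection_pending.store(true, std::memory_order_release);
        return false;
    }
};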
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 1c9d8a30f..8fcc86bae 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -115,7 +115,7 @@ public:
      * YieldThread takes a thread and moves it to the back of the it's priority list
      * This operation can be redundant and no scheduling is changed if marked as so.
      */
-    void YieldThread(Thread* thread);
+    bool YieldThread(Thread* thread);
 
     /*
      * YieldThreadAndBalanceLoad takes a thread and moves it to the back of the it's priority list.
@@ -123,7 +123,7 @@ public:
      * a better priority than the next thread in the core.
      * This operation can be redundant and no scheduling is changed if marked as so.
      */
-    void YieldThreadAndBalanceLoad(Thread* thread);
+    bool YieldThreadAndBalanceLoad(Thread* thread);
 
     /*
      * YieldThreadAndWaitForLoadBalancing takes a thread and moves it out of the scheduling queue
@@ -131,7 +131,7 @@ public:
      * a suggested thread is obtained instead.
      * This operation can be redundant and no scheduling is changed if marked as so.
      */
-    void YieldThreadAndWaitForLoadBalancing(Thread* thread);
+    bool YieldThreadAndWaitForLoadBalancing(Thread* thread);
 
     u32 CpuCoresCount() const {
         return NUM_CPU_CORES;
@@ -146,7 +146,7 @@ public:
     }
 
 private:
-    void AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner);
+    bool AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner);
 
     static constexpr u32 min_regular_priority = 2;
     std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> scheduled_queue;
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index d520ed033..bd67fc96d 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1556,17 +1556,18 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
 
     auto& scheduler = system.CurrentScheduler();
     auto* const current_thread = scheduler.GetCurrentThread();
+    bool redundant = false;
 
     if (nanoseconds <= 0) {
         switch (static_cast<SleepType>(nanoseconds)) {
         case SleepType::YieldWithoutLoadBalancing:
-            current_thread->YieldSimple();
+            redundant = current_thread->YieldSimple();
             break;
         case SleepType::YieldWithLoadBalancing:
-            current_thread->YieldAndBalanceLoad();
+            redundant = current_thread->YieldAndBalanceLoad();
             break;
         case SleepType::YieldAndWaitForLoadBalancing:
-            current_thread->YieldAndWaitForLoadBalancing();
+            redundant = current_thread->YieldAndWaitForLoadBalancing();
             break;
         default:
             UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
@@ -1575,7 +1576,11 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
         current_thread->Sleep(nanoseconds);
     }
 
-    system.PrepareReschedule(current_thread->GetProcessorID());
+    if (redundant) {
+        system.CoreTiming().Idle();
+    } else {
+        system.PrepareReschedule(current_thread->GetProcessorID());
+    }
 }
 
 /// Wait process wide key atomic
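The svc.cpp hunk above is where the new boolean pays off: a redundant yield now idles the core's timing instead of forcing a reschedule. A rough sketch of that branch, assuming invented stand-in interfaces (FakeThread and FakeSystem are not the real Kernel::Thread or Core::System APIs):

// Sketch only: FakeThread and FakeSystem are hypothetical stand-ins.
struct FakeThread {
    bool yield_was_redundant = true; // pretend verdict from the scheduler
    int core = 0;
    bool YieldSimple() { return yield_was_redundant; }
    int GetProcessorID() const { return core; }
};

struct FakeSystem {
    bool idled = false;
    bool reschedule_requested = false;
    void IdleCore() { idled = true; }                        // stand-in for CoreTiming().Idle()
    void PrepareReschedule(int /*core*/) { reschedule_requested = true; }
};

// Zero/negative-timeout sleep path: skip the reschedule when the yield was redundant.
void SleepZeroNs(FakeSystem& system, FakeThread& current_thread) {
    const bool redundant = current_thread.YieldSimple();
    if (redundant) {
        system.IdleCore();
    } else {
        system.PrepareReschedule(current_thread.GetProcessorID());
    }
}

int main() {
    FakeSystem system;
    FakeThread thread;
    SleepZeroNs(system, thread);
    return system.idled ? 0 : 1; // 0: the redundant yield just idled the core
}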
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 8cf0a7ec7..ae62609e3 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -373,19 +373,19 @@ void Thread::Sleep(s64 nanoseconds) {
     WakeAfterDelay(nanoseconds);
 }
 
-void Thread::YieldSimple() {
+bool Thread::YieldSimple() {
     auto& scheduler = kernel.GlobalScheduler();
-    scheduler.YieldThread(this);
+    return scheduler.YieldThread(this);
 }
 
-void Thread::YieldAndBalanceLoad() {
+bool Thread::YieldAndBalanceLoad() {
     auto& scheduler = kernel.GlobalScheduler();
-    scheduler.YieldThreadAndBalanceLoad(this);
+    return scheduler.YieldThreadAndBalanceLoad(this);
 }
 
-void Thread::YieldAndWaitForLoadBalancing() {
+bool Thread::YieldAndWaitForLoadBalancing() {
     auto& scheduler = kernel.GlobalScheduler();
-    scheduler.YieldThreadAndWaitForLoadBalancing(this);
+    return scheduler.YieldThreadAndWaitForLoadBalancing(this);
 }
 
 void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
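The thread.cpp hunk above is pure forwarding: each Thread yield helper now propagates the global scheduler's verdict. A tiny sketch of that shape, again with invented stand-in types rather than the real kernel classes:

// Sketch only: SketchScheduler and SketchThread are hypothetical stand-ins.
struct SketchScheduler {
    bool YieldThread() { return true; } // pretend the yield was redundant
};

struct SketchThread {
    SketchScheduler& scheduler;
    bool YieldSimple() { return scheduler.YieldThread(); } // forward the result
};

int main() {
    SketchScheduler sched;
    SketchThread thread{sched};
    return thread.YieldSimple() ? 0 : 1; // 0 when the yield changed nothing
}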
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index bf0cae959..88255099f 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -408,13 +408,13 @@ public:
     void Sleep(s64 nanoseconds);
 
     /// Yields this thread without rebalancing loads.
-    void YieldSimple();
+    bool YieldSimple();
 
     /// Yields this thread and does a load rebalancing.
-    void YieldAndBalanceLoad();
+    bool YieldAndBalanceLoad();
 
     /// Yields this thread and if the core is left idle, loads are rebalanced
-    void YieldAndWaitForLoadBalancing();
+    bool YieldAndWaitForLoadBalancing();
 
     ThreadSchedStatus GetSchedulingStatus() const {
         return static_cast<ThreadSchedStatus>(scheduling_state & ThreadSchedMasks::LowMask);