Diffstat (limited to 'src')
 src/core/hle/kernel/kernel.h      |  2
 src/core/hle/kernel/scheduler.cpp | 53
 src/core/hle/kernel/scheduler.h   | 65
 src/core/hle/kernel/svc.cpp      | 12
 src/core/hle/kernel/thread.cpp   | 17
 src/core/hle/kernel/thread.h     | 16
 6 files changed, 98 insertions(+), 67 deletions(-)
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 0fc4d1f36..9fb8f52ec 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -21,11 +21,11 @@ namespace Kernel {
 
 class AddressArbiter;
 class ClientPort;
+class GlobalScheduler;
 class HandleTable;
 class Process;
 class ResourceLimit;
 class Thread;
-class GlobalScheduler;
 
 /// Represents a single instance of the kernel.
 class KernelCore {
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 122106267..dabeb05d6 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -23,7 +23,7 @@
 namespace Kernel {
 
 GlobalScheduler::GlobalScheduler(Core::System& system) : system{system} {
-    reselection_pending = false;
+    is_reselection_pending = false;
 }
 
 void GlobalScheduler::AddThread(SharedPtr<Thread> thread) {
@@ -61,7 +61,7 @@ void GlobalScheduler::SelectThread(u32 core) {
             }
             sched.selected_thread = thread;
         }
-        sched.context_switch_pending = sched.selected_thread != sched.current_thread;
+        sched.is_context_switch_pending = sched.selected_thread != sched.current_thread;
         std::atomic_thread_fence(std::memory_order_seq_cst);
     };
     Scheduler& sched = system.Scheduler(core);
@@ -318,10 +318,18 @@ void GlobalScheduler::PreemptThreads() {
         }
     }
 
-        reselection_pending.store(true, std::memory_order_release);
+        is_reselection_pending.store(true, std::memory_order_release);
     }
 }
 
+void GlobalScheduler::Suggest(u32 priority, u32 core, Thread* thread) {
+    suggested_queue[core].add(thread, priority);
+}
+
+void GlobalScheduler::Unsuggest(u32 priority, u32 core, Thread* thread) {
+    suggested_queue[core].remove(thread, priority);
+}
+
 void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) {
     ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core.");
     scheduled_queue[core].add(thread, priority);
@@ -332,12 +340,40 @@ void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) {
     scheduled_queue[core].add(thread, priority, false);
 }
 
+void GlobalScheduler::Reschedule(u32 priority, u32 core, Thread* thread) {
+    scheduled_queue[core].remove(thread, priority);
+    scheduled_queue[core].add(thread, priority);
+}
+
+void GlobalScheduler::Unschedule(u32 priority, u32 core, Thread* thread) {
+    scheduled_queue[core].remove(thread, priority);
+}
+
+void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
+    const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
+    const s32 source_core = thread->GetProcessorID();
+    if (source_core == destination_core || !schedulable) {
+        return;
+    }
+    thread->SetProcessorID(destination_core);
+    if (source_core >= 0) {
+        Unschedule(priority, source_core, thread);
+    }
+    if (destination_core >= 0) {
+        Unsuggest(priority, destination_core, thread);
+        Schedule(priority, destination_core, thread);
+    }
+    if (source_core >= 0) {
+        Suggest(priority, source_core, thread);
+    }
+}
+
 bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
     if (current_thread == winner) {
         current_thread->IncrementYieldCount();
         return true;
     } else {
-        reselection_pending.store(true, std::memory_order_release);
+        is_reselection_pending.store(true, std::memory_order_release);
         return false;
     }
 }
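To make the new TransferToCore flow concrete: the thread is unscheduled from its source core, scheduled on the destination, and then left behind as a suggestion on the source so that core can still pick it up if it goes idle. A minimal, self-contained sketch of that move, using plain std::set in place of Common::MultiLevelQueue and dropping the priority bookkeeping and the schedulable check (FakeThread is an illustrative stand-in, not yuzu's Thread):

#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <set>

// Illustrative stand-in; the real scheduler moves Thread* between
// per-core, per-priority multilevel queues.
struct FakeThread {
    std::int32_t processor_id = 0; // negative means "not pinned to a core"
};

constexpr std::size_t NUM_CPU_CORES = 4;
std::array<std::set<FakeThread*>, NUM_CPU_CORES> scheduled;
std::array<std::set<FakeThread*>, NUM_CPU_CORES> suggested;

// Mirrors the shape of GlobalScheduler::TransferToCore: unschedule from
// the source core, schedule on the destination, then add a suggestion
// back on the source core.
void TransferToCore(std::int32_t destination_core, FakeThread* thread) {
    const std::int32_t source_core = thread->processor_id;
    if (source_core == destination_core) {
        return;
    }
    thread->processor_id = destination_core;
    if (source_core >= 0) {
        scheduled[source_core].erase(thread);
    }
    if (destination_core >= 0) {
        suggested[destination_core].erase(thread);
        scheduled[destination_core].insert(thread);
    }
    if (source_core >= 0) {
        suggested[source_core].insert(thread);
    }
}

int main() {
    FakeThread t; // starts scheduled on core 0
    scheduled[0].insert(&t);
    TransferToCore(2, &t);
    std::cout << "scheduled on core 2: " << scheduled[2].count(&t) << '\n'; // 1
    std::cout << "suggested on core 0: " << suggested[0].count(&t) << '\n'; // 1
}

Note the ordering matches the function above: the destination queues are updated before the source-core suggestion is added.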
@@ -378,7 +414,7 @@ u64 Scheduler::GetLastContextSwitchTicks() const {
 }
 
 void Scheduler::TryDoContextSwitch() {
-    if (context_switch_pending) {
+    if (is_context_switch_pending) {
         SwitchContext();
     }
 }
@@ -409,7 +445,7 @@ void Scheduler::SwitchContext() {
     Thread* const previous_thread = GetCurrentThread();
     Thread* const new_thread = GetSelectedThread();
 
-    context_switch_pending = false;
+    is_context_switch_pending = false;
     if (new_thread == previous_thread) {
         return;
     }
@@ -477,4 +513,9 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
     last_context_switch_time = most_recent_switch_ticks;
 }
 
+void Scheduler::Shutdown() {
+    current_thread = nullptr;
+    selected_thread = nullptr;
+}
+
 } // namespace Kernel
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 617553ae3..fcae28e0a 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -39,15 +39,11 @@ public:
 
     // Add a thread to the suggested queue of a cpu core. Suggested threads may be
     // picked if no thread is scheduled to run on the core.
-    void Suggest(u32 priority, u32 core, Thread* thread) {
-        suggested_queue[core].add(thread, priority);
-    }
+    void Suggest(u32 priority, u32 core, Thread* thread);
 
     // Remove a thread from the suggested queue of a cpu core. Suggested threads may be
     // picked if no thread is scheduled to run on the core.
-    void Unsuggest(u32 priority, u32 core, Thread* thread) {
-        suggested_queue[core].remove(thread, priority);
-    }
+    void Unsuggest(u32 priority, u32 core, Thread* thread);
 
     // Add a thread to the scheduling queue of a cpu core. The thread is added at the
     // back of the queue in its priority level.
@@ -58,37 +54,15 @@ public:
     void SchedulePrepend(u32 priority, u32 core, Thread* thread);
 
     // Reschedule an already scheduled thread based on a new priority
-    void Reschedule(u32 priority, u32 core, Thread* thread) {
-        scheduled_queue[core].remove(thread, priority);
-        scheduled_queue[core].add(thread, priority);
-    }
+    void Reschedule(u32 priority, u32 core, Thread* thread);
 
     // Unschedule a thread.
-    void Unschedule(u32 priority, u32 core, Thread* thread) {
-        scheduled_queue[core].remove(thread, priority);
-    }
+    void Unschedule(u32 priority, u32 core, Thread* thread);
 
     // Transfers a thread into a specific core. If the destination_core is -1
     // it will be unscheduled from its source core and added into its suggested
     // queue.
-    void TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
-        const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
-        const s32 source_core = thread->GetProcessorID();
-        if (source_core == destination_core || !schedulable) {
-            return;
-        }
-        thread->SetProcessorID(destination_core);
-        if (source_core >= 0) {
-            Unschedule(priority, source_core, thread);
-        }
-        if (destination_core >= 0) {
-            Unsuggest(priority, destination_core, thread);
-            Schedule(priority, destination_core, thread);
-        }
-        if (source_core >= 0) {
-            Suggest(priority, source_core, thread);
-        }
-    }
+    void TransferToCore(u32 priority, s32 destination_core, Thread* thread);
 
     /*
      * UnloadThread selects a core and forces it to unload its current thread's context
@@ -133,6 +107,12 @@ public:
      */
     bool YieldThreadAndWaitForLoadBalancing(Thread* thread);
 
+    /*
+     * PreemptThreads rotates the scheduling queues of threads at a preemption
+     * priority and then does some core rebalancing. Preemption priorities can
+     * be found in the array 'preemption_priorities'. This operation happens
+     * every 10ms.
+     */
     void PreemptThreads();
 
     u32 CpuCoresCount() const {
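The rotation the new PreemptThreads comment describes can be pictured with an ordinary deque; this is only an illustration of the idea, not the Common::MultiLevelQueue API:

#include <deque>
#include <iostream>
#include <string>

// Minimal sketch of one priority level of a per-core run queue; the real
// implementation is Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>.
using Level = std::deque<std::string>;

// Rotate the queue at the preemption priority: the thread at the front
// moves to the back so its same-priority peers get a turn.
void RotateLevel(Level& level) {
    if (level.size() < 2) {
        return;
    }
    level.push_back(level.front());
    level.pop_front();
}

int main() {
    Level prio59 = {"audio", "worker", "loader"}; // threads at priority 59
    RotateLevel(prio59);                          // runs every 10 ms
    for (const auto& name : prio59) {
        std::cout << name << ' ';                 // worker loader audio
    }
    std::cout << '\n';
}

Each core has its own preemption priority (the {59, 59, 59, 62} array further down), so core 3 rotates at level 62 while cores 0 to 2 rotate at level 59.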
@@ -140,11 +120,11 @@ public:
     }
 
     void SetReselectionPending() {
-        reselection_pending.store(true, std::memory_order_release);
+        is_reselection_pending.store(true, std::memory_order_release);
     }
 
     bool IsReselectionPending() const {
-        return reselection_pending.load();
+        return is_reselection_pending.load(std::memory_order_acquire);
     }
 
     void Shutdown();
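Pairing the memory_order_release store with a memory_order_acquire load (instead of the old default seq_cst load) still guarantees that writes made before the flag was raised are visible to whichever core observes it. A freestanding sketch of that pattern, with hypothetical names:

#include <atomic>
#include <cassert>
#include <thread>

std::atomic<bool> is_reselection_pending{false};
int scheduling_decision = 0; // stands in for scheduler state

int main() {
    std::thread producer([] {
        scheduling_decision = 42;                                      // write state first...
        is_reselection_pending.store(true, std::memory_order_release); // ...then publish
    });
    std::thread consumer([] {
        while (!is_reselection_pending.load(std::memory_order_acquire)) {
        }
        // The acquire load synchronizes with the release store, so the
        // earlier write to scheduling_decision is guaranteed visible here.
        assert(scheduling_decision == 42);
    });
    producer.join();
    consumer.join();
}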
@@ -155,8 +135,10 @@ private:
     static constexpr u32 min_regular_priority = 2;
     std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> scheduled_queue;
     std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> suggested_queue;
-    std::atomic<bool> reselection_pending;
+    std::atomic<bool> is_reselection_pending;
 
+    // `preemption_priorities` are the priority levels at which the global scheduler
+    // preempts threads every 10 ms, ordered from core 0 to core 3.
     std::array<u32, NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
 
     /// Lists all thread ids that aren't deleted/etc.
@@ -166,7 +148,7 @@ private:
 
 class Scheduler final {
 public:
-    explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, const u32 core_id);
+    explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 core_id);
     ~Scheduler();
 
     /// Returns whether there are any threads that are ready to run.
@@ -175,26 +157,27 @@ public:
     /// Reschedules to the next available thread (call after current thread is suspended)
     void TryDoContextSwitch();
 
+    /// Unloads the currently running thread
     void UnloadThread();
 
+    /// Selects the threads at the top of the scheduling multilevel queue
     void SelectThreads();
 
     /// Gets the current running thread
     Thread* GetCurrentThread() const;
 
+    /// Gets the currently selected thread from the top of the multilevel queue
     Thread* GetSelectedThread() const;
 
     /// Gets the timestamp for the last context switch in ticks.
     u64 GetLastContextSwitchTicks() const;
 
     bool ContextSwitchPending() const {
-        return context_switch_pending;
+        return is_context_switch_pending;
     }
 
-    void Shutdown() {
-        current_thread = nullptr;
-        selected_thread = nullptr;
-    }
+    /// Shuts down the scheduler.
+    void Shutdown();
 
 private:
     friend class GlobalScheduler;
@@ -226,7 +209,7 @@ private:
     u64 idle_selection_count = 0;
     const u32 core_id;
 
-    bool context_switch_pending = false;
+    bool is_context_switch_pending = false;
 };
 
 } // namespace Kernel
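As a rough guess at how the GlobalScheduler/Scheduler split is driven (the diff does not show the call sites): the global scheduler picks selected_thread per core and raises is_context_switch_pending, and each core's Scheduler later commits the switch via TryDoContextSwitch, which is a no-op when the selection did not change. A toy model under those assumptions:

#include <array>
#include <cstddef>
#include <iostream>

constexpr std::size_t NUM_CPU_CORES = 4;

// Toy per-core scheduler mirroring the members declared in this header.
struct CoreScheduler {
    int current_thread = -1; // -1: idle
    int selected_thread = -1;
    bool is_context_switch_pending = false;

    void TryDoContextSwitch() {
        if (!is_context_switch_pending) {
            return;
        }
        is_context_switch_pending = false;
        if (selected_thread != current_thread) {
            std::cout << "switch " << current_thread << " -> " << selected_thread << '\n';
            current_thread = selected_thread;
        }
    }
};

int main() {
    std::array<CoreScheduler, NUM_CPU_CORES> cores{};
    // Stand-in for GlobalScheduler::SelectThread(core): pick a thread and
    // mark the switch pending only when the selection differs.
    cores[0].selected_thread = 7;
    cores[0].is_context_switch_pending =
        cores[0].selected_thread != cores[0].current_thread;
    for (auto& core : cores) {
        core.TryDoContextSwitch(); // only core 0 prints "switch -1 -> 7"
    }
}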
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 151acf33a..f64236be1 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1556,18 +1556,18 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
 
     auto& scheduler = system.CurrentScheduler();
     auto* const current_thread = scheduler.GetCurrentThread();
-    bool redundant = false;
+    bool is_redundant = false;
 
     if (nanoseconds <= 0) {
         switch (static_cast<SleepType>(nanoseconds)) {
         case SleepType::YieldWithoutLoadBalancing:
-            redundant = current_thread->YieldSimple();
+            is_redundant = current_thread->YieldSimple();
             break;
         case SleepType::YieldWithLoadBalancing:
-            redundant = current_thread->YieldAndBalanceLoad();
+            is_redundant = current_thread->YieldAndBalanceLoad();
             break;
         case SleepType::YieldAndWaitForLoadBalancing:
-            redundant = current_thread->YieldAndWaitForLoadBalancing();
+            is_redundant = current_thread->YieldAndWaitForLoadBalancing();
             break;
         default:
             UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
@@ -1576,9 +1576,9 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
         current_thread->Sleep(nanoseconds);
     }
 
-    if (redundant) {
+    if (is_redundant) {
         // If it's redundant, the core is pretty much idle. Some games keep idling
-        // a core while it's doing nothing, we advance timing to avoid costly continuos
+        // a core while it's doing nothing, so we advance timing to avoid costly continuous
         // calls.
         system.CoreTiming().AddTicks(2000);
     }
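The SleepType values being switched on are not shown in this hunk; assuming the usual svcSleepThread contract (0 = yield, -1 = yield with load balancing, -2 = yield and wait for load balancing; worth verifying against the enum earlier in svc.cpp), the dispatch looks like this in isolation:

#include <cstdint>
#include <iostream>

// Assumed special values for svcSleepThread's timeout argument; check
// these against the SleepType definition in svc.cpp.
enum class SleepType : std::int64_t {
    YieldWithoutLoadBalancing = 0,
    YieldWithLoadBalancing = -1,
    YieldAndWaitForLoadBalancing = -2,
};

void SleepThread(std::int64_t nanoseconds) {
    if (nanoseconds > 0) {
        std::cout << "sleep for " << nanoseconds << " ns\n";
        return;
    }
    switch (static_cast<SleepType>(nanoseconds)) {
    case SleepType::YieldWithoutLoadBalancing:
        std::cout << "plain yield\n";
        break;
    case SleepType::YieldWithLoadBalancing:
        std::cout << "yield and rebalance cores\n";
        break;
    case SleepType::YieldAndWaitForLoadBalancing:
        std::cout << "yield and wait to be migrated\n";
        break;
    }
}

int main() {
    SleepThread(0);
    SleepThread(-1);
    SleepThread(-2);
    SleepThread(1000000);
}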
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 7208bbb11..8663fe5ee 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -389,13 +389,13 @@ bool Thread::YieldAndWaitForLoadBalancing() {
 
 void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
     const u32 old_flags = scheduling_state;
-    scheduling_state =
-        (scheduling_state & ThreadSchedMasks::HighMask) | static_cast<u32>(new_status);
+    scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
+                       static_cast<u32>(new_status);
     AdjustSchedulingOnStatus(old_flags);
 }
 
 void Thread::SetCurrentPriority(u32 new_priority) {
-    u32 old_priority = std::exchange(current_priority, new_priority);
+    const u32 old_priority = std::exchange(current_priority, new_priority);
     AdjustSchedulingOnPriority(old_priority);
 }
 
@@ -410,10 +410,9 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
     };
 
     const bool use_override = affinity_override_count != 0;
-    // The value -3 is "do not change the ideal core".
-    if (new_core == -3) {
+    if (new_core == static_cast<s32>(CoreFlags::DontChangeIdealCore)) {
         new_core = use_override ? ideal_core_override : ideal_core;
-        if ((new_affinity_mask & (1 << new_core)) == 0) {
+        if ((new_affinity_mask & (1ULL << new_core)) == 0) {
             return ERR_INVALID_COMBINATION;
         }
     }
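Two things changed here: the magic -3 now has a name (CoreFlags::DontChangeIdealCore, added in thread.h below), and the shift operand is widened to 1ULL so the test is performed in the 64-bit type of the affinity mask rather than as a 32-bit int shift. A standalone sketch of the same validation (names outside CoreFlags are illustrative):

#include <cstdint>
#include <iostream>

enum class CoreFlags : std::int32_t {
    IgnoreIdealCore = -1,
    ProcessIdealCore = -2,
    DontChangeIdealCore = -3,
};

// Returns true when `core` is permitted by the 64-bit affinity mask.
// 1ULL keeps the shift in unsigned 64-bit arithmetic.
bool CoreAllowed(std::uint64_t affinity_mask, std::int32_t core) {
    return (affinity_mask & (1ULL << core)) != 0;
}

int main() {
    const std::int32_t ideal_core = 2;
    std::int32_t new_core = static_cast<std::int32_t>(CoreFlags::DontChangeIdealCore);
    const std::uint64_t new_affinity_mask = 0b0100; // only core 2 allowed

    if (new_core == static_cast<std::int32_t>(CoreFlags::DontChangeIdealCore)) {
        new_core = ideal_core; // keep the current ideal core
        if (!CoreAllowed(new_affinity_mask, new_core)) {
            std::cout << "ERR_INVALID_COMBINATION\n";
            return 1;
        }
    }
    std::cout << "core " << new_core << " accepted\n";
}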
@@ -444,14 +443,14 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
     }
 
     auto& scheduler = kernel.GlobalScheduler();
-    if (static_cast<ThreadSchedStatus>(old_flags & ThreadSchedMasks::LowMask) ==
+    if (static_cast<ThreadSchedStatus>(old_flags & static_cast<u32>(ThreadSchedMasks::LowMask)) ==
         ThreadSchedStatus::Runnable) {
         // In this case the thread was running, now it's pausing/exiting
         if (processor_id >= 0) {
             scheduler.Unschedule(current_priority, processor_id, this);
         }
 
-        for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+        for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
             if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
                 scheduler.Unsuggest(current_priority, core, this);
             }
@@ -462,7 +461,7 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
             scheduler.Schedule(current_priority, processor_id, this);
         }
 
-        for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+        for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
             if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
                 scheduler.Suggest(current_priority, core, this);
             }
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index ceb4d5159..e0f3b6204 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -82,19 +82,25 @@ enum class ThreadSchedStatus : u32 {
     Exited = 3,
 };
 
-enum ThreadSchedFlags : u32 {
+enum class ThreadSchedFlags : u32 {
     ProcessPauseFlag = 1 << 4,
     ThreadPauseFlag = 1 << 5,
     ProcessDebugPauseFlag = 1 << 6,
     KernelInitPauseFlag = 1 << 8,
 };
 
-enum ThreadSchedMasks : u32 {
+enum class ThreadSchedMasks : u32 {
     LowMask = 0x000f,
     HighMask = 0xfff0,
     ForcePauseMask = 0x0070,
 };
 
+enum class CoreFlags : s32 {
+    IgnoreIdealCore = -1,
+    ProcessIdealCore = -2,
+    DontChangeIdealCore = -3,
+};
+
 class Thread final : public WaitObject {
 public:
     using MutexWaitingThreads = std::vector<SharedPtr<Thread>>;
@@ -428,7 +434,8 @@ public:
     }
 
     ThreadSchedStatus GetSchedulingStatus() const {
-        return static_cast<ThreadSchedStatus>(scheduling_state & ThreadSchedMasks::LowMask);
+        return static_cast<ThreadSchedStatus>(scheduling_state &
+                                              static_cast<u32>(ThreadSchedMasks::LowMask));
     }
 
     bool IsRunning() const {
@@ -471,7 +478,8 @@ private:
 
     u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
     u64 last_running_ticks = 0;   ///< CPU tick when thread was last running
-    u64 yield_count = 0;          ///< Number of innecessaries yields occured.
+    u64 yield_count = 0;          ///< Number of redundant yields carried out by this thread.
+                                  ///< A redundant yield is one where no scheduling change occurs.
 
     s32 processor_id = 0;