author    bunnei 2020-12-05 00:02:30 -0800
committer bunnei 2020-12-06 00:27:13 -0800
commit    960500cfd2558c52597fff69c1bb0ea38d922b6a (patch)
tree      be424ba265693bb65959a1e71d28238027d7a278 /src
parent    hle: kernel: KPriorityQueue: Various style fixes based on code review feedback. (diff)
hle: kernel: KScheduler: Various style fixes based on code review feedback.
Diffstat (limited to 'src')
-rw-r--r-- src/core/hle/kernel/k_scheduler.cpp | 42
-rw-r--r-- src/core/hle/kernel/k_scheduler.h   | 49
2 files changed, 41 insertions(+), 50 deletions(-)
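
Most of the k_scheduler.cpp changes below drop the redundant /*static*/ comment that previously prefixed out-of-line definitions of static member functions; staticness is already declared once in the class. A minimal sketch of the pattern, using a hypothetical Scheduler class rather than yuzu's actual types:

    #include <cstdint>

    class Scheduler {
    public:
        static std::uint64_t UpdateThreads(); // declared static once, in the class
    };

    // Before this commit's style, the definition repeated the keyword as a comment:
    //   /*static*/ std::uint64_t Scheduler::UpdateThreads() { ... }
    // After, the definition is written plainly:
    std::uint64_t Scheduler::UpdateThreads() {
        return 0;
    }

    int main() {
        return static_cast<int>(Scheduler::UpdateThreads());
    }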
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index cc2f8ef0e..c5fd82a6b 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -29,8 +29,8 @@ static void IncrementScheduledCount(Kernel::Thread* thread) {
     }
 }
 
-/*static*/ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
-                                            Core::EmuThreadHandle global_thread) {
+void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
+                                 Core::EmuThreadHandle global_thread) {
     u32 current_core = global_thread.host_handle;
     bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
                                (current_core < Core::Hardware::NUM_CPU_CORES);
@@ -81,7 +81,7 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
     }
 }
 
-/*static*/ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
+u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // Clear that we need to update.
@@ -94,7 +94,7 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
     /// We want to go over all cores, finding the highest priority thread and determining if
     /// scheduling is needed for that core.
     for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-        Thread* top_thread = priority_queue.GetScheduledFront((s32)core_id);
+        Thread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
         if (top_thread != nullptr) {
             // If the thread has no waiters, we need to check if the process has a thread pinned.
             // TODO(bunnei): Implement thread pinning
@@ -180,8 +180,7 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
     return cores_needing_scheduling;
 }
 
-/*static*/ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread,
-                                                 u32 old_state) {
+void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // Check if the state has changed, because if it hasn't there's nothing to do.
@@ -204,8 +203,8 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
     }
 }
 
-/*static*/ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread,
-                                                    Thread* current_thread, u32 old_priority) {
+void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
+                                         u32 old_priority) {
 
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
@@ -218,9 +217,8 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
     }
 }
 
-/*static*/ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
-                                                         const KAffinityMask& old_affinity,
-                                                         s32 old_core) {
+void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
+                                             const KAffinityMask& old_affinity, s32 old_core) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // If the thread is runnable, we want to change its affinity in the queue.
@@ -331,38 +329,38 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
     SetSchedulerUpdateNeeded(kernel);
 }
 
-/*static*/ bool KScheduler::CanSchedule(KernelCore& kernel) {
+bool KScheduler::CanSchedule(KernelCore& kernel) {
     return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1;
 }
 
-/*static*/ bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
+bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
     return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire);
 }
 
-/*static*/ void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) {
+void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) {
     kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release);
 }
 
-/*static*/ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
+void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
     kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release);
 }
 
-/*static*/ void KScheduler::DisableScheduling(KernelCore& kernel) {
+void KScheduler::DisableScheduling(KernelCore& kernel) {
     if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
         ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
         scheduler->GetCurrentThread()->DisableDispatch();
     }
 }
 
-/*static*/ void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
-                                             Core::EmuThreadHandle global_thread) {
+void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
+                                  Core::EmuThreadHandle global_thread) {
     if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
         scheduler->GetCurrentThread()->EnableDispatch();
     }
     RescheduleCores(kernel, cores_needing_scheduling, global_thread);
 }
 
-/*static*/ u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
+u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
     if (IsSchedulerUpdateNeeded(kernel)) {
         return UpdateHighestPriorityThreadsImpl(kernel);
     } else {
@@ -370,7 +368,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
     }
 }
 
-/*static*/ KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
+KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
     return kernel.GlobalSchedulerContext().priority_queue;
 }
 
@@ -585,7 +583,7 @@ void KScheduler::YieldToAnyThread() {
 
 KScheduler::KScheduler(Core::System& system, std::size_t core_id)
     : system(system), core_id(core_id) {
-    switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this);
+    switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
     this->state.needs_scheduling = true;
     this->state.interrupt_task_thread_runnable = false;
     this->state.should_count_idle = false;
@@ -722,7 +720,7 @@ void KScheduler::SwitchToCurrent() {
         }
         const auto is_switch_pending = [this] {
             std::scoped_lock lock{guard};
-            return !!this->state.needs_scheduling;
+            return state.needs_scheduling.load(std::memory_order_relaxed);
         };
         do {
             if (current_thread != nullptr && !current_thread->IsHLEThread()) {
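
The SwitchToCurrent change above (and the matching ContextSwitchPending change in the header below) reads the atomic needs_scheduling flag with an explicit std::memory_order_relaxed load instead of the implicit bool conversion !!state.needs_scheduling, which would default to a sequentially consistent load. A self-contained sketch of the pattern; SchedulerState here is a hypothetical stand-in, not yuzu's struct:

    #include <atomic>
    #include <cstdio>

    struct SchedulerState {
        std::atomic<bool> needs_scheduling{false};
    };

    int main() {
        SchedulerState state;
        state.needs_scheduling.store(true, std::memory_order_relaxed);

        // Same intent as `!!state.needs_scheduling`, but the ordering is explicit
        // and relaxed; the real code relies on the scheduler guard for synchronization.
        const bool pending = state.needs_scheduling.load(std::memory_order_relaxed);
        std::printf("switch pending: %d\n", pending ? 1 : 0);
        return 0;
    }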
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index d52ecc0db..e84abc84c 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -51,32 +51,28 @@ public:
     void Reload(Thread* thread);
 
     /// Gets the current running thread
-    Thread* GetCurrentThread() const;
+    [[nodiscard]] Thread* GetCurrentThread() const;
 
     /// Gets the timestamp for the last context switch in ticks.
-    u64 GetLastContextSwitchTicks() const;
+    [[nodiscard]] u64 GetLastContextSwitchTicks() const;
 
-    bool ContextSwitchPending() const {
-        return this->state.needs_scheduling;
+    [[nodiscard]] bool ContextSwitchPending() const {
+        return state.needs_scheduling.load(std::memory_order_relaxed);
     }
 
     void Initialize();
 
     void OnThreadStart();
 
-    std::shared_ptr<Common::Fiber>& ControlContext() {
+    [[nodiscard]] std::shared_ptr<Common::Fiber>& ControlContext() {
         return switch_fiber;
     }
 
-    const std::shared_ptr<Common::Fiber>& ControlContext() const {
+    [[nodiscard]] const std::shared_ptr<Common::Fiber>& ControlContext() const {
         return switch_fiber;
     }
 
-    std::size_t CurrentCoreId() const {
-        return core_id;
-    }
-
-    u64 UpdateHighestPriorityThread(Thread* highest_thread);
+    [[nodiscard]] u64 UpdateHighestPriorityThread(Thread* highest_thread);
 
     /**
      * Takes a thread and moves it to the back of the it's priority list.
@@ -114,7 +110,18 @@ public:
     static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
                                             const KAffinityMask& old_affinity, s32 old_core);
 
+    static bool CanSchedule(KernelCore& kernel);
+    static bool IsSchedulerUpdateNeeded(const KernelCore& kernel);
+    static void SetSchedulerUpdateNeeded(KernelCore& kernel);
+    static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
+    static void DisableScheduling(KernelCore& kernel);
+    static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
+                                 Core::EmuThreadHandle global_thread);
+    [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
+
 private:
+    friend class GlobalSchedulerContext;
+
     /**
      * Takes care of selecting the new scheduled threads in three steps:
      *
@@ -129,24 +136,11 @@ private:
      *
      * returns the cores needing scheduling.
      */
-    static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
+    [[nodiscard]] static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
 
-    void RotateScheduledQueue(s32 core_id, s32 priority);
+    [[nodiscard]] static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel);
 
-public:
-    static bool CanSchedule(KernelCore& kernel);
-    static bool IsSchedulerUpdateNeeded(const KernelCore& kernel);
-    static void SetSchedulerUpdateNeeded(KernelCore& kernel);
-    static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
-    static void DisableScheduling(KernelCore& kernel);
-    static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
-                                 Core::EmuThreadHandle global_thread);
-    static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
-
-private:
-    friend class GlobalSchedulerContext;
-
-    static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel);
+    void RotateScheduledQueue(s32 core_id, s32 priority);
 
     void Schedule() {
         ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
@@ -175,7 +169,6 @@ private:
     static void OnSwitch(void* this_scheduler);
     void SwitchToCurrent();
 
-private:
     Thread* current_thread{};
     Thread* idle_thread{};
 
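
The header changes also add [[nodiscard]] to the value-returning accessors and update functions, so a caller that silently drops the result gets a compiler diagnostic. An illustrative sketch; the Example class is hypothetical:

    #include <cstdint>

    class Example {
    public:
        [[nodiscard]] std::uint64_t GetLastContextSwitchTicks() const {
            return ticks;
        }

    private:
        std::uint64_t ticks{};
    };

    int main() {
        Example e;
        // e.GetLastContextSwitchTicks();  // warning: ignoring return value declared [[nodiscard]]
        return static_cast<int>(e.GetLastContextSwitchTicks());
    }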