| author | 2020-12-04 23:37:35 -0800 |
|---|---|
| committer | 2020-12-06 00:03:24 -0800 |
| commit | b1326d9230f7c1f33960d2816042b1a41d3b839f (patch) |
| tree | b75c28a51c1d5612d829c8bd35373844ef644fa3 /src/core/hle/kernel |
| parent | kernel: KScopedSchedulerLockAndSleep: Remove unused ctor. (diff) |
| download | yuzu-b1326d9230f7c1f33960d2816042b1a41d3b839f.tar.gz yuzu-b1326d9230f7c1f33960d2816042b1a41d3b839f.tar.xz yuzu-b1326d9230f7c1f33960d2816042b1a41d3b839f.zip |
hle: kernel: Use C++ style comments in KScheduler, etc.
Diffstat (limited to 'src/core/hle/kernel')
| -rw-r--r-- | src/core/hle/kernel/k_priority_queue.h | 63 |
| -rw-r--r-- | src/core/hle/kernel/k_scheduler.cpp | 203 |
| -rw-r--r-- | src/core/hle/kernel/k_scheduler_lock.h | 16 |
| -rw-r--r-- | src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h | 6 |
4 files changed, 136 insertions, 152 deletions
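The change is purely cosmetic: `/* ... */` block comments carried over from the Mesosphère-derived scheduler rewrite are converted to `// ...` line comments, matching the rest of the yuzu codebase. Both forms are equivalent to the compiler, as this trivial sketch illustrates:

```cpp
// Minimal demonstration that the two comment styles carry no semantic
// difference; this commit only normalizes the first form into the second.
#include <cstdio>

int main() {
    /* C-style block comment: the form being removed. */
    // C++-style line comment: the form being adopted.
    std::puts("comment style does not affect behavior");
    return 0;
}
```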
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
index 88e4e1ed4..d374f5c00 100644
--- a/src/core/hle/kernel/k_priority_queue.h
+++ b/src/core/hle/kernel/k_priority_queue.h
| @@ -87,15 +87,15 @@ public: | |||
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | constexpr bool PushBack(s32 core, Member* member) { | 89 | constexpr bool PushBack(s32 core, Member* member) { |
| 90 | /* Get the entry associated with the member. */ | 90 | // Get the entry associated with the member. |
| 91 | Entry& member_entry = member->GetPriorityQueueEntry(core); | 91 | Entry& member_entry = member->GetPriorityQueueEntry(core); |
| 92 | 92 | ||
| 93 | /* Get the entry associated with the end of the queue. */ | 93 | // Get the entry associated with the end of the queue. |
| 94 | Member* tail = this->root[core].GetPrev(); | 94 | Member* tail = this->root[core].GetPrev(); |
| 95 | Entry& tail_entry = | 95 | Entry& tail_entry = |
| 96 | (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core]; | 96 | (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core]; |
| 97 | 97 | ||
| 98 | /* Link the entries. */ | 98 | // Link the entries. |
| 99 | member_entry.SetPrev(tail); | 99 | member_entry.SetPrev(tail); |
| 100 | member_entry.SetNext(nullptr); | 100 | member_entry.SetNext(nullptr); |
| 101 | tail_entry.SetNext(member); | 101 | tail_entry.SetNext(member); |
| @@ -105,15 +105,15 @@ public: | |||
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | constexpr bool PushFront(s32 core, Member* member) { | 107 | constexpr bool PushFront(s32 core, Member* member) { |
| 108 | /* Get the entry associated with the member. */ | 108 | // Get the entry associated with the member. |
| 109 | Entry& member_entry = member->GetPriorityQueueEntry(core); | 109 | Entry& member_entry = member->GetPriorityQueueEntry(core); |
| 110 | 110 | ||
| 111 | /* Get the entry associated with the front of the queue. */ | 111 | // Get the entry associated with the front of the queue. |
| 112 | Member* head = this->root[core].GetNext(); | 112 | Member* head = this->root[core].GetNext(); |
| 113 | Entry& head_entry = | 113 | Entry& head_entry = |
| 114 | (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core]; | 114 | (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core]; |
| 115 | 115 | ||
| 116 | /* Link the entries. */ | 116 | // Link the entries. |
| 117 | member_entry.SetPrev(nullptr); | 117 | member_entry.SetPrev(nullptr); |
| 118 | member_entry.SetNext(head); | 118 | member_entry.SetNext(head); |
| 119 | head_entry.SetPrev(member); | 119 | head_entry.SetPrev(member); |
| @@ -123,10 +123,10 @@ public: | |||
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | constexpr bool Remove(s32 core, Member* member) { | 125 | constexpr bool Remove(s32 core, Member* member) { |
| 126 | /* Get the entry associated with the member. */ | 126 | // Get the entry associated with the member. |
| 127 | Entry& member_entry = member->GetPriorityQueueEntry(core); | 127 | Entry& member_entry = member->GetPriorityQueueEntry(core); |
| 128 | 128 | ||
| 129 | /* Get the entries associated with next and prev. */ | 129 | // Get the entries associated with next and prev. |
| 130 | Member* prev = member_entry.GetPrev(); | 130 | Member* prev = member_entry.GetPrev(); |
| 131 | Member* next = member_entry.GetNext(); | 131 | Member* next = member_entry.GetNext(); |
| 132 | Entry& prev_entry = | 132 | Entry& prev_entry = |
| @@ -134,7 +134,7 @@ public: | |||
| 134 | Entry& next_entry = | 134 | Entry& next_entry = |
| 135 | (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core]; | 135 | (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core]; |
| 136 | 136 | ||
| 137 | /* Unlink. */ | 137 | // Unlink. |
| 138 | prev_entry.SetNext(next); | 138 | prev_entry.SetNext(next); |
| 139 | next_entry.SetPrev(prev); | 139 | next_entry.SetPrev(prev); |
| 140 | 140 | ||
| @@ -152,8 +152,7 @@ public: | |||
| 152 | Common::BitSet64<NumPriority> available_priorities[NumCores]; | 152 | Common::BitSet64<NumPriority> available_priorities[NumCores]; |
| 153 | 153 | ||
| 154 | public: | 154 | public: |
| 155 | constexpr KPriorityQueueImpl() : queues(), available_priorities() { /* ... */ | 155 | constexpr KPriorityQueueImpl() : queues(), available_priorities() {} |
| 156 | } | | |
| 157 | 156 | ||
| 158 | constexpr void PushBack(s32 priority, s32 core, Member* member) { | 157 | constexpr void PushBack(s32 priority, s32 core, Member* member) { |
| 159 | ASSERT(IsValidCore(core)); | 158 | ASSERT(IsValidCore(core)); |
| @@ -267,14 +266,14 @@ private: | |||
| 267 | constexpr void PushBack(s32 priority, Member* member) { | 266 | constexpr void PushBack(s32 priority, Member* member) { |
| 268 | ASSERT(IsValidPriority(priority)); | 267 | ASSERT(IsValidPriority(priority)); |
| 269 | 268 | ||
| 270 | /* Push onto the scheduled queue for its core, if we can. */ | 269 | // Push onto the scheduled queue for its core, if we can. |
| 271 | u64 affinity = member->GetAffinityMask().GetAffinityMask(); | 270 | u64 affinity = member->GetAffinityMask().GetAffinityMask(); |
| 272 | if (const s32 core = member->GetActiveCore(); core >= 0) { | 271 | if (const s32 core = member->GetActiveCore(); core >= 0) { |
| 273 | this->scheduled_queue.PushBack(priority, core, member); | 272 | this->scheduled_queue.PushBack(priority, core, member); |
| 274 | ClearAffinityBit(affinity, core); | 273 | ClearAffinityBit(affinity, core); |
| 275 | } | 274 | } |
| 276 | 275 | ||
| 277 | /* And suggest the thread for all other cores. */ | 276 | // And suggest the thread for all other cores. |
| 278 | while (affinity) { | 277 | while (affinity) { |
| 279 | this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); | 278 | this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); |
| 280 | } | 279 | } |
| @@ -283,15 +282,15 @@ private: | |||
| 283 | constexpr void PushFront(s32 priority, Member* member) { | 282 | constexpr void PushFront(s32 priority, Member* member) { |
| 284 | ASSERT(IsValidPriority(priority)); | 283 | ASSERT(IsValidPriority(priority)); |
| 285 | 284 | ||
| 286 | /* Push onto the scheduled queue for its core, if we can. */ | 285 | // Push onto the scheduled queue for its core, if we can. |
| 287 | u64 affinity = member->GetAffinityMask().GetAffinityMask(); | 286 | u64 affinity = member->GetAffinityMask().GetAffinityMask(); |
| 288 | if (const s32 core = member->GetActiveCore(); core >= 0) { | 287 | if (const s32 core = member->GetActiveCore(); core >= 0) { |
| 289 | this->scheduled_queue.PushFront(priority, core, member); | 288 | this->scheduled_queue.PushFront(priority, core, member); |
| 290 | ClearAffinityBit(affinity, core); | 289 | ClearAffinityBit(affinity, core); |
| 291 | } | 290 | } |
| 292 | 291 | ||
| 293 | /* And suggest the thread for all other cores. */ | 292 | // And suggest the thread for all other cores. |
| 294 | /* Note: Nintendo pushes onto the back of the suggested queue, not the front. */ | 293 | // Note: Nintendo pushes onto the back of the suggested queue, not the front. |
| 295 | while (affinity) { | 294 | while (affinity) { |
| 296 | this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); | 295 | this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); |
| 297 | } | 296 | } |
| @@ -300,24 +299,24 @@ private: | |||
| 300 | constexpr void Remove(s32 priority, Member* member) { | 299 | constexpr void Remove(s32 priority, Member* member) { |
| 301 | ASSERT(IsValidPriority(priority)); | 300 | ASSERT(IsValidPriority(priority)); |
| 302 | 301 | ||
| 303 | /* Remove from the scheduled queue for its core. */ | 302 | // Remove from the scheduled queue for its core. |
| 304 | u64 affinity = member->GetAffinityMask().GetAffinityMask(); | 303 | u64 affinity = member->GetAffinityMask().GetAffinityMask(); |
| 305 | if (const s32 core = member->GetActiveCore(); core >= 0) { | 304 | if (const s32 core = member->GetActiveCore(); core >= 0) { |
| 306 | this->scheduled_queue.Remove(priority, core, member); | 305 | this->scheduled_queue.Remove(priority, core, member); |
| 307 | ClearAffinityBit(affinity, core); | 306 | ClearAffinityBit(affinity, core); |
| 308 | } | 307 | } |
| 309 | 308 | ||
| 310 | /* Remove from the suggested queue for all other cores. */ | 309 | // Remove from the suggested queue for all other cores. |
| 311 | while (affinity) { | 310 | while (affinity) { |
| 312 | this->suggested_queue.Remove(priority, GetNextCore(affinity), member); | 311 | this->suggested_queue.Remove(priority, GetNextCore(affinity), member); |
| 313 | } | 312 | } |
| 314 | } | 313 | } |
| 315 | 314 | ||
| 316 | public: | 315 | public: |
| 317 | constexpr KPriorityQueue() : scheduled_queue(), suggested_queue() { /* ... */ | 316 | constexpr KPriorityQueue() : scheduled_queue(), suggested_queue() { // ... |
| 318 | } | 317 | } |
| 319 | 318 | ||
| 320 | /* Getters. */ | 319 | // Getters. |
| 321 | constexpr Member* GetScheduledFront(s32 core) const { | 320 | constexpr Member* GetScheduledFront(s32 core) const { |
| 322 | return this->scheduled_queue.GetFront(core); | 321 | return this->scheduled_queue.GetFront(core); |
| 323 | } | 322 | } |
| @@ -346,7 +345,7 @@ public: | |||
| 346 | return member->GetPriorityQueueEntry(core).GetNext(); | 345 | return member->GetPriorityQueueEntry(core).GetNext(); |
| 347 | } | 346 | } |
| 348 | 347 | ||
| 349 | /* Mutators. */ | 348 | // Mutators. |
| 350 | constexpr void PushBack(Member* member) { | 349 | constexpr void PushBack(Member* member) { |
| 351 | this->PushBack(member->GetPriority(), member); | 350 | this->PushBack(member->GetPriority(), member); |
| 352 | } | 351 | } |
| @@ -364,15 +363,15 @@ public: | |||
| 364 | member); | 363 | member); |
| 365 | } | 364 | } |
| 366 | 365 | ||
| 367 | /* First class fancy operations. */ | 366 | // First class fancy operations. |
| 368 | constexpr void ChangePriority(s32 prev_priority, bool is_running, Member* member) { | 367 | constexpr void ChangePriority(s32 prev_priority, bool is_running, Member* member) { |
| 369 | ASSERT(IsValidPriority(prev_priority)); | 368 | ASSERT(IsValidPriority(prev_priority)); |
| 370 | 369 | ||
| 371 | /* Remove the member from the queues. */ | 370 | // Remove the member from the queues. |
| 372 | const s32 new_priority = member->GetPriority(); | 371 | const s32 new_priority = member->GetPriority(); |
| 373 | this->Remove(prev_priority, member); | 372 | this->Remove(prev_priority, member); |
| 374 | 373 | ||
| 375 | /* And enqueue. If the member is running, we want to keep it running. */ | 374 | // And enqueue. If the member is running, we want to keep it running. |
| 376 | if (is_running) { | 375 | if (is_running) { |
| 377 | this->PushFront(new_priority, member); | 376 | this->PushFront(new_priority, member); |
| 378 | } else { | 377 | } else { |
| @@ -382,12 +381,12 @@ public: | |||
| 382 | 381 | ||
| 383 | constexpr void ChangeAffinityMask(s32 prev_core, const AffinityMaskType& prev_affinity, | 382 | constexpr void ChangeAffinityMask(s32 prev_core, const AffinityMaskType& prev_affinity, |
| 384 | Member* member) { | 383 | Member* member) { |
| 385 | /* Get the new information. */ | 384 | // Get the new information. |
| 386 | const s32 priority = member->GetPriority(); | 385 | const s32 priority = member->GetPriority(); |
| 387 | const AffinityMaskType& new_affinity = member->GetAffinityMask(); | 386 | const AffinityMaskType& new_affinity = member->GetAffinityMask(); |
| 388 | const s32 new_core = member->GetActiveCore(); | 387 | const s32 new_core = member->GetActiveCore(); |
| 389 | 388 | ||
| 390 | /* Remove the member from all queues it was in before. */ | 389 | // Remove the member from all queues it was in before. |
| 391 | for (s32 core = 0; core < static_cast<s32>(NumCores); core++) { | 390 | for (s32 core = 0; core < static_cast<s32>(NumCores); core++) { |
| 392 | if (prev_affinity.GetAffinity(core)) { | 391 | if (prev_affinity.GetAffinity(core)) { |
| 393 | if (core == prev_core) { | 392 | if (core == prev_core) { |
| @@ -398,7 +397,7 @@ public: | |||
| 398 | } | 397 | } |
| 399 | } | 398 | } |
| 400 | 399 | ||
| 401 | /* And add the member to all queues it should be in now. */ | 400 | // And add the member to all queues it should be in now. |
| 402 | for (s32 core = 0; core < static_cast<s32>(NumCores); core++) { | 401 | for (s32 core = 0; core < static_cast<s32>(NumCores); core++) { |
| 403 | if (new_affinity.GetAffinity(core)) { | 402 | if (new_affinity.GetAffinity(core)) { |
| 404 | if (core == new_core) { | 403 | if (core == new_core) { |
| @@ -411,18 +410,18 @@ public: | |||
| 411 | } | 410 | } |
| 412 | 411 | ||
| 413 | constexpr void ChangeCore(s32 prev_core, Member* member, bool to_front = false) { | 412 | constexpr void ChangeCore(s32 prev_core, Member* member, bool to_front = false) { |
| 414 | /* Get the new information. */ | 413 | // Get the new information. |
| 415 | const s32 new_core = member->GetActiveCore(); | 414 | const s32 new_core = member->GetActiveCore(); |
| 416 | const s32 priority = member->GetPriority(); | 415 | const s32 priority = member->GetPriority(); |
| 417 | 416 | ||
| 418 | /* We don't need to do anything if the core is the same. */ | 417 | // We don't need to do anything if the core is the same. |
| 419 | if (prev_core != new_core) { | 418 | if (prev_core != new_core) { |
| 420 | /* Remove from the scheduled queue for the previous core. */ | 419 | // Remove from the scheduled queue for the previous core. |
| 421 | if (prev_core >= 0) { | 420 | if (prev_core >= 0) { |
| 422 | this->scheduled_queue.Remove(priority, prev_core, member); | 421 | this->scheduled_queue.Remove(priority, prev_core, member); |
| 423 | } | 422 | } |
| 424 | 423 | ||
| 425 | /* Remove from the suggested queue and add to the scheduled queue for the new core. */ | 424 | // Remove from the suggested queue and add to the scheduled queue for the new core. |
| 426 | if (new_core >= 0) { | 425 | if (new_core >= 0) { |
| 427 | this->suggested_queue.Remove(priority, new_core, member); | 426 | this->suggested_queue.Remove(priority, new_core, member); |
| 428 | if (to_front) { | 427 | if (to_front) { |
| @@ -432,7 +431,7 @@ public: | |||
| 432 | } | 431 | } |
| 433 | } | 432 | } |
| 434 | 433 | ||
| 435 | /* Add to the suggested queue for the previous core. */ | 434 | // Add to the suggested queue for the previous core. |
| 436 | if (prev_core >= 0) { | 435 | if (prev_core >= 0) { |
| 437 | this->suggested_queue.PushBack(priority, prev_core, member); | 436 | this->suggested_queue.PushBack(priority, prev_core, member); |
| 438 | } | 437 | } |
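The hunks above touch KPriorityQueue's core mechanism: an intrusive doubly-linked list per core (and per priority), with `this->root[core]` acting as a sentinel entry for both ends. Below is a minimal single-core sketch of that linking logic with simplified stand-in names; the real implementation also keeps a `Common::BitSet64` of occupied priorities so the highest non-empty priority can be found quickly.

```cpp
// Single-core sketch of the intrusive list behind KPriorityQueueImpl.
// Names are simplified stand-ins, not yuzu's real interfaces.
#include <cassert>

struct Member;

struct Entry {
    Member* prev = nullptr;
    Member* next = nullptr;
};

struct Member {
    Entry entry; // intrusive: the links live inside the element itself
};

struct IntrusiveList {
    Entry root; // sentinel: root.next is the head, root.prev is the tail

    // Mirrors PushBack: link the member after the current tail, falling
    // back to the sentinel when the list is empty.
    void PushBack(Member* member) {
        Member* tail = root.prev;
        Entry& tail_entry = (tail != nullptr) ? tail->entry : root;
        member->entry.prev = tail;
        member->entry.next = nullptr;
        tail_entry.next = member;
        root.prev = member;
    }

    // Mirrors Remove: unlink by patching both neighbours, again using the
    // sentinel on whichever side has no real neighbour.
    void Remove(Member* member) {
        Member* prev = member->entry.prev;
        Member* next = member->entry.next;
        Entry& prev_entry = (prev != nullptr) ? prev->entry : root;
        Entry& next_entry = (next != nullptr) ? next->entry : root;
        prev_entry.next = next;
        next_entry.prev = prev;
    }
};

int main() {
    IntrusiveList list;
    Member a, b;
    list.PushBack(&a);
    list.PushBack(&b);
    list.Remove(&a);
    assert(list.root.next == &b && list.root.prev == &b);
    return 0;
}
```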
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 9645fee22..cc2f8ef0e 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
| @@ -84,35 +84,20 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { | |||
| 84 | /*static*/ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { | 84 | /*static*/ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) { |
| 85 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | 85 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); |
| 86 | 86 | ||
| 87 | /* Clear that we need to update. */ | 87 | // Clear that we need to update. |
| 88 | ClearSchedulerUpdateNeeded(kernel); | 88 | ClearSchedulerUpdateNeeded(kernel); |
| 89 | 89 | ||
| 90 | u64 cores_needing_scheduling = 0, idle_cores = 0; | 90 | u64 cores_needing_scheduling = 0, idle_cores = 0; |
| 91 | Thread* top_threads[Core::Hardware::NUM_CPU_CORES]; | 91 | Thread* top_threads[Core::Hardware::NUM_CPU_CORES]; |
| 92 | auto& priority_queue = GetPriorityQueue(kernel); | 92 | auto& priority_queue = GetPriorityQueue(kernel); |
| 93 | 93 | ||
| 94 | /* We want to go over all cores, finding the highest priority thread and determining if | 94 | /// We want to go over all cores, finding the highest priority thread and determining if |
| 95 | * scheduling is needed for that core. */ | 95 | /// scheduling is needed for that core. |
| 96 | for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | 96 | for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { |
| 97 | Thread* top_thread = priority_queue.GetScheduledFront((s32)core_id); | 97 | Thread* top_thread = priority_queue.GetScheduledFront((s32)core_id); |
| 98 | if (top_thread != nullptr) { | 98 | if (top_thread != nullptr) { |
| 99 | ///* If the thread has no waiters, we need to check if the process has a thread pinned. | 99 | // If the thread has no waiters, we need to check if the process has a thread pinned. |
| 100 | ///*/ | 100 | // TODO(bunnei): Implement thread pinning |
| 101 | // if (top_thread->GetNumKernelWaiters() == 0) { | ||
| 102 | // if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) { | ||
| 103 | // if (Thread* pinned = parent->GetPinnedThread(core_id); | ||
| 104 | // pinned != nullptr && pinned != top_thread) { | ||
| 105 | // /* We prefer our parent's pinned thread if possible. However, we also | ||
| 106 | // don't | ||
| 107 | // * want to schedule un-runnable threads. */ | ||
| 108 | // if (pinned->GetRawState() == Thread::ThreadState_Runnable) { | ||
| 109 | // top_thread = pinned; | ||
| 110 | // } else { | ||
| 111 | // top_thread = nullptr; | ||
| 112 | // } | ||
| 113 | // } | ||
| 114 | // } | ||
| 115 | //} | ||
| 116 | } else { | 101 | } else { |
| 117 | idle_cores |= (1ULL << core_id); | 102 | idle_cores |= (1ULL << core_id); |
| 118 | } | 103 | } |
| @@ -122,27 +107,27 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { | |||
| 122 | kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]); | 107 | kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]); |
| 123 | } | 108 | } |
| 124 | 109 | ||
| 125 | /* Idle cores are bad. We're going to try to migrate threads to each idle core in turn. */ | 110 | // Idle cores are bad. We're going to try to migrate threads to each idle core in turn. |
| 126 | while (idle_cores != 0) { | 111 | while (idle_cores != 0) { |
| 127 | u32 core_id = Common::CountTrailingZeroes64(idle_cores); | 112 | u32 core_id = Common::CountTrailingZeroes64(idle_cores); |
| 128 | if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) { | 113 | if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) { |
| 129 | s32 migration_candidates[Core::Hardware::NUM_CPU_CORES]; | 114 | s32 migration_candidates[Core::Hardware::NUM_CPU_CORES]; |
| 130 | size_t num_candidates = 0; | 115 | size_t num_candidates = 0; |
| 131 | 116 | ||
| 132 | /* While we have a suggested thread, try to migrate it! */ | 117 | // While we have a suggested thread, try to migrate it! |
| 133 | while (suggested != nullptr) { | 118 | while (suggested != nullptr) { |
| 134 | /* Check if the suggested thread is the top thread on its core. */ | 119 | // Check if the suggested thread is the top thread on its core. |
| 135 | const s32 suggested_core = suggested->GetActiveCore(); | 120 | const s32 suggested_core = suggested->GetActiveCore(); |
| 136 | if (Thread* top_thread = | 121 | if (Thread* top_thread = |
| 137 | (suggested_core >= 0) ? top_threads[suggested_core] : nullptr; | 122 | (suggested_core >= 0) ? top_threads[suggested_core] : nullptr; |
| 138 | top_thread != suggested) { | 123 | top_thread != suggested) { |
| 139 | /* Make sure we're not dealing with threads too high priority for migration. */ | 124 | // Make sure we're not dealing with threads too high priority for migration. |
| 140 | if (top_thread != nullptr && | 125 | if (top_thread != nullptr && |
| 141 | top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) { | 126 | top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) { |
| 142 | break; | 127 | break; |
| 143 | } | 128 | } |
| 144 | 129 | ||
| 145 | /* The suggested thread isn't bound to its core, so we can migrate it! */ | 130 | // The suggested thread isn't bound to its core, so we can migrate it! |
| 146 | suggested->SetActiveCore(core_id); | 131 | suggested->SetActiveCore(core_id); |
| 147 | priority_queue.ChangeCore(suggested_core, suggested); | 132 | priority_queue.ChangeCore(suggested_core, suggested); |
| 148 | 133 | ||
| @@ -152,30 +137,30 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { | |||
| 152 | break; | 137 | break; |
| 153 | } | 138 | } |
| 154 | 139 | ||
| 155 | /* Note this core as a candidate for migration. */ | 140 | // Note this core as a candidate for migration. |
| 156 | ASSERT(num_candidates < Core::Hardware::NUM_CPU_CORES); | 141 | ASSERT(num_candidates < Core::Hardware::NUM_CPU_CORES); |
| 157 | migration_candidates[num_candidates++] = suggested_core; | 142 | migration_candidates[num_candidates++] = suggested_core; |
| 158 | suggested = priority_queue.GetSuggestedNext(core_id, suggested); | 143 | suggested = priority_queue.GetSuggestedNext(core_id, suggested); |
| 159 | } | 144 | } |
| 160 | 145 | ||
| 161 | /* If suggested is nullptr, we failed to migrate a specific thread. So let's try all our | 146 | // If suggested is nullptr, we failed to migrate a specific thread. So let's try all our |
| 162 | * candidate cores' top threads. */ | 147 | // candidate cores' top threads. |
| 163 | if (suggested == nullptr) { | 148 | if (suggested == nullptr) { |
| 164 | for (size_t i = 0; i < num_candidates; i++) { | 149 | for (size_t i = 0; i < num_candidates; i++) { |
| 165 | /* Check if there's some other thread that can run on the candidate core. */ | 150 | // Check if there's some other thread that can run on the candidate core. |
| 166 | const s32 candidate_core = migration_candidates[i]; | 151 | const s32 candidate_core = migration_candidates[i]; |
| 167 | suggested = top_threads[candidate_core]; | 152 | suggested = top_threads[candidate_core]; |
| 168 | if (Thread* next_on_candidate_core = | 153 | if (Thread* next_on_candidate_core = |
| 169 | priority_queue.GetScheduledNext(candidate_core, suggested); | 154 | priority_queue.GetScheduledNext(candidate_core, suggested); |
| 170 | next_on_candidate_core != nullptr) { | 155 | next_on_candidate_core != nullptr) { |
| 171 | /* The candidate core can run some other thread! We'll migrate its current | 156 | // The candidate core can run some other thread! We'll migrate its current |
| 172 | * top thread to us. */ | 157 | // top thread to us. |
| 173 | top_threads[candidate_core] = next_on_candidate_core; | 158 | top_threads[candidate_core] = next_on_candidate_core; |
| 174 | cores_needing_scheduling |= | 159 | cores_needing_scheduling |= |
| 175 | kernel.Scheduler(candidate_core) | 160 | kernel.Scheduler(candidate_core) |
| 176 | .UpdateHighestPriorityThread(top_threads[candidate_core]); | 161 | .UpdateHighestPriorityThread(top_threads[candidate_core]); |
| 177 | 162 | ||
| 178 | /* Perform the migration. */ | 163 | // Perform the migration. |
| 179 | suggested->SetActiveCore(core_id); | 164 | suggested->SetActiveCore(core_id); |
| 180 | priority_queue.ChangeCore(candidate_core, suggested); | 165 | priority_queue.ChangeCore(candidate_core, suggested); |
| 181 | 166 | ||
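The migration loop above walks `idle_cores` one set bit at a time. Here is a small sketch of that bit-scan pattern, assuming `Common::CountTrailingZeroes64` behaves like C++20's `std::countr_zero`:

```cpp
// Visit each idle core: grab the lowest set bit via count-trailing-zeroes,
// handle that core, then clear the bit.
#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
    std::uint64_t idle_cores = 0b1011; // cores 0, 1 and 3 are idle
    while (idle_cores != 0) {
        const unsigned core_id = static_cast<unsigned>(std::countr_zero(idle_cores));
        std::printf("trying to migrate a suggested thread to core %u\n", core_id);
        idle_cores &= ~(1ULL << core_id); // done with this core
    }
    return 0;
}
```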
| @@ -199,20 +184,20 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { | |||
| 199 | u32 old_state) { | 184 | u32 old_state) { |
| 200 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | 185 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); |
| 201 | 186 | ||
| 202 | /* Check if the state has changed, because if it hasn't there's nothing to do. */ | 187 | // Check if the state has changed, because if it hasn't there's nothing to do. |
| 203 | const auto cur_state = thread->scheduling_state; | 188 | const auto cur_state = thread->scheduling_state; |
| 204 | if (cur_state == old_state) { | 189 | if (cur_state == old_state) { |
| 205 | return; | 190 | return; |
| 206 | } | 191 | } |
| 207 | 192 | ||
| 208 | /* Update the priority queues. */ | 193 | // Update the priority queues. |
| 209 | if (old_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { | 194 | if (old_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { |
| 210 | /* If we were previously runnable, then we're not runnable now, and we should remove. */ | 195 | // If we were previously runnable, then we're not runnable now, and we should remove. |
| 211 | GetPriorityQueue(kernel).Remove(thread); | 196 | GetPriorityQueue(kernel).Remove(thread); |
| 212 | IncrementScheduledCount(thread); | 197 | IncrementScheduledCount(thread); |
| 213 | SetSchedulerUpdateNeeded(kernel); | 198 | SetSchedulerUpdateNeeded(kernel); |
| 214 | } else if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { | 199 | } else if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { |
| 215 | /* If we're now runnable, then we weren't previously, and we should add. */ | 200 | // If we're now runnable, then we weren't previously, and we should add. |
| 216 | GetPriorityQueue(kernel).PushBack(thread); | 201 | GetPriorityQueue(kernel).PushBack(thread); |
| 217 | IncrementScheduledCount(thread); | 202 | IncrementScheduledCount(thread); |
| 218 | SetSchedulerUpdateNeeded(kernel); | 203 | SetSchedulerUpdateNeeded(kernel); |
| @@ -224,7 +209,7 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { | |||
| 224 | 209 | ||
| 225 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | 210 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); |
| 226 | 211 | ||
| 227 | /* If the thread is runnable, we want to change its priority in the queue. */ | 212 | // If the thread is runnable, we want to change its priority in the queue. |
| 228 | if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { | 213 | if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { |
| 229 | GetPriorityQueue(kernel).ChangePriority( | 214 | GetPriorityQueue(kernel).ChangePriority( |
| 230 | old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread); | 215 | old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread); |
| @@ -238,7 +223,7 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { | |||
| 238 | s32 old_core) { | 223 | s32 old_core) { |
| 239 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | 224 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); |
| 240 | 225 | ||
| 241 | /* If the thread is runnable, we want to change its affinity in the queue. */ | 226 | // If the thread is runnable, we want to change its affinity in the queue. |
| 242 | if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { | 227 | if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { |
| 243 | GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread); | 228 | GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread); |
| 244 | IncrementScheduledCount(thread); | 229 | IncrementScheduledCount(thread); |
| @@ -249,11 +234,11 @@ u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) { | |||
| 249 | void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { | 234 | void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { |
| 250 | ASSERT(system.GlobalSchedulerContext().IsLocked()); | 235 | ASSERT(system.GlobalSchedulerContext().IsLocked()); |
| 251 | 236 | ||
| 252 | /* Get a reference to the priority queue. */ | 237 | // Get a reference to the priority queue. |
| 253 | auto& kernel = system.Kernel(); | 238 | auto& kernel = system.Kernel(); |
| 254 | auto& priority_queue = GetPriorityQueue(kernel); | 239 | auto& priority_queue = GetPriorityQueue(kernel); |
| 255 | 240 | ||
| 256 | /* Rotate the front of the queue to the end. */ | 241 | // Rotate the front of the queue to the end. |
| 257 | Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority); | 242 | Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority); |
| 258 | Thread* next_thread = nullptr; | 243 | Thread* next_thread = nullptr; |
| 259 | if (top_thread != nullptr) { | 244 | if (top_thread != nullptr) { |
| @@ -264,27 +249,27 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { | |||
| 264 | } | 249 | } |
| 265 | } | 250 | } |
| 266 | 251 | ||
| 267 | /* While we have a suggested thread, try to migrate it! */ | 252 | // While we have a suggested thread, try to migrate it! |
| 268 | { | 253 | { |
| 269 | Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority); | 254 | Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority); |
| 270 | while (suggested != nullptr) { | 255 | while (suggested != nullptr) { |
| 271 | /* Check if the suggested thread is the top thread on its core. */ | 256 | // Check if the suggested thread is the top thread on its core. |
| 272 | const s32 suggested_core = suggested->GetActiveCore(); | 257 | const s32 suggested_core = suggested->GetActiveCore(); |
| 273 | if (Thread* top_on_suggested_core = | 258 | if (Thread* top_on_suggested_core = |
| 274 | (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) | 259 | (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) |
| 275 | : nullptr; | 260 | : nullptr; |
| 276 | top_on_suggested_core != suggested) { | 261 | top_on_suggested_core != suggested) { |
| 277 | /* If the next thread is a new thread that has been waiting longer than our | 262 | // If the next thread is a new thread that has been waiting longer than our |
| 278 | * suggestion, we prefer it to our suggestion. */ | 263 | // suggestion, we prefer it to our suggestion. |
| 279 | if (top_thread != next_thread && next_thread != nullptr && | 264 | if (top_thread != next_thread && next_thread != nullptr && |
| 280 | next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) { | 265 | next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) { |
| 281 | suggested = nullptr; | 266 | suggested = nullptr; |
| 282 | break; | 267 | break; |
| 283 | } | 268 | } |
| 284 | 269 | ||
| 285 | /* If we're allowed to do a migration, do one. */ | 270 | // If we're allowed to do a migration, do one. |
| 286 | /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion | 271 | // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion |
| 287 | * to the front of the queue. */ | 272 | // to the front of the queue. |
| 288 | if (top_on_suggested_core == nullptr || | 273 | if (top_on_suggested_core == nullptr || |
| 289 | top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { | 274 | top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { |
| 290 | suggested->SetActiveCore(core_id); | 275 | suggested->SetActiveCore(core_id); |
| @@ -294,38 +279,38 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { | |||
| 294 | } | 279 | } |
| 295 | } | 280 | } |
| 296 | 281 | ||
| 297 | /* Get the next suggestion. */ | 282 | // Get the next suggestion. |
| 298 | suggested = priority_queue.GetSamePriorityNext(core_id, suggested); | 283 | suggested = priority_queue.GetSamePriorityNext(core_id, suggested); |
| 299 | } | 284 | } |
| 300 | } | 285 | } |
| 301 | 286 | ||
| 302 | /* Now that we might have migrated a thread with the same priority, check if we can do better. | 287 | // Now that we might have migrated a thread with the same priority, check if we can do better. |
| 303 | */ | 288 | |
| 304 | { | 289 | { |
| 305 | Thread* best_thread = priority_queue.GetScheduledFront(core_id); | 290 | Thread* best_thread = priority_queue.GetScheduledFront(core_id); |
| 306 | if (best_thread == GetCurrentThread()) { | 291 | if (best_thread == GetCurrentThread()) { |
| 307 | best_thread = priority_queue.GetScheduledNext(core_id, best_thread); | 292 | best_thread = priority_queue.GetScheduledNext(core_id, best_thread); |
| 308 | } | 293 | } |
| 309 | 294 | ||
| 310 | /* If the best thread we can choose has a priority the same or worse than ours, try to | 295 | // If the best thread we can choose has a priority the same or worse than ours, try to |
| 311 | * migrate a higher priority thread. */ | 296 | // migrate a higher priority thread. |
| 312 | if (best_thread != nullptr && best_thread->GetPriority() >= static_cast<u32>(priority)) { | 297 | if (best_thread != nullptr && best_thread->GetPriority() >= static_cast<u32>(priority)) { |
| 313 | Thread* suggested = priority_queue.GetSuggestedFront(core_id); | 298 | Thread* suggested = priority_queue.GetSuggestedFront(core_id); |
| 314 | while (suggested != nullptr) { | 299 | while (suggested != nullptr) { |
| 315 | /* If the suggestion's priority is the same as ours, don't bother. */ | 300 | // If the suggestion's priority is the same as ours, don't bother. |
| 316 | if (suggested->GetPriority() >= best_thread->GetPriority()) { | 301 | if (suggested->GetPriority() >= best_thread->GetPriority()) { |
| 317 | break; | 302 | break; |
| 318 | } | 303 | } |
| 319 | 304 | ||
| 320 | /* Check if the suggested thread is the top thread on its core. */ | 305 | // Check if the suggested thread is the top thread on its core. |
| 321 | const s32 suggested_core = suggested->GetActiveCore(); | 306 | const s32 suggested_core = suggested->GetActiveCore(); |
| 322 | if (Thread* top_on_suggested_core = | 307 | if (Thread* top_on_suggested_core = |
| 323 | (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) | 308 | (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) |
| 324 | : nullptr; | 309 | : nullptr; |
| 325 | top_on_suggested_core != suggested) { | 310 | top_on_suggested_core != suggested) { |
| 326 | /* If we're allowed to do a migration, do one. */ | 311 | // If we're allowed to do a migration, do one. |
| 327 | /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the | 312 | // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the |
| 328 | * suggestion to the front of the queue. */ | 313 | // suggestion to the front of the queue. |
| 329 | if (top_on_suggested_core == nullptr || | 314 | if (top_on_suggested_core == nullptr || |
| 330 | top_on_suggested_core->GetPriority() >= | 315 | top_on_suggested_core->GetPriority() >= |
| 331 | HighestCoreMigrationAllowedPriority) { | 316 | HighestCoreMigrationAllowedPriority) { |
| @@ -336,13 +321,13 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { | |||
| 336 | } | 321 | } |
| 337 | } | 322 | } |
| 338 | 323 | ||
| 339 | /* Get the next suggestion. */ | 324 | // Get the next suggestion. |
| 340 | suggested = priority_queue.GetSuggestedNext(core_id, suggested); | 325 | suggested = priority_queue.GetSuggestedNext(core_id, suggested); |
| 341 | } | 326 | } |
| 342 | } | 327 | } |
| 343 | } | 328 | } |
| 344 | 329 | ||
| 345 | /* After a rotation, we need a scheduler update. */ | 330 | // After a rotation, we need a scheduler update. |
| 346 | SetSchedulerUpdateNeeded(kernel); | 331 | SetSchedulerUpdateNeeded(kernel); |
| 347 | } | 332 | } |
| 348 | 333 | ||
| @@ -392,38 +377,38 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) { | |||
| 392 | void KScheduler::YieldWithoutCoreMigration() { | 377 | void KScheduler::YieldWithoutCoreMigration() { |
| 393 | auto& kernel = system.Kernel(); | 378 | auto& kernel = system.Kernel(); |
| 394 | 379 | ||
| 395 | /* Validate preconditions. */ | 380 | // Validate preconditions. |
| 396 | ASSERT(CanSchedule(kernel)); | 381 | ASSERT(CanSchedule(kernel)); |
| 397 | ASSERT(kernel.CurrentProcess() != nullptr); | 382 | ASSERT(kernel.CurrentProcess() != nullptr); |
| 398 | 383 | ||
| 399 | /* Get the current thread and process. */ | 384 | // Get the current thread and process. |
| 400 | Thread& cur_thread = *GetCurrentThread(); | 385 | Thread& cur_thread = *GetCurrentThread(); |
| 401 | Process& cur_process = *kernel.CurrentProcess(); | 386 | Process& cur_process = *kernel.CurrentProcess(); |
| 402 | 387 | ||
| 403 | /* If the thread's yield count matches, there's nothing for us to do. */ | 388 | // If the thread's yield count matches, there's nothing for us to do. |
| 404 | if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { | 389 | if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { |
| 405 | return; | 390 | return; |
| 406 | } | 391 | } |
| 407 | 392 | ||
| 408 | /* Get a reference to the priority queue. */ | 393 | // Get a reference to the priority queue. |
| 409 | auto& priority_queue = GetPriorityQueue(kernel); | 394 | auto& priority_queue = GetPriorityQueue(kernel); |
| 410 | 395 | ||
| 411 | /* Perform the yield. */ | 396 | // Perform the yield. |
| 412 | { | 397 | { |
| 413 | KScopedSchedulerLock lock(kernel); | 398 | KScopedSchedulerLock lock(kernel); |
| 414 | 399 | ||
| 415 | const auto cur_state = cur_thread.scheduling_state; | 400 | const auto cur_state = cur_thread.scheduling_state; |
| 416 | if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { | 401 | if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { |
| 417 | /* Put the current thread at the back of the queue. */ | 402 | // Put the current thread at the back of the queue. |
| 418 | Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); | 403 | Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); |
| 419 | IncrementScheduledCount(std::addressof(cur_thread)); | 404 | IncrementScheduledCount(std::addressof(cur_thread)); |
| 420 | 405 | ||
| 421 | /* If the next thread is different, we have an update to perform. */ | 406 | // If the next thread is different, we have an update to perform. |
| 422 | if (next_thread != std::addressof(cur_thread)) { | 407 | if (next_thread != std::addressof(cur_thread)) { |
| 423 | SetSchedulerUpdateNeeded(kernel); | 408 | SetSchedulerUpdateNeeded(kernel); |
| 424 | } else { | 409 | } else { |
| 425 | /* Otherwise, set the thread's yield count so that we won't waste work until the | 410 | // Otherwise, set the thread's yield count so that we won't waste work until the |
| 426 | * process is scheduled again. */ | 411 | // process is scheduled again. |
| 427 | cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); | 412 | cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); |
| 428 | } | 413 | } |
| 429 | } | 414 | } |
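All three yield paths begin with the same early-out, visible again in the next two hunks: if the thread's recorded yield count still matches the process's schedule count, nothing has been scheduled since the last no-op yield, so the call returns immediately (the mismatch is restored whenever `IncrementScheduledCount` bumps the process count). A bare sketch with stand-in types:

```cpp
// Thread/Process here are bare stand-ins for the kernel types; only the
// two counters involved in the early-out are modeled.
#include <cstdint>

struct Process {
    std::int64_t scheduled_count = 0;
};

struct Thread {
    std::int64_t yield_schedule_count = -1;
};

// Mirrors the check at the top of each yield: skip the work entirely if
// nothing has been scheduled since this thread's last no-op yield.
bool ShouldSkipYield(const Thread& t, const Process& p) {
    return t.yield_schedule_count == p.scheduled_count;
}
```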
| @@ -433,40 +418,40 @@ void KScheduler::YieldWithoutCoreMigration() { | |||
| 433 | void KScheduler::YieldWithCoreMigration() { | 418 | void KScheduler::YieldWithCoreMigration() { |
| 434 | auto& kernel = system.Kernel(); | 419 | auto& kernel = system.Kernel(); |
| 435 | 420 | ||
| 436 | /* Validate preconditions. */ | 421 | // Validate preconditions. |
| 437 | ASSERT(CanSchedule(kernel)); | 422 | ASSERT(CanSchedule(kernel)); |
| 438 | ASSERT(kernel.CurrentProcess() != nullptr); | 423 | ASSERT(kernel.CurrentProcess() != nullptr); |
| 439 | 424 | ||
| 440 | /* Get the current thread and process. */ | 425 | // Get the current thread and process. |
| 441 | Thread& cur_thread = *GetCurrentThread(); | 426 | Thread& cur_thread = *GetCurrentThread(); |
| 442 | Process& cur_process = *kernel.CurrentProcess(); | 427 | Process& cur_process = *kernel.CurrentProcess(); |
| 443 | 428 | ||
| 444 | /* If the thread's yield count matches, there's nothing for us to do. */ | 429 | // If the thread's yield count matches, there's nothing for us to do. |
| 445 | if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { | 430 | if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { |
| 446 | return; | 431 | return; |
| 447 | } | 432 | } |
| 448 | 433 | ||
| 449 | /* Get a reference to the priority queue. */ | 434 | // Get a reference to the priority queue. |
| 450 | auto& priority_queue = GetPriorityQueue(kernel); | 435 | auto& priority_queue = GetPriorityQueue(kernel); |
| 451 | 436 | ||
| 452 | /* Perform the yield. */ | 437 | // Perform the yield. |
| 453 | { | 438 | { |
| 454 | KScopedSchedulerLock lock(kernel); | 439 | KScopedSchedulerLock lock(kernel); |
| 455 | 440 | ||
| 456 | const auto cur_state = cur_thread.scheduling_state; | 441 | const auto cur_state = cur_thread.scheduling_state; |
| 457 | if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { | 442 | if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { |
| 458 | /* Get the current active core. */ | 443 | // Get the current active core. |
| 459 | const s32 core_id = cur_thread.GetActiveCore(); | 444 | const s32 core_id = cur_thread.GetActiveCore(); |
| 460 | 445 | ||
| 461 | /* Put the current thread at the back of the queue. */ | 446 | // Put the current thread at the back of the queue. |
| 462 | Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); | 447 | Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); |
| 463 | IncrementScheduledCount(std::addressof(cur_thread)); | 448 | IncrementScheduledCount(std::addressof(cur_thread)); |
| 464 | 449 | ||
| 465 | /* While we have a suggested thread, try to migrate it! */ | 450 | // While we have a suggested thread, try to migrate it! |
| 466 | bool recheck = false; | 451 | bool recheck = false; |
| 467 | Thread* suggested = priority_queue.GetSuggestedFront(core_id); | 452 | Thread* suggested = priority_queue.GetSuggestedFront(core_id); |
| 468 | while (suggested != nullptr) { | 453 | while (suggested != nullptr) { |
| 469 | /* Check if the suggested thread is the thread running on its core. */ | 454 | // Check if the suggested thread is the thread running on its core. |
| 470 | const s32 suggested_core = suggested->GetActiveCore(); | 455 | const s32 suggested_core = suggested->GetActiveCore(); |
| 471 | 456 | ||
| 472 | if (Thread* running_on_suggested_core = | 457 | if (Thread* running_on_suggested_core = |
| @@ -474,10 +459,10 @@ void KScheduler::YieldWithCoreMigration() { | |||
| 474 | ? kernel.Scheduler(suggested_core).state.highest_priority_thread | 459 | ? kernel.Scheduler(suggested_core).state.highest_priority_thread |
| 475 | : nullptr; | 460 | : nullptr; |
| 476 | running_on_suggested_core != suggested) { | 461 | running_on_suggested_core != suggested) { |
| 477 | /* If the current thread's priority is higher than our suggestion's we prefer | 462 | // If the current thread's priority is higher than our suggestion's we prefer |
| 478 | * the next thread to the suggestion. */ | 463 | // the next thread to the suggestion. We also prefer the next thread when the |
| 479 | /* We also prefer the next thread when the current thread's priority is equal to | 464 | // current thread's priority is equal to the suggestions, but the next thread |
| 480 | * the suggestions, but the next thread has been waiting longer. */ | 465 | // has been waiting longer. |
| 481 | if ((suggested->GetPriority() > cur_thread.GetPriority()) || | 466 | if ((suggested->GetPriority() > cur_thread.GetPriority()) || |
| 482 | (suggested->GetPriority() == cur_thread.GetPriority() && | 467 | (suggested->GetPriority() == cur_thread.GetPriority() && |
| 483 | next_thread != std::addressof(cur_thread) && | 468 | next_thread != std::addressof(cur_thread) && |
| @@ -486,9 +471,9 @@ void KScheduler::YieldWithCoreMigration() { | |||
| 486 | break; | 471 | break; |
| 487 | } | 472 | } |
| 488 | 473 | ||
| 489 | /* If we're allowed to do a migration, do one. */ | 474 | // If we're allowed to do a migration, do one. |
| 490 | /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the | 475 | // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the |
| 491 | * suggestion to the front of the queue. */ | 476 | // suggestion to the front of the queue. |
| 492 | if (running_on_suggested_core == nullptr || | 477 | if (running_on_suggested_core == nullptr || |
| 493 | running_on_suggested_core->GetPriority() >= | 478 | running_on_suggested_core->GetPriority() >= |
| 494 | HighestCoreMigrationAllowedPriority) { | 479 | HighestCoreMigrationAllowedPriority) { |
| @@ -497,23 +482,23 @@ void KScheduler::YieldWithCoreMigration() { | |||
| 497 | IncrementScheduledCount(suggested); | 482 | IncrementScheduledCount(suggested); |
| 498 | break; | 483 | break; |
| 499 | } else { | 484 | } else { |
| 500 | /* We couldn't perform a migration, but we should check again on a future | 485 | // We couldn't perform a migration, but we should check again on a future |
| 501 | * yield. */ | 486 | // yield. |
| 502 | recheck = true; | 487 | recheck = true; |
| 503 | } | 488 | } |
| 504 | } | 489 | } |
| 505 | 490 | ||
| 506 | /* Get the next suggestion. */ | 491 | // Get the next suggestion. |
| 507 | suggested = priority_queue.GetSuggestedNext(core_id, suggested); | 492 | suggested = priority_queue.GetSuggestedNext(core_id, suggested); |
| 508 | } | 493 | } |
| 509 | 494 | ||
| 510 | /* If we still have a suggestion or the next thread is different, we have an update to | 495 | // If we still have a suggestion or the next thread is different, we have an update to |
| 511 | * perform. */ | 496 | // perform. |
| 512 | if (suggested != nullptr || next_thread != std::addressof(cur_thread)) { | 497 | if (suggested != nullptr || next_thread != std::addressof(cur_thread)) { |
| 513 | SetSchedulerUpdateNeeded(kernel); | 498 | SetSchedulerUpdateNeeded(kernel); |
| 514 | } else if (!recheck) { | 499 | } else if (!recheck) { |
| 515 | /* Otherwise if we don't need to re-check, set the thread's yield count so that we | 500 | // Otherwise if we don't need to re-check, set the thread's yield count so that we |
| 516 | * won't waste work until the process is scheduled again. */ | 501 | // won't waste work until the process is scheduled again. |
| 517 | cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); | 502 | cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); |
| 518 | } | 503 | } |
| 519 | } | 504 | } |
| @@ -523,48 +508,48 @@ void KScheduler::YieldWithCoreMigration() { | |||
| 523 | void KScheduler::YieldToAnyThread() { | 508 | void KScheduler::YieldToAnyThread() { |
| 524 | auto& kernel = system.Kernel(); | 509 | auto& kernel = system.Kernel(); |
| 525 | 510 | ||
| 526 | /* Validate preconditions. */ | 511 | // Validate preconditions. |
| 527 | ASSERT(CanSchedule(kernel)); | 512 | ASSERT(CanSchedule(kernel)); |
| 528 | ASSERT(kernel.CurrentProcess() != nullptr); | 513 | ASSERT(kernel.CurrentProcess() != nullptr); |
| 529 | 514 | ||
| 530 | /* Get the current thread and process. */ | 515 | // Get the current thread and process. |
| 531 | Thread& cur_thread = *GetCurrentThread(); | 516 | Thread& cur_thread = *GetCurrentThread(); |
| 532 | Process& cur_process = *kernel.CurrentProcess(); | 517 | Process& cur_process = *kernel.CurrentProcess(); |
| 533 | 518 | ||
| 534 | /* If the thread's yield count matches, there's nothing for us to do. */ | 519 | // If the thread's yield count matches, there's nothing for us to do. |
| 535 | if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { | 520 | if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { |
| 536 | return; | 521 | return; |
| 537 | } | 522 | } |
| 538 | 523 | ||
| 539 | /* Get a reference to the priority queue. */ | 524 | // Get a reference to the priority queue. |
| 540 | auto& priority_queue = GetPriorityQueue(kernel); | 525 | auto& priority_queue = GetPriorityQueue(kernel); |
| 541 | 526 | ||
| 542 | /* Perform the yield. */ | 527 | // Perform the yield. |
| 543 | { | 528 | { |
| 544 | KScopedSchedulerLock lock(kernel); | 529 | KScopedSchedulerLock lock(kernel); |
| 545 | 530 | ||
| 546 | const auto cur_state = cur_thread.scheduling_state; | 531 | const auto cur_state = cur_thread.scheduling_state; |
| 547 | if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { | 532 | if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { |
| 548 | /* Get the current active core. */ | 533 | // Get the current active core. |
| 549 | const s32 core_id = cur_thread.GetActiveCore(); | 534 | const s32 core_id = cur_thread.GetActiveCore(); |
| 550 | 535 | ||
| 551 | /* Migrate the current thread to core -1. */ | 536 | // Migrate the current thread to core -1. |
| 552 | cur_thread.SetActiveCore(-1); | 537 | cur_thread.SetActiveCore(-1); |
| 553 | priority_queue.ChangeCore(core_id, std::addressof(cur_thread)); | 538 | priority_queue.ChangeCore(core_id, std::addressof(cur_thread)); |
| 554 | IncrementScheduledCount(std::addressof(cur_thread)); | 539 | IncrementScheduledCount(std::addressof(cur_thread)); |
| 555 | 540 | ||
| 556 | /* If there's nothing scheduled, we can try to perform a migration. */ | 541 | // If there's nothing scheduled, we can try to perform a migration. |
| 557 | if (priority_queue.GetScheduledFront(core_id) == nullptr) { | 542 | if (priority_queue.GetScheduledFront(core_id) == nullptr) { |
| 558 | /* While we have a suggested thread, try to migrate it! */ | 543 | // While we have a suggested thread, try to migrate it! |
| 559 | Thread* suggested = priority_queue.GetSuggestedFront(core_id); | 544 | Thread* suggested = priority_queue.GetSuggestedFront(core_id); |
| 560 | while (suggested != nullptr) { | 545 | while (suggested != nullptr) { |
| 561 | /* Check if the suggested thread is the top thread on its core. */ | 546 | // Check if the suggested thread is the top thread on its core. |
| 562 | const s32 suggested_core = suggested->GetActiveCore(); | 547 | const s32 suggested_core = suggested->GetActiveCore(); |
| 563 | if (Thread* top_on_suggested_core = | 548 | if (Thread* top_on_suggested_core = |
| 564 | (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) | 549 | (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) |
| 565 | : nullptr; | 550 | : nullptr; |
| 566 | top_on_suggested_core != suggested) { | 551 | top_on_suggested_core != suggested) { |
| 567 | /* If we're allowed to do a migration, do one. */ | 552 | // If we're allowed to do a migration, do one. |
| 568 | if (top_on_suggested_core == nullptr || | 553 | if (top_on_suggested_core == nullptr || |
| 569 | top_on_suggested_core->GetPriority() >= | 554 | top_on_suggested_core->GetPriority() >= |
| 570 | HighestCoreMigrationAllowedPriority) { | 555 | HighestCoreMigrationAllowedPriority) { |
| @@ -573,25 +558,25 @@ void KScheduler::YieldToAnyThread() { | |||
| 573 | IncrementScheduledCount(suggested); | 558 | IncrementScheduledCount(suggested); |
| 574 | } | 559 | } |
| 575 | 560 | ||
| 576 | /* Regardless of whether we migrated, we had a candidate, so we're done. */ | 561 | // Regardless of whether we migrated, we had a candidate, so we're done. |
| 577 | break; | 562 | break; |
| 578 | } | 563 | } |
| 579 | 564 | ||
| 580 | /* Get the next suggestion. */ | 565 | // Get the next suggestion. |
| 581 | suggested = priority_queue.GetSuggestedNext(core_id, suggested); | 566 | suggested = priority_queue.GetSuggestedNext(core_id, suggested); |
| 582 | } | 567 | } |
| 583 | 568 | ||
| 584 | /* If the suggestion is different from the current thread, we need to perform an | 569 | // If the suggestion is different from the current thread, we need to perform an |
| 585 | * update. */ | 570 | // update. |
| 586 | if (suggested != std::addressof(cur_thread)) { | 571 | if (suggested != std::addressof(cur_thread)) { |
| 587 | SetSchedulerUpdateNeeded(kernel); | 572 | SetSchedulerUpdateNeeded(kernel); |
| 588 | } else { | 573 | } else { |
| 589 | /* Otherwise, set the thread's yield count so that we won't waste work until the | 574 | // Otherwise, set the thread's yield count so that we won't waste work until the |
| 590 | * process is scheduled again. */ | 575 | // process is scheduled again. |
| 591 | cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); | 576 | cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); |
| 592 | } | 577 | } |
| 593 | } else { | 578 | } else { |
| 594 | /* Otherwise, we have an update to perform. */ | 579 | // Otherwise, we have an update to perform. |
| 595 | SetSchedulerUpdateNeeded(kernel); | 580 | SetSchedulerUpdateNeeded(kernel); |
| 596 | } | 581 | } |
| 597 | } | 582 | } |
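For context, the three yield flavors above are the scheduler ends of the thread-yield system call, conventionally selected by svcSleepThread's nanoseconds argument (0, -1, -2 in HOS). The dispatcher below is an illustrative assumption, not a quote of yuzu's actual svc code:

```cpp
// Illustrative dispatcher only: the 0 / -1 / -2 mapping follows the HOS
// convention; check yuzu's svc implementation for the real dispatch.
#include <cstdint>

struct Scheduler {
    void YieldWithoutCoreMigration() {}
    void YieldWithCoreMigration() {}
    void YieldToAnyThread() {}
};

void SleepThread(Scheduler& scheduler, std::int64_t nanoseconds) {
    switch (nanoseconds) {
    case 0: // yield, stay on the current core
        scheduler.YieldWithoutCoreMigration();
        break;
    case -1: // yield, allowing a core migration
        scheduler.YieldWithCoreMigration();
        break;
    case -2: // yield to any runnable thread
        scheduler.YieldToAnyThread();
        break;
    default: // a positive value would schedule a real timed sleep
        break;
    }
}
```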
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index d46f5fc04..39a02af2a 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
| @@ -34,19 +34,19 @@ public: | |||
| 34 | 34 | ||
| 35 | void Lock() { | 35 | void Lock() { |
| 36 | if (this->IsLockedByCurrentThread()) { | 36 | if (this->IsLockedByCurrentThread()) { |
| 37 | /* If we already own the lock, we can just increment the count. */ | 37 | // If we already own the lock, we can just increment the count. |
| 38 | ASSERT(this->lock_count > 0); | 38 | ASSERT(this->lock_count > 0); |
| 39 | this->lock_count++; | 39 | this->lock_count++; |
| 40 | } else { | 40 | } else { |
| 41 | /* Otherwise, we want to disable scheduling and acquire the spinlock. */ | 41 | // Otherwise, we want to disable scheduling and acquire the spinlock. |
| 42 | SchedulerType::DisableScheduling(kernel); | 42 | SchedulerType::DisableScheduling(kernel); |
| 43 | this->spin_lock.lock(); | 43 | this->spin_lock.lock(); |
| 44 | 44 | ||
| 45 | /* For debug, ensure that our state is valid. */ | 45 | // For debug, ensure that our state is valid. |
| 46 | ASSERT(this->lock_count == 0); | 46 | ASSERT(this->lock_count == 0); |
| 47 | ASSERT(this->owner_thread == Core::EmuThreadHandle::InvalidHandle()); | 47 | ASSERT(this->owner_thread == Core::EmuThreadHandle::InvalidHandle()); |
| 48 | 48 | ||
| 49 | /* Increment count, take ownership. */ | 49 | // Increment count, take ownership. |
| 50 | this->lock_count = 1; | 50 | this->lock_count = 1; |
| 51 | this->owner_thread = kernel.GetCurrentEmuThreadID(); | 51 | this->owner_thread = kernel.GetCurrentEmuThreadID(); |
| 52 | } | 52 | } |
| @@ -56,18 +56,18 @@ public: | |||
| 56 | ASSERT(this->IsLockedByCurrentThread()); | 56 | ASSERT(this->IsLockedByCurrentThread()); |
| 57 | ASSERT(this->lock_count > 0); | 57 | ASSERT(this->lock_count > 0); |
| 58 | 58 | ||
| 59 | /* Release an instance of the lock. */ | 59 | // Release an instance of the lock. |
| 60 | if ((--this->lock_count) == 0) { | 60 | if ((--this->lock_count) == 0) { |
| 61 | /* We're no longer going to hold the lock. Take note of what cores need scheduling. */ | 61 | // We're no longer going to hold the lock. Take note of what cores need scheduling. |
| 62 | const u64 cores_needing_scheduling = | 62 | const u64 cores_needing_scheduling = |
| 63 | SchedulerType::UpdateHighestPriorityThreads(kernel); | 63 | SchedulerType::UpdateHighestPriorityThreads(kernel); |
| 64 | Core::EmuThreadHandle leaving_thread = owner_thread; | 64 | Core::EmuThreadHandle leaving_thread = owner_thread; |
| 65 | 65 | ||
| 66 | /* Note that we no longer hold the lock, and unlock the spinlock. */ | 66 | // Note that we no longer hold the lock, and unlock the spinlock. |
| 67 | this->owner_thread = Core::EmuThreadHandle::InvalidHandle(); | 67 | this->owner_thread = Core::EmuThreadHandle::InvalidHandle(); |
| 68 | this->spin_lock.unlock(); | 68 | this->spin_lock.unlock(); |
| 69 | 69 | ||
| 70 | /* Enable scheduling, and perform a rescheduling operation. */ | 70 | // Enable scheduling, and perform a rescheduling operation. |
| 71 | SchedulerType::EnableScheduling(kernel, cores_needing_scheduling, leaving_thread); | 71 | SchedulerType::EnableScheduling(kernel, cores_needing_scheduling, leaving_thread); |
| 72 | } | 72 | } |
| 73 | } | 73 | } |
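The lock above is a recursive lock with side effects: re-entry by the owning thread just bumps `lock_count`, while the outermost `Unlock()` computes which cores need rescheduling before re-enabling scheduling. A condensed sketch, with `std::mutex` and `std::thread::id` standing in for the kernel's spinlock and `Core::EmuThreadHandle`:

```cpp
// Condensed sketch of the recursive scheduler lock; the points where the
// real code disables/enables scheduling are noted in comments.
#include <cassert>
#include <mutex>
#include <thread>

class RecursiveSchedulerLock {
public:
    void Lock() {
        // Like the original, this reads the owner field unsynchronized;
        // it can only match when this thread itself wrote it.
        if (owner == std::this_thread::get_id()) {
            assert(lock_count > 0);
            ++lock_count; // already held by us: recursive acquire
        } else {
            // Real code: SchedulerType::DisableScheduling(kernel) here.
            spin.lock();
            assert(lock_count == 0);
            lock_count = 1;
            owner = std::this_thread::get_id();
        }
    }

    void Unlock() {
        assert(owner == std::this_thread::get_id());
        assert(lock_count > 0);
        if (--lock_count == 0) {
            // Real code: note the cores needing scheduling *before* the
            // unlock, then EnableScheduling(...) after it.
            owner = std::thread::id{};
            spin.unlock();
        }
    }

private:
    std::mutex spin;         // stand-in for the kernel spinlock
    std::thread::id owner{}; // stand-in for Core::EmuThreadHandle
    int lock_count = 0;
};
```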
diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
index 16f470923..d1cc5dda7 100644
--- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
+++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
| @@ -28,17 +28,17 @@ public: | |||
| 28 | : kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) { | 28 | : kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) { |
| 29 | event_handle = InvalidHandle; | 29 | event_handle = InvalidHandle; |
| 30 | 30 | ||
| 31 | /* Lock the scheduler. */ | 31 | // Lock the scheduler. |
| 32 | kernel.GlobalSchedulerContext().scheduler_lock.Lock(); | 32 | kernel.GlobalSchedulerContext().scheduler_lock.Lock(); |
| 33 | } | 33 | } |
| 34 | 34 | ||
| 35 | ~KScopedSchedulerLockAndSleep() { | 35 | ~KScopedSchedulerLockAndSleep() { |
| 36 | /* Register the sleep. */ | 36 | // Register the sleep. |
| 37 | if (this->timeout_tick > 0) { | 37 | if (this->timeout_tick > 0) { |
| 38 | kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick); | 38 | kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick); |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | /* Unlock the scheduler. */ | 41 | // Unlock the scheduler. |
| 42 | kernel.GlobalSchedulerContext().scheduler_lock.Unlock(); | 42 | kernel.GlobalSchedulerContext().scheduler_lock.Unlock(); |
| 43 | } | 43 | } |
| 44 | 44 | ||
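KScopedSchedulerLockAndSleep is a small RAII helper: the constructor takes the scheduler lock, and the destructor registers the sleep timeout (if one was requested) before releasing it, so arming the wakeup stays atomic with respect to the scheduler. A self-contained sketch with placeholder types; the real constructor also clears an event-handle out-parameter, omitted here:

```cpp
// Sketch of the RAII idiom above: lock on construction, register the sleep
// and unlock on destruction. FakeKernel/FakeTimeManager are stand-ins, not
// yuzu's real interfaces.
#include <cstdint>
#include <mutex>

struct FakeTimeManager {
    // The real kernel arms a per-thread wakeup event here.
    void ScheduleTimeEvent(std::int64_t timeout_tick) { (void)timeout_tick; }
};

struct FakeKernel {
    std::mutex scheduler_lock; // stand-in for the global scheduler lock
    FakeTimeManager time_manager;
};

class ScopedSchedulerLockAndSleep {
public:
    ScopedSchedulerLockAndSleep(FakeKernel& k, std::int64_t timeout)
        : kernel(k), timeout_tick(timeout) {
        kernel.scheduler_lock.lock(); // lock the scheduler
    }

    ~ScopedSchedulerLockAndSleep() {
        // Register the sleep only if a timeout was requested, exactly as
        // the destructor in the diff does, then unlock the scheduler.
        if (timeout_tick > 0) {
            kernel.time_manager.ScheduleTimeEvent(timeout_tick);
        }
        kernel.scheduler_lock.unlock();
    }

private:
    FakeKernel& kernel;
    std::int64_t timeout_tick;
};

int main() {
    FakeKernel kernel;
    {
        ScopedSchedulerLockAndSleep sleep_guard(kernel, /*timeout=*/1000);
        // ... queue the thread for waiting while the lock is held ...
    } // timeout registered and scheduler unlocked here
    return 0;
}
```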