Diffstat (limited to 'src')

 src/core/hle/kernel/init/init_slab_setup.cpp |   6
 src/core/hle/kernel/k_condition_variable.cpp |  16
 src/core/hle/kernel/k_light_lock.cpp         |   6
 src/core/hle/kernel/k_process.cpp            |   4
 src/core/hle/kernel/k_thread.cpp             | 269
 src/core/hle/kernel/k_thread.h               | 165
 src/core/hle/kernel/kernel.cpp               |  93
 src/core/hle/kernel/kernel.h                 |  67

 8 files changed, 436 insertions, 190 deletions
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index be52405c6..5e4090e2b 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -33,6 +33,9 @@
 
 namespace Kernel::Init {
 
+// For macro convenience.
+using KThreadLockInfo = KThread::LockWithPriorityInheritanceInfo;
+
 #define SLAB_COUNT(CLASS) kernel.SlabResourceCounts().num_##CLASS
 
 #define FOREACH_SLAB_TYPE(HANDLER, ...) \
@@ -54,7 +57,8 @@ namespace Kernel::Init {
     HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) \
     HANDLER(KEventInfo, (SLAB_COUNT(KThread) + SLAB_COUNT(KDebug)), ##__VA_ARGS__) \
     HANDLER(KDebug, (SLAB_COUNT(KDebug)), ##__VA_ARGS__) \
-    HANDLER(KSecureSystemResource, (SLAB_COUNT(KProcess)), ##__VA_ARGS__)
+    HANDLER(KSecureSystemResource, (SLAB_COUNT(KProcess)), ##__VA_ARGS__) \
+    HANDLER(KThreadLockInfo, (SLAB_COUNT(KThread)), ##__VA_ARGS__)
 
 namespace {
 
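The new KThreadLockInfo alias only exists so the HANDLER X-macro above can name the nested KThread::LockWithPriorityInheritanceInfo type, and its slab is sized to the KThread count, presumably because a waiting thread occupies at most one lock-info record at a time. As a rough illustration of the X-macro pattern the slab setup uses (DemoCounts and the DEMO_* names are hypothetical, not the real initializer), each HANDLER entry expands into one per-type action:

    // Hedged sketch of the FOREACH/HANDLER X-macro pattern; names are illustrative.
    #include <cstddef>
    #include <cstdio>

    struct DemoCounts {
        std::size_t num_KThread = 800;
        std::size_t num_KProcess = 79;
    };

    #define DEMO_SLAB_COUNT(CLASS) counts.num_##CLASS

    #define DEMO_FOREACH_SLAB_TYPE(HANDLER)              \
        HANDLER(KProcess, DEMO_SLAB_COUNT(KProcess))     \
        HANDLER(KThread, DEMO_SLAB_COUNT(KThread))       \
        HANDLER(KThreadLockInfo, DEMO_SLAB_COUNT(KThread))

    #define DEMO_PRINT_SLAB(NAME, COUNT)                 \
        std::printf("%-16s -> %zu objects\n", #NAME, static_cast<std::size_t>(COUNT));

    int main() {
        DemoCounts counts{};
        DEMO_FOREACH_SLAB_TYPE(DEMO_PRINT_SLAB)
        return 0;
    }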
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index c6a088942..8dae78397 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -111,15 +111,15 @@ Result KConditionVariable::SignalToAddress(VAddr addr) {
         KScopedSchedulerLock sl(kernel);
 
         // Remove waiter thread.
-        s32 num_waiters{};
+        bool has_waiters{};
         KThread* const next_owner_thread =
-            owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
+            owner_thread->RemoveWaiterByKey(std::addressof(has_waiters), addr);
 
         // Determine the next tag.
         u32 next_value{};
         if (next_owner_thread != nullptr) {
             next_value = next_owner_thread->GetAddressKeyValue();
-            if (num_waiters > 1) {
+            if (has_waiters) {
                 next_value |= Svc::HandleWaitMask;
             }
         }
@@ -247,9 +247,11 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
             (it->GetConditionVariableKey() == cv_key)) {
             KThread* target_thread = std::addressof(*it);
 
-            this->SignalImpl(target_thread);
             it = thread_tree.erase(it);
             target_thread->ClearConditionVariable();
+
+            this->SignalImpl(target_thread);
+
             ++num_waiters;
         }
 
@@ -279,16 +281,16 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
         // Update the value and process for the next owner.
         {
             // Remove waiter thread.
-            s32 num_waiters{};
+            bool has_waiters{};
             KThread* next_owner_thread =
-                cur_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
+                cur_thread->RemoveWaiterByKey(std::addressof(has_waiters), addr);
 
             // Update for the next owner thread.
             u32 next_value{};
             if (next_owner_thread != nullptr) {
                 // Get the next tag value.
                 next_value = next_owner_thread->GetAddressKeyValue();
-                if (num_waiters > 1) {
+                if (has_waiters) {
                     next_value |= Svc::HandleWaitMask;
                 }
 
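The only behavioral change in these hunks is the out-parameter: RemoveWaiterByKey used to report a waiter count and callers tested num_waiters > 1, while it now reports directly whether any waiters remain behind the newly chosen owner. Either way, the value written back to the mutex word is the next owner's tag, with a wait-mask bit set when more threads are still queued, roughly as in the sketch below (kHandleWaitMask is an illustrative stand-in for Svc::HandleWaitMask, not its real value):

    // Sketch of the "next value" computation; constant and names are illustrative.
    #include <cassert>
    #include <cstdint>

    constexpr std::uint32_t kHandleWaitMask = 1u << 30; // stand-in for Svc::HandleWaitMask

    std::uint32_t NextMutexValue(std::uint32_t next_owner_tag, bool has_waiters) {
        std::uint32_t next_value = next_owner_tag;
        if (has_waiters) {
            next_value |= kHandleWaitMask; // tell userspace other threads still wait here
        }
        return next_value;
    }

    int main() {
        assert(NextMutexValue(0x1234, false) == 0x1234u);
        assert(NextMutexValue(0x1234, true) == (0x1234u | kHandleWaitMask));
        return 0;
    }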
diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp
index d791acbe3..b922a67a5 100644
--- a/src/core/hle/kernel/k_light_lock.cpp
+++ b/src/core/hle/kernel/k_light_lock.cpp
@@ -90,15 +90,15 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
         KScopedSchedulerLock sl(kernel);
 
         // Get the next owner.
-        s32 num_waiters;
+        bool has_waiters;
         KThread* next_owner = owner_thread->RemoveWaiterByKey(
-            std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));
+            std::addressof(has_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));
 
         // Pass the lock to the next owner.
         uintptr_t next_tag = 0;
         if (next_owner != nullptr) {
             next_tag =
-                reinterpret_cast<uintptr_t>(next_owner) | static_cast<uintptr_t>(num_waiters > 1);
+                reinterpret_cast<uintptr_t>(next_owner) | static_cast<uintptr_t>(has_waiters);
 
             next_owner->EndWait(ResultSuccess);
 
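In KLightLock the same information is packed into the lock word itself: the tag is the next owner's pointer with its lowest bit set while other waiters remain, which is why the num_waiters > 1 test can be replaced by the boolean directly. A minimal stand-alone sketch of that encoding (DemoThread and the helper names are hypothetical, not yuzu's KLightLock API):

    // Hedged sketch of a pointer-plus-flag lock tag; not the actual KLightLock.
    #include <cassert>
    #include <cstdint>

    struct alignas(2) DemoThread {}; // alignment >= 2 keeps the low bit free

    std::uintptr_t PackTag(DemoThread* owner, bool has_waiters) {
        return reinterpret_cast<std::uintptr_t>(owner) | static_cast<std::uintptr_t>(has_waiters);
    }

    DemoThread* TagOwner(std::uintptr_t tag) {
        return reinterpret_cast<DemoThread*>(tag & ~static_cast<std::uintptr_t>(1));
    }

    bool TagHasWaiters(std::uintptr_t tag) {
        return (tag & 1) != 0;
    }

    int main() {
        DemoThread thread{};
        const std::uintptr_t tag = PackTag(&thread, true);
        assert(TagOwner(tag) == &thread);
        assert(TagHasWaiters(tag));
        assert(!TagHasWaiters(PackTag(&thread, false)));
        return 0;
    }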
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index d9c1a0eb3..514f20ef4 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -156,9 +156,9 @@ bool KProcess::ReleaseUserException(KThread* thread) {
         exception_thread = nullptr;
 
         // Remove waiter thread.
-        s32 num_waiters{};
+        bool has_waiters{};
         if (KThread* next = thread->RemoveWaiterByKey(
-                std::addressof(num_waiters),
+                std::addressof(has_waiters),
                 reinterpret_cast<uintptr_t>(std::addressof(exception_thread)));
             next != nullptr) {
             next->EndWait(ResultSuccess);
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 599d05947..2831df733 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -191,7 +191,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
     light_ipc_data = nullptr;
 
     // We're not waiting for a lock, and we haven't disabled migration.
-    lock_owner = nullptr;
+    waiting_lock_info = nullptr;
     num_core_migration_disables = 0;
 
     // We have no waiters, but we do have an entrypoint.
@@ -341,25 +341,39 @@ void KThread::Finalize() {
 
     // Release any waiters.
     {
-        ASSERT(lock_owner == nullptr);
+        ASSERT(waiting_lock_info == nullptr);
         KScopedSchedulerLock sl{kernel};
 
-        auto it = waiter_list.begin();
-        while (it != waiter_list.end()) {
-            // Get the thread.
-            KThread* const waiter = std::addressof(*it);
+        // Check that we have no kernel waiters.
+        ASSERT(num_kernel_waiters == 0);
 
-            // The thread shouldn't be a kernel waiter.
-            ASSERT(!waiter->GetAddressKeyIsKernel());
+        auto it = held_lock_info_list.begin();
+        while (it != held_lock_info_list.end()) {
+            // Get the lock info.
+            auto* const lock_info = std::addressof(*it);
 
-            // Clear the lock owner.
-            waiter->SetLockOwner(nullptr);
+            // The lock shouldn't have a kernel waiter.
+            ASSERT(!lock_info->GetIsKernelAddressKey());
 
-            // Erase the waiter from our list.
-            it = waiter_list.erase(it);
+            // Remove all waiters.
+            while (lock_info->GetWaiterCount() != 0) {
+                // Get the front waiter.
+                KThread* const waiter = lock_info->GetHighestPriorityWaiter();
 
-            // Cancel the thread's wait.
-            waiter->CancelWait(ResultInvalidState, true);
+                // Remove it from the lock.
+                if (lock_info->RemoveWaiter(waiter)) {
+                    ASSERT(lock_info->GetWaiterCount() == 0);
+                }
+
+                // Cancel the thread's wait.
+                waiter->CancelWait(ResultInvalidState, true);
+            }
+
+            // Remove the held lock from our list.
+            it = held_lock_info_list.erase(it);
+
+            // Free the lock info.
+            LockWithPriorityInheritanceInfo::Free(kernel, lock_info);
         }
     }
 
@@ -708,6 +722,24 @@ void KThread::SetBasePriority(s32 value) {
     RestorePriority(kernel, this);
 }
 
+KThread* KThread::GetLockOwner() const {
+    return waiting_lock_info != nullptr ? waiting_lock_info->GetOwner() : nullptr;
+}
+
+void KThread::IncreaseBasePriority(s32 priority_) {
+    ASSERT(Svc::HighestThreadPriority <= priority_ && priority_ <= Svc::LowestThreadPriority);
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+    ASSERT(!this->GetStackParameters().is_pinned);
+
+    // Set our base priority.
+    if (base_priority > priority_) {
+        base_priority = priority_;
+
+        // Perform a priority restoration.
+        RestorePriority(kernel, this);
+    }
+}
+
 void KThread::RequestSuspend(SuspendType type) {
     KScopedSchedulerLock sl{kernel};
 
@@ -891,51 +923,87 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
     R_SUCCEED();
 }
 
-void KThread::AddWaiterImpl(KThread* thread) {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) {
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+
+    // Set ourselves as the lock's owner.
+    lock_info->SetOwner(this);
 
-    // Find the right spot to insert the waiter.
-    auto it = waiter_list.begin();
-    while (it != waiter_list.end()) {
-        if (it->GetPriority() > thread->GetPriority()) {
-            break;
+    // Add the lock to our held list.
+    held_lock_info_list.push_front(*lock_info);
+}
+
+KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key_) {
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+
+    // Try to find an existing held lock.
+    for (auto& held_lock : held_lock_info_list) {
+        if (held_lock.GetAddressKey() == address_key_) {
+            return std::addressof(held_lock);
         }
-        it++;
     }
 
+    return nullptr;
+}
+
+void KThread::AddWaiterImpl(KThread* thread) {
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+    ASSERT(thread->GetConditionVariableTree() == nullptr);
+
+    // Get the thread's address key.
+    const auto address_key_ = thread->GetAddressKey();
+    const auto is_kernel_address_key_ = thread->GetIsKernelAddressKey();
+
     // Keep track of how many kernel waiters we have.
-    if (thread->GetAddressKeyIsKernel()) {
+    if (is_kernel_address_key_) {
         ASSERT((num_kernel_waiters++) >= 0);
         KScheduler::SetSchedulerUpdateNeeded(kernel);
     }
 
-    // Insert the waiter.
-    waiter_list.insert(it, *thread);
-    thread->SetLockOwner(this);
+    // Get the relevant lock info.
+    auto* lock_info = this->FindHeldLock(address_key_);
+    if (lock_info == nullptr) {
+        // Create a new lock for the address key.
+        lock_info =
+            LockWithPriorityInheritanceInfo::Create(kernel, address_key_, is_kernel_address_key_);
+
+        // Add the new lock to our list.
+        this->AddHeldLock(lock_info);
+    }
+
+    // Add the thread as waiter to the lock info.
+    lock_info->AddWaiter(thread);
 }
 
 void KThread::RemoveWaiterImpl(KThread* thread) {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
 
     // Keep track of how many kernel waiters we have.
-    if (thread->GetAddressKeyIsKernel()) {
+    if (thread->GetIsKernelAddressKey()) {
        ASSERT((num_kernel_waiters--) > 0);
        KScheduler::SetSchedulerUpdateNeeded(kernel);
     }
 
+    // Get the info for the lock the thread is waiting on.
+    auto* lock_info = thread->GetWaitingLockInfo();
+    ASSERT(lock_info->GetOwner() == this);
+
     // Remove the waiter.
-    waiter_list.erase(waiter_list.iterator_to(*thread));
-    thread->SetLockOwner(nullptr);
+    if (lock_info->RemoveWaiter(thread)) {
+        held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info));
+        LockWithPriorityInheritanceInfo::Free(kernel, lock_info);
+    }
 }
 
-void KThread::RestorePriority(KernelCore& kernel_ctx, KThread* thread) {
-    ASSERT(kernel_ctx.GlobalSchedulerContext().IsLocked());
+void KThread::RestorePriority(KernelCore& kernel, KThread* thread) {
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
 
-    while (true) {
+    while (thread != nullptr) {
         // We want to inherit priority where possible.
         s32 new_priority = thread->GetBasePriority();
-        if (thread->HasWaiters()) {
-            new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
+        for (const auto& held_lock : thread->held_lock_info_list) {
+            new_priority =
+                std::min(new_priority, held_lock.GetHighestPriorityWaiter()->GetPriority());
         }
 
         // If the priority we would inherit is not different from ours, don't do anything.
@@ -943,9 +1011,18 @@ void KThread::RestorePriority(KernelCore& kernel_ctx, KThread* thread) {
             return;
         }
 
+        // Get the owner of whatever lock this thread is waiting on.
+        KThread* const lock_owner = thread->GetLockOwner();
+
+        // If the thread is waiting on some lock, remove it as a waiter to prevent violating red
+        // black tree invariants.
+        if (lock_owner != nullptr) {
+            lock_owner->RemoveWaiterImpl(thread);
+        }
+
         // Ensure we don't violate condition variable red black tree invariants.
         if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
-            BeforeUpdatePriority(kernel_ctx, cv_tree, thread);
+            BeforeUpdatePriority(kernel, cv_tree, thread);
         }
 
         // Change the priority.
@@ -954,73 +1031,99 @@ void KThread::RestorePriority(KernelCore& kernel_ctx, KThread* thread) {
 
         // Restore the condition variable, if relevant.
         if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
-            AfterUpdatePriority(kernel_ctx, cv_tree, thread);
+            AfterUpdatePriority(kernel, cv_tree, thread);
         }
 
-        // Update the scheduler.
-        KScheduler::OnThreadPriorityChanged(kernel_ctx, thread, old_priority);
-
-        // Keep the lock owner up to date.
-        KThread* lock_owner = thread->GetLockOwner();
-        if (lock_owner == nullptr) {
-            return;
+        // If we removed the thread from some lock's waiting list, add it back.
+        if (lock_owner != nullptr) {
+            lock_owner->AddWaiterImpl(thread);
         }
 
-        // Update the thread in the lock owner's sorted list, and continue inheriting.
-        lock_owner->RemoveWaiterImpl(thread);
-        lock_owner->AddWaiterImpl(thread);
+        // Update the scheduler.
+        KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);
+
+        // Continue inheriting priority.
         thread = lock_owner;
     }
 }
 
 void KThread::AddWaiter(KThread* thread) {
-    AddWaiterImpl(thread);
-    RestorePriority(kernel, this);
+    this->AddWaiterImpl(thread);
+
+    // If the thread has a higher priority than us, we should inherit.
+    if (thread->GetPriority() < this->GetPriority()) {
+        RestorePriority(kernel, this);
+    }
 }
 
 void KThread::RemoveWaiter(KThread* thread) {
-    RemoveWaiterImpl(thread);
-    RestorePriority(kernel, this);
+    this->RemoveWaiterImpl(thread);
+
+    // If our priority is the same as the thread's (and we've inherited), we may need to restore to
+    // lower priority.
+    if (this->GetPriority() == thread->GetPriority() &&
+        this->GetPriority() < this->GetBasePriority()) {
+        RestorePriority(kernel, this);
+    }
 }
 
-KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key) {
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
 
-    s32 num_waiters{};
-    KThread* next_lock_owner{};
-    auto it = waiter_list.begin();
-    while (it != waiter_list.end()) {
-        if (it->GetAddressKey() == key) {
-            KThread* thread = std::addressof(*it);
-
-            // Keep track of how many kernel waiters we have.
-            if (thread->GetAddressKeyIsKernel()) {
-                ASSERT((num_kernel_waiters--) > 0);
-                KScheduler::SetSchedulerUpdateNeeded(kernel);
-            }
-            it = waiter_list.erase(it);
-
-            // Update the next lock owner.
-            if (next_lock_owner == nullptr) {
-                next_lock_owner = thread;
-                next_lock_owner->SetLockOwner(nullptr);
-            } else {
-                next_lock_owner->AddWaiterImpl(thread);
-            }
-            num_waiters++;
-        } else {
-            it++;
+    // Get the relevant lock info.
+    auto* lock_info = this->FindHeldLock(key);
+    if (lock_info == nullptr) {
+        *out_has_waiters = false;
+        return nullptr;
+    }
+
+    // Remove the lock info from our held list.
+    held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info));
+
+    // Keep track of how many kernel waiters we have.
+    if (lock_info->GetIsKernelAddressKey()) {
+        num_kernel_waiters -= lock_info->GetWaiterCount();
+        ASSERT(num_kernel_waiters >= 0);
+        KScheduler::SetSchedulerUpdateNeeded(kernel);
+    }
+
+    ASSERT(lock_info->GetWaiterCount() > 0);
+
+    // Remove the highest priority waiter from the lock to be the next owner.
+    KThread* next_lock_owner = lock_info->GetHighestPriorityWaiter();
+    if (lock_info->RemoveWaiter(next_lock_owner)) {
+        // The new owner was the only waiter.
+        *out_has_waiters = false;
+
+        // Free the lock info, since it has no waiters.
+        LockWithPriorityInheritanceInfo::Free(kernel, lock_info);
+    } else {
+        // There are additional waiters on the lock.
+        *out_has_waiters = true;
+
+        // Add the lock to the new owner's held list.
+        next_lock_owner->AddHeldLock(lock_info);
+
+        // Keep track of any kernel waiters for the new owner.
+        if (lock_info->GetIsKernelAddressKey()) {
+            next_lock_owner->num_kernel_waiters += lock_info->GetWaiterCount();
+            ASSERT(next_lock_owner->num_kernel_waiters > 0);
+
+            // NOTE: No need to set scheduler update needed, because we will have already done so
+            // when removing earlier.
        }
    }
 
-    // Do priority updates, if we have a next owner.
-    if (next_lock_owner) {
+    // If our priority is the same as the next owner's (and we've inherited), we may need to restore
+    // to lower priority.
+    if (this->GetPriority() == next_lock_owner->GetPriority() &&
+        this->GetPriority() < this->GetBasePriority()) {
        RestorePriority(kernel, this);
-        RestorePriority(kernel, next_lock_owner);
+        // NOTE: No need to restore priority on the next lock owner, because it was already the
+        // highest priority waiter on the lock.
    }
 
-    // Return output.
-    *out_num_waiters = num_waiters;
+    // Return the next lock owner.
    return next_lock_owner;
 }
 
@@ -1137,9 +1240,7 @@ ThreadState KThread::RequestTerminate() {
     }
 
     // Change the thread's priority to be higher than any system thread's.
-    if (this->GetBasePriority() >= Svc::SystemThreadPriorityHighest) {
-        this->SetBasePriority(TerminatingThreadPriority);
-    }
+    this->IncreaseBasePriority(TerminatingThreadPriority);
 
     // If the thread is runnable, send a termination interrupt to other cores.
     if (this->GetState() == ThreadState::Runnable) {
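The net effect of the k_thread.cpp changes is that a thread no longer keeps one flat, priority-sorted waiter_list; it keeps a list of per-address LockWithPriorityInheritanceInfo records, and RestorePriority walks the ownership chain taking the minimum of the base priority and the best waiter on every held lock (lower value meaning higher priority). A simplified, self-contained model of that computation follows; DemoLock and EffectivePriority are illustrative stand-ins, not the kernel types:

    // Hedged sketch of the priority-inheritance computation described above.
    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct DemoLock {
        std::vector<int> waiter_priorities; // priorities of threads blocked on this lock
        int HighestPriorityWaiter() const {
            return *std::min_element(waiter_priorities.begin(), waiter_priorities.end());
        }
    };

    int EffectivePriority(int base_priority, const std::vector<DemoLock>& held_locks) {
        int priority = base_priority;
        for (const auto& lock : held_locks) {
            if (!lock.waiter_priorities.empty()) {
                priority = std::min(priority, lock.HighestPriorityWaiter());
            }
        }
        return priority;
    }

    int main() {
        std::vector<DemoLock> held{{{44, 28}}, {{60}}};
        std::printf("effective priority: %d\n", EffectivePriority(50, held)); // prints 28
        return 0;
    }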
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index a04de21bc..e09dcbea0 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -339,13 +339,7 @@
     void SetInterruptFlag();
     void ClearInterruptFlag();
 
-    [[nodiscard]] KThread* GetLockOwner() const {
-        return lock_owner;
-    }
-
-    void SetLockOwner(KThread* owner) {
-        lock_owner = owner;
-    }
+    KThread* GetLockOwner() const;
 
     [[nodiscard]] const KAffinityMask& GetAffinityMask() const {
         return physical_affinity_mask;
@@ -601,7 +595,7 @@
 
     [[nodiscard]] Result GetThreadContext3(std::vector<u8>& out);
 
-    [[nodiscard]] KThread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);
+    [[nodiscard]] KThread* RemoveWaiterByKey(bool* out_has_waiters, VAddr key);
 
     [[nodiscard]] VAddr GetAddressKey() const {
         return address_key;
@@ -611,8 +605,8 @@
         return address_key_value;
     }
 
-    [[nodiscard]] bool GetAddressKeyIsKernel() const {
-        return address_key_is_kernel;
+    [[nodiscard]] bool GetIsKernelAddressKey() const {
+        return is_kernel_address_key;
     }
 
     //! NB: intentional deviation from official kernel.
@@ -621,20 +615,17 @@
     // to cope with arbitrary host pointers making their way
     // into things.
 
-    void SetUserAddressKey(VAddr key) {
-        address_key = key;
-        address_key_is_kernel = false;
-    }
-
     void SetUserAddressKey(VAddr key, u32 val) {
+        ASSERT(waiting_lock_info == nullptr);
         address_key = key;
         address_key_value = val;
-        address_key_is_kernel = false;
+        is_kernel_address_key = false;
     }
 
     void SetKernelAddressKey(VAddr key) {
+        ASSERT(waiting_lock_info == nullptr);
         address_key = key;
-        address_key_is_kernel = true;
+        is_kernel_address_key = true;
     }
 
     void ClearWaitQueue() {
@@ -646,10 +637,6 @@
     void EndWait(Result wait_result_);
     void CancelWait(Result wait_result_, bool cancel_timer_task);
 
-    [[nodiscard]] bool HasWaiters() const {
-        return !waiter_list.empty();
-    }
-
     [[nodiscard]] s32 GetNumKernelWaiters() const {
         return num_kernel_waiters;
     }
@@ -722,13 +709,14 @@
     };
 
     void AddWaiterImpl(KThread* thread);
-
     void RemoveWaiterImpl(KThread* thread);
+    static void RestorePriority(KernelCore& kernel, KThread* thread);
 
     void StartTermination();
-
     void FinishTermination();
 
+    void IncreaseBasePriority(s32 priority);
+
     [[nodiscard]] Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
                                     s32 prio, s32 virt_core, KProcess* owner, ThreadType type);
 
@@ -737,8 +725,6 @@
                                     s32 core, KProcess* owner, ThreadType type,
                                     std::function<void()>&& init_func);
 
-    static void RestorePriority(KernelCore& kernel_ctx, KThread* thread);
-
     // For core KThread implementation
     ThreadContext32 thread_context_32{};
     ThreadContext64 thread_context_64{};
@@ -749,6 +735,127 @@
             &KThread::condvar_arbiter_tree_node>;
     using ConditionVariableThreadTree =
         ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
+
+private:
+    struct LockWithPriorityInheritanceComparator {
+        struct RedBlackKeyType {
+            s32 m_priority;
+
+            constexpr s32 GetPriority() const {
+                return m_priority;
+            }
+        };
+
+        template <typename T>
+            requires(std::same_as<T, KThread> || std::same_as<T, RedBlackKeyType>)
+        static constexpr int Compare(const T& lhs, const KThread& rhs) {
+            if (lhs.GetPriority() < rhs.GetPriority()) {
+                // Sort by priority.
+                return -1;
+            } else {
+                return 1;
+            }
+        }
+    };
+    static_assert(std::same_as<Common::RedBlackKeyType<LockWithPriorityInheritanceComparator, void>,
+                               LockWithPriorityInheritanceComparator::RedBlackKeyType>);
+
+    using LockWithPriorityInheritanceThreadTreeTraits =
+        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<
+            &KThread::condvar_arbiter_tree_node>;
+    using LockWithPriorityInheritanceThreadTree =
+        ConditionVariableThreadTreeTraits::TreeType<LockWithPriorityInheritanceComparator>;
+
+public:
+    class LockWithPriorityInheritanceInfo : public KSlabAllocated<LockWithPriorityInheritanceInfo>,
+                                            public boost::intrusive::list_base_hook<> {
+    public:
+        explicit LockWithPriorityInheritanceInfo(KernelCore&) {}
+
+        static LockWithPriorityInheritanceInfo* Create(KernelCore& kernel, VAddr address_key,
+                                                       bool is_kernel_address_key) {
+            // Create a new lock info.
+            auto* new_lock = LockWithPriorityInheritanceInfo::Allocate(kernel);
+            ASSERT(new_lock != nullptr);
+
+            // Set the new lock's address key.
+            new_lock->m_address_key = address_key;
+            new_lock->m_is_kernel_address_key = is_kernel_address_key;
+
+            return new_lock;
+        }
+
+        void SetOwner(KThread* new_owner) {
+            // Set new owner.
+            m_owner = new_owner;
+        }
+
+        void AddWaiter(KThread* waiter) {
+            // Insert the waiter.
+            m_tree.insert(*waiter);
+            m_waiter_count++;
+
+            waiter->SetWaitingLockInfo(this);
+        }
+
+        [[nodiscard]] bool RemoveWaiter(KThread* waiter) {
+            m_tree.erase(m_tree.iterator_to(*waiter));
+
+            waiter->SetWaitingLockInfo(nullptr);
+
+            return (--m_waiter_count) == 0;
+        }
+
+        KThread* GetHighestPriorityWaiter() {
+            return std::addressof(m_tree.front());
+        }
+        const KThread* GetHighestPriorityWaiter() const {
+            return std::addressof(m_tree.front());
+        }
+
+        LockWithPriorityInheritanceThreadTree& GetThreadTree() {
+            return m_tree;
+        }
+        const LockWithPriorityInheritanceThreadTree& GetThreadTree() const {
+            return m_tree;
+        }
+
+        VAddr GetAddressKey() const {
+            return m_address_key;
+        }
+        bool GetIsKernelAddressKey() const {
+            return m_is_kernel_address_key;
+        }
+        KThread* GetOwner() const {
+            return m_owner;
+        }
+        u32 GetWaiterCount() const {
+            return m_waiter_count;
+        }
+
+    private:
+        LockWithPriorityInheritanceThreadTree m_tree{};
+        VAddr m_address_key{};
+        KThread* m_owner{};
+        u32 m_waiter_count{};
+        bool m_is_kernel_address_key{};
+    };
+
+    void SetWaitingLockInfo(LockWithPriorityInheritanceInfo* lock) {
+        waiting_lock_info = lock;
+    }
+
+    LockWithPriorityInheritanceInfo* GetWaitingLockInfo() {
+        return waiting_lock_info;
+    }
+
+    void AddHeldLock(LockWithPriorityInheritanceInfo* lock_info);
+    LockWithPriorityInheritanceInfo* FindHeldLock(VAddr address_key);
+
+private:
+    using LockWithPriorityInheritanceInfoList =
+        boost::intrusive::list<LockWithPriorityInheritanceInfo>;
+
     ConditionVariableThreadTree* condvar_tree{};
     u64 condvar_key{};
     u64 virtual_affinity_mask{};
@@ -765,9 +872,9 @@
     s64 last_scheduled_tick{};
     std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
     KThreadQueue* wait_queue{};
-    WaiterList waiter_list{};
+    LockWithPriorityInheritanceInfoList held_lock_info_list{};
+    LockWithPriorityInheritanceInfo* waiting_lock_info{};
     WaiterList pinned_waiter_list{};
-    KThread* lock_owner{};
     u32 address_key_value{};
     u32 suspend_request_flags{};
     u32 suspend_allowed_flags{};
@@ -791,7 +898,7 @@
     bool debug_attached{};
     s8 priority_inheritance_count{};
     bool resource_limit_release_hint{};
-    bool address_key_is_kernel{};
+    bool is_kernel_address_key{};
     StackParameters stack_parameters{};
     Common::SpinLock context_guard{};
 
@@ -814,6 +921,7 @@
 
     void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key,
                               u32 value) {
+        ASSERT(waiting_lock_info == nullptr);
         condvar_tree = tree;
         condvar_key = cv_key;
         address_key = address;
@@ -829,6 +937,7 @@
     }
 
     void SetAddressArbiter(ConditionVariableThreadTree* tree, u64 address) {
+        ASSERT(waiting_lock_info == nullptr);
        condvar_tree = tree;
        condvar_key = address;
    }
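LockWithPriorityInheritanceInfo reuses the condvar_arbiter_tree_node hook for its waiter tree, so a thread can be queued either on a condition variable or on a lock, but not both at once; that is what the new ASSERT(waiting_lock_info == nullptr) guards above enforce. Waiters are kept ordered by priority, so GetHighestPriorityWaiter() is simply the front of the tree and RemoveWaiter() reports when the last waiter is gone. A toy model of that bookkeeping, with std::multiset standing in for the intrusive red-black tree and DemoWaiter for KThread:

    // Hedged sketch of the per-lock waiter bookkeeping; not the real kernel types.
    #include <cassert>
    #include <set>

    struct DemoWaiter {
        int priority;
        bool operator<(const DemoWaiter& rhs) const {
            return priority < rhs.priority; // lower value = higher priority, sorts first
        }
    };

    class DemoLockInfo {
    public:
        void AddWaiter(DemoWaiter w) { m_waiters.insert(w); }
        // Returns true when the removed waiter was the last one.
        bool RemoveWaiter(const DemoWaiter& w) {
            m_waiters.erase(m_waiters.find(w));
            return m_waiters.empty();
        }
        const DemoWaiter& GetHighestPriorityWaiter() const { return *m_waiters.begin(); }

    private:
        std::multiset<DemoWaiter> m_waiters;
    };

    int main() {
        DemoLockInfo lock;
        lock.AddWaiter({44});
        lock.AddWaiter({28});
        assert(lock.GetHighestPriorityWaiter().priority == 28);
        assert(!lock.RemoveWaiter({28})); // a waiter remains
        assert(lock.RemoveWaiter({44}));  // lock is now free of waiters
        return 0;
    }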
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index ce94d3605..ef7057ff7 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -1318,4 +1318,97 @@ const Core::System& KernelCore::System() const {
     return impl->system;
 }
 
+struct KernelCore::SlabHeapContainer {
+    KSlabHeap<KClientSession> client_session;
+    KSlabHeap<KEvent> event;
+    KSlabHeap<KLinkedListNode> linked_list_node;
+    KSlabHeap<KPort> port;
+    KSlabHeap<KProcess> process;
+    KSlabHeap<KResourceLimit> resource_limit;
+    KSlabHeap<KSession> session;
+    KSlabHeap<KSharedMemory> shared_memory;
+    KSlabHeap<KSharedMemoryInfo> shared_memory_info;
+    KSlabHeap<KThread> thread;
+    KSlabHeap<KTransferMemory> transfer_memory;
+    KSlabHeap<KCodeMemory> code_memory;
+    KSlabHeap<KDeviceAddressSpace> device_address_space;
+    KSlabHeap<KPageBuffer> page_buffer;
+    KSlabHeap<KThreadLocalPage> thread_local_page;
+    KSlabHeap<KObjectName> object_name;
+    KSlabHeap<KSessionRequest> session_request;
+    KSlabHeap<KSecureSystemResource> secure_system_resource;
+    KSlabHeap<KThread::LockWithPriorityInheritanceInfo> lock_info;
+    KSlabHeap<KEventInfo> event_info;
+    KSlabHeap<KDebug> debug;
+};
+
+template <typename T>
+KSlabHeap<T>& KernelCore::SlabHeap() {
+    if constexpr (std::is_same_v<T, KClientSession>) {
+        return slab_heap_container->client_session;
+    } else if constexpr (std::is_same_v<T, KEvent>) {
+        return slab_heap_container->event;
+    } else if constexpr (std::is_same_v<T, KLinkedListNode>) {
+        return slab_heap_container->linked_list_node;
+    } else if constexpr (std::is_same_v<T, KPort>) {
+        return slab_heap_container->port;
+    } else if constexpr (std::is_same_v<T, KProcess>) {
+        return slab_heap_container->process;
+    } else if constexpr (std::is_same_v<T, KResourceLimit>) {
+        return slab_heap_container->resource_limit;
+    } else if constexpr (std::is_same_v<T, KSession>) {
+        return slab_heap_container->session;
+    } else if constexpr (std::is_same_v<T, KSharedMemory>) {
+        return slab_heap_container->shared_memory;
+    } else if constexpr (std::is_same_v<T, KSharedMemoryInfo>) {
+        return slab_heap_container->shared_memory_info;
+    } else if constexpr (std::is_same_v<T, KThread>) {
+        return slab_heap_container->thread;
+    } else if constexpr (std::is_same_v<T, KTransferMemory>) {
+        return slab_heap_container->transfer_memory;
+    } else if constexpr (std::is_same_v<T, KCodeMemory>) {
+        return slab_heap_container->code_memory;
+    } else if constexpr (std::is_same_v<T, KDeviceAddressSpace>) {
+        return slab_heap_container->device_address_space;
+    } else if constexpr (std::is_same_v<T, KPageBuffer>) {
+        return slab_heap_container->page_buffer;
+    } else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
+        return slab_heap_container->thread_local_page;
+    } else if constexpr (std::is_same_v<T, KObjectName>) {
+        return slab_heap_container->object_name;
+    } else if constexpr (std::is_same_v<T, KSessionRequest>) {
+        return slab_heap_container->session_request;
+    } else if constexpr (std::is_same_v<T, KSecureSystemResource>) {
+        return slab_heap_container->secure_system_resource;
+    } else if constexpr (std::is_same_v<T, KThread::LockWithPriorityInheritanceInfo>) {
+        return slab_heap_container->lock_info;
+    } else if constexpr (std::is_same_v<T, KEventInfo>) {
+        return slab_heap_container->event_info;
+    } else if constexpr (std::is_same_v<T, KDebug>) {
+        return slab_heap_container->debug;
+    }
+}
+
+template KSlabHeap<KClientSession>& KernelCore::SlabHeap();
+template KSlabHeap<KEvent>& KernelCore::SlabHeap();
+template KSlabHeap<KLinkedListNode>& KernelCore::SlabHeap();
+template KSlabHeap<KPort>& KernelCore::SlabHeap();
+template KSlabHeap<KProcess>& KernelCore::SlabHeap();
+template KSlabHeap<KResourceLimit>& KernelCore::SlabHeap();
+template KSlabHeap<KSession>& KernelCore::SlabHeap();
+template KSlabHeap<KSharedMemory>& KernelCore::SlabHeap();
+template KSlabHeap<KSharedMemoryInfo>& KernelCore::SlabHeap();
+template KSlabHeap<KThread>& KernelCore::SlabHeap();
+template KSlabHeap<KTransferMemory>& KernelCore::SlabHeap();
+template KSlabHeap<KCodeMemory>& KernelCore::SlabHeap();
+template KSlabHeap<KDeviceAddressSpace>& KernelCore::SlabHeap();
+template KSlabHeap<KPageBuffer>& KernelCore::SlabHeap();
+template KSlabHeap<KThreadLocalPage>& KernelCore::SlabHeap();
+template KSlabHeap<KObjectName>& KernelCore::SlabHeap();
+template KSlabHeap<KSessionRequest>& KernelCore::SlabHeap();
+template KSlabHeap<KSecureSystemResource>& KernelCore::SlabHeap();
+template KSlabHeap<KThread::LockWithPriorityInheritanceInfo>& KernelCore::SlabHeap();
+template KSlabHeap<KEventInfo>& KernelCore::SlabHeap();
+template KSlabHeap<KDebug>& KernelCore::SlabHeap();
+
 } // namespace Kernel
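Moving the SlabHeapContainer definition and the SlabHeap<T>() body out of kernel.h means the header no longer needs complete definitions of every K* object; the member template is defined once in kernel.cpp and explicitly instantiated for each supported type, which is also where the new KThread::LockWithPriorityInheritanceInfo heap gets registered. A minimal sketch of that explicit-instantiation pattern (DemoRegistry and DemoHeap are illustrative names, not the real classes):

    // Hedged sketch of keeping a member template's definition in one .cpp file
    // and explicitly instantiating it per supported type.
    #include <cstdio>
    #include <type_traits>

    template <typename T>
    struct DemoHeap {
        int live_objects = 0;
    };

    struct DemoRegistry {
        // The header would carry only this declaration.
        template <typename T>
        DemoHeap<T>& Heap();

        DemoHeap<int> int_heap;
        DemoHeap<float> float_heap;
    };

    // "Source file" part: the definition plus explicit instantiations.
    template <typename T>
    DemoHeap<T>& DemoRegistry::Heap() {
        if constexpr (std::is_same_v<T, int>) {
            return int_heap;
        } else {
            return float_heap;
        }
    }
    template DemoHeap<int>& DemoRegistry::Heap();
    template DemoHeap<float>& DemoRegistry::Heap();

    int main() {
        DemoRegistry registry;
        registry.Heap<int>().live_objects = 3;
        std::printf("%d\n", registry.Heap<int>().live_objects);
        return 0;
    }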
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 4449f6949..1b380a07b 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -305,49 +305,7 @@
 
     /// Gets the slab heap for the specified kernel object type.
     template <typename T>
-    KSlabHeap<T>& SlabHeap() {
-        if constexpr (std::is_same_v<T, KClientSession>) {
-            return slab_heap_container->client_session;
-        } else if constexpr (std::is_same_v<T, KEvent>) {
-            return slab_heap_container->event;
-        } else if constexpr (std::is_same_v<T, KLinkedListNode>) {
-            return slab_heap_container->linked_list_node;
-        } else if constexpr (std::is_same_v<T, KPort>) {
-            return slab_heap_container->port;
-        } else if constexpr (std::is_same_v<T, KProcess>) {
-            return slab_heap_container->process;
-        } else if constexpr (std::is_same_v<T, KResourceLimit>) {
-            return slab_heap_container->resource_limit;
-        } else if constexpr (std::is_same_v<T, KSession>) {
-            return slab_heap_container->session;
-        } else if constexpr (std::is_same_v<T, KSharedMemory>) {
-            return slab_heap_container->shared_memory;
-        } else if constexpr (std::is_same_v<T, KSharedMemoryInfo>) {
-            return slab_heap_container->shared_memory_info;
-        } else if constexpr (std::is_same_v<T, KThread>) {
-            return slab_heap_container->thread;
-        } else if constexpr (std::is_same_v<T, KTransferMemory>) {
-            return slab_heap_container->transfer_memory;
-        } else if constexpr (std::is_same_v<T, KCodeMemory>) {
-            return slab_heap_container->code_memory;
-        } else if constexpr (std::is_same_v<T, KDeviceAddressSpace>) {
-            return slab_heap_container->device_address_space;
-        } else if constexpr (std::is_same_v<T, KPageBuffer>) {
-            return slab_heap_container->page_buffer;
-        } else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
-            return slab_heap_container->thread_local_page;
-        } else if constexpr (std::is_same_v<T, KObjectName>) {
-            return slab_heap_container->object_name;
-        } else if constexpr (std::is_same_v<T, KSessionRequest>) {
-            return slab_heap_container->session_request;
-        } else if constexpr (std::is_same_v<T, KSecureSystemResource>) {
-            return slab_heap_container->secure_system_resource;
-        } else if constexpr (std::is_same_v<T, KEventInfo>) {
-            return slab_heap_container->event_info;
-        } else if constexpr (std::is_same_v<T, KDebug>) {
-            return slab_heap_container->debug;
-        }
-    }
+    KSlabHeap<T>& SlabHeap();
 
     /// Gets the current slab resource counts.
     Init::KSlabResourceCounts& SlabResourceCounts();
@@ -393,28 +351,7 @@
 
 private:
     /// Helper to encapsulate all slab heaps in a single heap allocated container
-    struct SlabHeapContainer {
-        KSlabHeap<KClientSession> client_session;
-        KSlabHeap<KEvent> event;
-        KSlabHeap<KLinkedListNode> linked_list_node;
-        KSlabHeap<KPort> port;
-        KSlabHeap<KProcess> process;
-        KSlabHeap<KResourceLimit> resource_limit;
-        KSlabHeap<KSession> session;
-        KSlabHeap<KSharedMemory> shared_memory;
-        KSlabHeap<KSharedMemoryInfo> shared_memory_info;
-        KSlabHeap<KThread> thread;
-        KSlabHeap<KTransferMemory> transfer_memory;
-        KSlabHeap<KCodeMemory> code_memory;
-        KSlabHeap<KDeviceAddressSpace> device_address_space;
-        KSlabHeap<KPageBuffer> page_buffer;
-        KSlabHeap<KThreadLocalPage> thread_local_page;
-        KSlabHeap<KObjectName> object_name;
-        KSlabHeap<KSessionRequest> session_request;
-        KSlabHeap<KSecureSystemResource> secure_system_resource;
-        KSlabHeap<KEventInfo> event_info;
-        KSlabHeap<KDebug> debug;
-    };
+    struct SlabHeapContainer;
 
     std::unique_ptr<SlabHeapContainer> slab_heap_container;
 };