author Liam 2023-03-06 22:31:50 -0500
committer Liam 2023-03-12 22:06:53 -0400
commit 7322c99e5fd335aa144620dff7f4d7a71932d35e
tree 69d417b94fe2bb0ea1d42b07e36c121a62c597dc /src
parent kernel: convert KMemoryLayout, KMemoryRegion*, KPageTableSlabHeap, KPriorityQ...
kernel: convert KAbstractSchedulerLock
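
This conversion follows the same naming migration as the parent commit: private members gain an m_ prefix and constructor parameters drop the trailing underscore, so the initializer reads m_kernel{kernel} instead of kernel{kernel_}. A minimal sketch of the pattern, using a made-up Widget class rather than anything from this commit:

    // Hypothetical illustration of the naming conversion; Widget is not yuzu code.
    class Widget {
    public:
        // Before: explicit Widget(int count_) : count{count_} {}
        // After: the parameter keeps its natural name, the member carries the m_ prefix.
        explicit Widget(int count) : m_count{count} {}

    private:
        int m_count;
    };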
Diffstat (limited to 'src')
-rw-r--r-- src/core/hle/kernel/k_scheduler_lock.h | 55
1 file changed, 24 insertions(+), 31 deletions(-)
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 13463717f..caa1404f1 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -14,74 +14,67 @@
 namespace Kernel {
 
 class KernelCore;
+class GlobalSchedulerContext;
 
 template <typename SchedulerType>
 class KAbstractSchedulerLock {
 public:
-    explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {}
+    explicit KAbstractSchedulerLock(KernelCore& kernel) : m_kernel{kernel} {}
 
     bool IsLockedByCurrentThread() const {
-        return owner_thread == GetCurrentThreadPointer(kernel);
+        return m_owner_thread == GetCurrentThreadPointer(m_kernel);
     }
 
     void Lock() {
-        // If we are shutting down the kernel, none of this is relevant anymore.
-        if (kernel.IsShuttingDown()) {
-            return;
-        }
-
-        if (IsLockedByCurrentThread()) {
+        if (this->IsLockedByCurrentThread()) {
             // If we already own the lock, the lock count should be > 0.
             // For debug, ensure this is true.
-            ASSERT(lock_count > 0);
+            ASSERT(m_lock_count > 0);
         } else {
             // Otherwise, we want to disable scheduling and acquire the spinlock.
-            SchedulerType::DisableScheduling(kernel);
-            spin_lock.Lock();
+            SchedulerType::DisableScheduling(m_kernel);
+            m_spin_lock.Lock();
 
-            ASSERT(lock_count == 0);
-            ASSERT(owner_thread == nullptr);
+            ASSERT(m_lock_count == 0);
+            ASSERT(m_owner_thread == nullptr);
 
             // Take ownership of the lock.
-            owner_thread = GetCurrentThreadPointer(kernel);
+            m_owner_thread = GetCurrentThreadPointer(m_kernel);
         }
 
         // Increment the lock count.
-        lock_count++;
+        m_lock_count++;
     }
 
     void Unlock() {
-        // If we are shutting down the kernel, none of this is relevant anymore.
-        if (kernel.IsShuttingDown()) {
-            return;
-        }
-
-        ASSERT(IsLockedByCurrentThread());
-        ASSERT(lock_count > 0);
+        ASSERT(this->IsLockedByCurrentThread());
+        ASSERT(m_lock_count > 0);
 
         // Release an instance of the lock.
-        if ((--lock_count) == 0) {
+        if ((--m_lock_count) == 0) {
             // Perform a memory barrier here.
             std::atomic_thread_fence(std::memory_order_seq_cst);
 
             // We're no longer going to hold the lock. Take note of what cores need scheduling.
             const u64 cores_needing_scheduling =
-                SchedulerType::UpdateHighestPriorityThreads(kernel);
+                SchedulerType::UpdateHighestPriorityThreads(m_kernel);
 
             // Note that we no longer hold the lock, and unlock the spinlock.
-            owner_thread = nullptr;
-            spin_lock.Unlock();
+            m_owner_thread = nullptr;
+            m_spin_lock.Unlock();
 
             // Enable scheduling, and perform a rescheduling operation.
-            SchedulerType::EnableScheduling(kernel, cores_needing_scheduling);
+            SchedulerType::EnableScheduling(m_kernel, cores_needing_scheduling);
         }
     }
 
 private:
-    KernelCore& kernel;
-    KAlignedSpinLock spin_lock{};
-    s32 lock_count{};
-    std::atomic<KThread*> owner_thread{};
+    friend class GlobalSchedulerContext;
+
+    KernelCore& m_kernel;
+    KAlignedSpinLock m_spin_lock{};
+    s32 m_lock_count{};
+    std::atomic<KThread*> m_owner_thread{};
 };
 
 } // namespace Kernel
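
For readers outside the codebase: KAbstractSchedulerLock is a recursive lock. A re-lock by the owning thread only bumps a counter; the underlying spinlock and the scheduling disable/enable pair are touched only on the first acquire and the last release. Below is a standalone, simplified sketch of that ownership-counting pattern. SimpleSchedulerLock is made up for this illustration, std::mutex stands in for KAlignedSpinLock, and the DisableScheduling/EnableScheduling hooks are omitted; this is not yuzu's API.

    // Simplified stand-in for the recursive-ownership pattern above.
    // Not yuzu code: std::mutex replaces KAlignedSpinLock, and the
    // scheduling hooks around the critical section are omitted.
    #include <atomic>
    #include <cassert>
    #include <mutex>
    #include <thread>

    class SimpleSchedulerLock {
    public:
        bool IsLockedByCurrentThread() const {
            return m_owner.load() == std::this_thread::get_id();
        }

        void Lock() {
            if (IsLockedByCurrentThread()) {
                // Re-entrant acquire by the owner: only the count changes.
                assert(m_count > 0);
            } else {
                // First acquire: take the underlying lock, then record ownership.
                m_lock.lock();
                assert(m_count == 0);
                m_owner.store(std::this_thread::get_id());
            }
            ++m_count;
        }

        void Unlock() {
            assert(IsLockedByCurrentThread() && m_count > 0);
            if (--m_count == 0) {
                // Last release: clear ownership before dropping the lock.
                m_owner.store(std::thread::id{});
                m_lock.unlock();
            }
        }

    private:
        std::mutex m_lock;
        int m_count{};
        std::atomic<std::thread::id> m_owner{};
    };

    int main() {
        SimpleSchedulerLock lock;
        lock.Lock();
        lock.Lock();   // same thread may re-lock; count is now 2
        lock.Unlock();
        lock.Unlock(); // fully released here
    }

The real implementation additionally issues a seq_cst fence before recomputing the highest-priority threads and re-enables scheduling only after the spinlock is released, as shown in the diff.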