author    Liam 2023-03-07 10:49:41 -0500
committer Liam 2023-03-12 22:09:09 -0400
commit    c0b9e93b77cca0e5fbd455efc5dec10199ef8184 (patch)
tree      2e6e76c5075bc46695b9c8a178deaadc11b3eb99 /src
parent    kernel: remove gratuitous attribute usage (diff)
download  yuzu-c0b9e93b77cca0e5fbd455efc5dec10199ef8184.tar.gz
yuzu-c0b9e93b77cca0e5fbd455efc5dec10199ef8184.tar.xz
yuzu-c0b9e93b77cca0e5fbd455efc5dec10199ef8184.zip
kernel: remove kernel_
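
This commit applies the codebase's m_ member-naming convention to the kernel
object types: constructors now take a parameter named kernel (no trailing
underscore) and store it in a member named m_kernel, replacing the old
kernel_/kernel pair. A minimal sketch of the pattern, reduced from the
KAutoObject hunks below (not the complete class):

    // Before: trailing-underscore parameter, member named kernel.
    // explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {}
    // KernelCore& kernel;

    // After: plain parameter name, m_-prefixed member; call sites in this
    // patch change from kernel.Foo() to m_kernel.Foo() accordingly.
    explicit KAutoObject(KernelCore& kernel) : m_kernel(kernel) {}
    KernelCore& m_kernel;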
Diffstat (limited to 'src')
-rw-r--r--  src/core/hle/kernel/k_auto_object.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_auto_object.h | 6
-rw-r--r--  src/core/hle/kernel/k_client_port.cpp | 12
-rw-r--r--  src/core/hle/kernel/k_client_session.cpp | 7
-rw-r--r--  src/core/hle/kernel/k_client_session.h | 2
-rw-r--r--  src/core/hle/kernel/k_code_memory.cpp | 14
-rw-r--r--  src/core/hle/kernel/k_code_memory.h | 2
-rw-r--r--  src/core/hle/kernel/k_condition_variable.cpp | 8
-rw-r--r--  src/core/hle/kernel/k_debug.h | 2
-rw-r--r--  src/core/hle/kernel/k_device_address_space.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_event.cpp | 8
-rw-r--r--  src/core/hle/kernel/k_event.h | 2
-rw-r--r--  src/core/hle/kernel/k_port.cpp | 12
-rw-r--r--  src/core/hle/kernel/k_port.h | 2
-rw-r--r--  src/core/hle/kernel/k_process.cpp | 68
-rw-r--r--  src/core/hle/kernel/k_process.h | 2
-rw-r--r--  src/core/hle/kernel/k_readable_event.cpp | 10
-rw-r--r--  src/core/hle/kernel/k_readable_event.h | 2
-rw-r--r--  src/core/hle/kernel/k_resource_limit.cpp | 4
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp | 62
-rw-r--r--  src/core/hle/kernel/k_scheduler.h | 2
-rw-r--r--  src/core/hle/kernel/k_server_port.cpp | 8
-rw-r--r--  src/core/hle/kernel/k_server_port.h | 2
-rw-r--r--  src/core/hle/kernel/k_server_session.cpp | 40
-rw-r--r--  src/core/hle/kernel/k_server_session.h | 2
-rw-r--r--  src/core/hle/kernel/k_session.cpp | 6
-rw-r--r--  src/core/hle/kernel/k_session.h | 2
-rw-r--r--  src/core/hle/kernel/k_session_request.h | 6
-rw-r--r--  src/core/hle/kernel/k_shared_memory.cpp | 8
-rw-r--r--  src/core/hle/kernel/k_shared_memory.h | 2
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.cpp | 11
-rw-r--r--  src/core/hle/kernel/k_system_resource.h | 6
-rw-r--r--  src/core/hle/kernel/k_thread.cpp | 200
-rw-r--r--  src/core/hle/kernel/k_thread.h | 12
-rw-r--r--  src/core/hle/kernel/k_thread_queue.h | 2
-rw-r--r--  src/core/hle/kernel/k_transfer_memory.cpp | 16
-rw-r--r--  src/core/hle/kernel/k_transfer_memory.h | 4
-rw-r--r--  src/core/hle/kernel/k_worker_task.h | 2
-rw-r--r--  src/core/hle/kernel/k_worker_task_manager.cpp | 2
-rw-r--r--  src/core/hle/kernel/k_worker_task_manager.h | 2
-rw-r--r--  src/core/hle/kernel/slab_helpers.h | 17
41 files changed, 290 insertions(+), 295 deletions(-)
diff --git a/src/core/hle/kernel/k_auto_object.cpp b/src/core/hle/kernel/k_auto_object.cpp
index 691af8ccb..0ae42c95c 100644
--- a/src/core/hle/kernel/k_auto_object.cpp
+++ b/src/core/hle/kernel/k_auto_object.cpp
@@ -12,11 +12,11 @@ KAutoObject* KAutoObject::Create(KAutoObject* obj) {
 }

 void KAutoObject::RegisterWithKernel() {
-    kernel.RegisterKernelObject(this);
+    m_kernel.RegisterKernelObject(this);
 }

 void KAutoObject::UnregisterWithKernel() {
-    kernel.UnregisterKernelObject(this);
+    m_kernel.UnregisterKernelObject(this);
 }

 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index 2443ab2a5..edb9cf071 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -80,7 +80,7 @@ private:
     KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const);

 public:
-    explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {
+    explicit KAutoObject(KernelCore& kernel) : m_kernel(kernel) {
         RegisterWithKernel();
     }
     virtual ~KAutoObject() = default;
@@ -169,7 +169,7 @@ private:
     void UnregisterWithKernel();

 protected:
-    KernelCore& kernel;
+    KernelCore& m_kernel;

 private:
     std::atomic<u32> m_ref_count{};
@@ -179,7 +179,7 @@ class KAutoObjectWithListContainer;

 class KAutoObjectWithList : public KAutoObject, public boost::intrusive::set_base_hook<> {
 public:
-    explicit KAutoObjectWithList(KernelCore& kernel_) : KAutoObject(kernel_) {}
+    explicit KAutoObjectWithList(KernelCore& kernel) : KAutoObject(kernel) {}

     static int Compare(const KAutoObjectWithList& lhs, const KAutoObjectWithList& rhs) {
         const u64 lid = lhs.GetId();
diff --git a/src/core/hle/kernel/k_client_port.cpp b/src/core/hle/kernel/k_client_port.cpp
index 7a3d650fd..40e09e532 100644
--- a/src/core/hle/kernel/k_client_port.cpp
+++ b/src/core/hle/kernel/k_client_port.cpp
@@ -11,7 +11,7 @@

 namespace Kernel {

-KClientPort::KClientPort(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
+KClientPort::KClientPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
 KClientPort::~KClientPort() = default;

 void KClientPort::Initialize(KPort* parent, s32 max_sessions) {
@@ -23,7 +23,7 @@ void KClientPort::Initialize(KPort* parent, s32 max_sessions) {
 }

 void KClientPort::OnSessionFinalized() {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     if (const auto prev = m_num_sessions--; prev == m_max_sessions) {
         this->NotifyAvailable();
@@ -58,12 +58,12 @@ Result KClientPort::CreateSession(KClientSession** out) {

     // Reserve a new session from the resource limit.
     //! FIXME: we are reserving this from the wrong resource limit!
-    KScopedResourceReservation session_reservation(kernel.ApplicationProcess()->GetResourceLimit(),
-                                                   LimitableResource::SessionCountMax);
+    KScopedResourceReservation session_reservation(
+        m_kernel.ApplicationProcess()->GetResourceLimit(), LimitableResource::SessionCountMax);
     R_UNLESS(session_reservation.Succeeded(), ResultLimitReached);

     // Allocate a session normally.
-    session = KSession::Create(kernel);
+    session = KSession::Create(m_kernel);

     // Check that we successfully created a session.
     R_UNLESS(session != nullptr, ResultOutOfResource);
@@ -105,7 +105,7 @@ Result KClientPort::CreateSession(KClientSession** out) {
     session_reservation.Commit();

     // Register the session.
-    KSession::Register(kernel, session);
+    KSession::Register(m_kernel, session);
     ON_RESULT_FAILURE {
         session->GetClientSession().Close();
         session->GetServerSession().Close();
diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp
index c9196d04b..62a8fab45 100644
--- a/src/core/hle/kernel/k_client_session.cpp
+++ b/src/core/hle/kernel/k_client_session.cpp
@@ -12,8 +12,7 @@ namespace Kernel {

 static constexpr u32 MessageBufferSize = 0x100;

-KClientSession::KClientSession(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
+KClientSession::KClientSession(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
 KClientSession::~KClientSession() = default;

 void KClientSession::Destroy() {
@@ -25,12 +24,12 @@ void KClientSession::OnServerClosed() {}

 Result KClientSession::SendSyncRequest() {
     // Create a session request.
-    KSessionRequest* request = KSessionRequest::Create(kernel);
+    KSessionRequest* request = KSessionRequest::Create(m_kernel);
     R_UNLESS(request != nullptr, ResultOutOfResource);
     SCOPE_EXIT({ request->Close(); });

     // Initialize the request.
-    request->Initialize(nullptr, GetCurrentThread(kernel).GetTLSAddress(), MessageBufferSize);
+    request->Initialize(nullptr, GetCurrentThread(m_kernel).GetTLSAddress(), MessageBufferSize);

     // Send the request.
     R_RETURN(m_parent->GetServerSession().OnRequest(request));
diff --git a/src/core/hle/kernel/k_client_session.h b/src/core/hle/kernel/k_client_session.h
index ecde2549c..9b62e55e4 100644
--- a/src/core/hle/kernel/k_client_session.h
+++ b/src/core/hle/kernel/k_client_session.h
@@ -30,7 +30,7 @@ class KClientSession final
     KERNEL_AUTOOBJECT_TRAITS(KClientSession, KAutoObject);

 public:
-    explicit KClientSession(KernelCore& kernel_);
+    explicit KClientSession(KernelCore& kernel);
     ~KClientSession() override;

     void Initialize(KSession* parent) {
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
index 4167ade2b..89df6b5d8 100644
--- a/src/core/hle/kernel/k_code_memory.cpp
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -16,18 +16,18 @@

 namespace Kernel {

-KCodeMemory::KCodeMemory(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_lock(kernel_) {}
+KCodeMemory::KCodeMemory(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, m_lock(kernel) {}

 Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
     // Set members.
-    m_owner = GetCurrentProcessPointer(kernel);
+    m_owner = GetCurrentProcessPointer(m_kernel);

     // Get the owner page table.
     auto& page_table = m_owner->PageTable();

     // Construct the page group.
-    m_page_group.emplace(kernel, page_table.GetBlockInfoManager());
+    m_page_group.emplace(m_kernel, page_table.GetBlockInfoManager());

     // Lock the memory.
     R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size))
@@ -74,7 +74,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
     R_UNLESS(!m_is_mapped, ResultInvalidState);

     // Map the memory.
-    R_TRY(GetCurrentProcess(kernel).PageTable().MapPageGroup(
+    R_TRY(GetCurrentProcess(m_kernel).PageTable().MapPageGroup(
         address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));

     // Mark ourselves as mapped.
@@ -91,8 +91,8 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
     KScopedLightLock lk(m_lock);

     // Unmap the memory.
-    R_TRY(GetCurrentProcess(kernel).PageTable().UnmapPageGroup(address, *m_page_group,
-                                                               KMemoryState::CodeOut));
+    R_TRY(GetCurrentProcess(m_kernel).PageTable().UnmapPageGroup(address, *m_page_group,
+                                                                 KMemoryState::CodeOut));

     // Mark ourselves as unmapped.
     m_is_mapped = false;
diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h
index fa63c18df..23cbb283b 100644
--- a/src/core/hle/kernel/k_code_memory.h
+++ b/src/core/hle/kernel/k_code_memory.h
@@ -29,7 +29,7 @@ class KCodeMemory final
     KERNEL_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject);

 public:
-    explicit KCodeMemory(KernelCore& kernel_);
+    explicit KCodeMemory(KernelCore& kernel);

     Result Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
     void Finalize() override;
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 0dc01f6aa..067f26fba 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -57,8 +57,8 @@ bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero

 class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue {
 public:
-    explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_)
-        : KThreadQueue(kernel_) {}
+    explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel)
+        : KThreadQueue(kernel) {}

     void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
         // Remove the thread as a waiter from its owner.
@@ -75,8 +75,8 @@ private:

 public:
     explicit ThreadQueueImplForKConditionVariableWaitConditionVariable(
-        KernelCore& kernel_, KConditionVariable::ThreadTree* t)
-        : KThreadQueue(kernel_), m_tree(t) {}
+        KernelCore& kernel, KConditionVariable::ThreadTree* t)
+        : KThreadQueue(kernel), m_tree(t) {}

     void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
         // Remove the thread as a waiter from its owner.
diff --git a/src/core/hle/kernel/k_debug.h b/src/core/hle/kernel/k_debug.h
index 9ffcf27d1..2290e3bca 100644
--- a/src/core/hle/kernel/k_debug.h
+++ b/src/core/hle/kernel/k_debug.h
@@ -12,7 +12,7 @@ class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KAutoObj
     KERNEL_AUTOOBJECT_TRAITS(KDebug, KAutoObject);

 public:
-    explicit KDebug(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
+    explicit KDebug(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}

     static void PostDestroy(uintptr_t arg) {}
 };
diff --git a/src/core/hle/kernel/k_device_address_space.cpp b/src/core/hle/kernel/k_device_address_space.cpp
index 27659ea3b..a2fc4fe1f 100644
--- a/src/core/hle/kernel/k_device_address_space.cpp
+++ b/src/core/hle/kernel/k_device_address_space.cpp
@@ -9,8 +9,8 @@

 namespace Kernel {

-KDeviceAddressSpace::KDeviceAddressSpace(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer(kernel_), m_lock(kernel_), m_is_initialized(false) {}
+KDeviceAddressSpace::KDeviceAddressSpace(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer(kernel), m_lock(kernel), m_is_initialized(false) {}
 KDeviceAddressSpace::~KDeviceAddressSpace() = default;

 void KDeviceAddressSpace::Initialize() {
diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp
index d973853ab..d92b491f8 100644
--- a/src/core/hle/kernel/k_event.cpp
+++ b/src/core/hle/kernel/k_event.cpp
@@ -7,8 +7,8 @@

 namespace Kernel {

-KEvent::KEvent(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_readable_event{kernel_} {}
+KEvent::KEvent(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, m_readable_event{kernel} {}

 KEvent::~KEvent() = default;

@@ -36,7 +36,7 @@ void KEvent::Finalize() {
 }

 Result KEvent::Signal() {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     R_SUCCEED_IF(m_readable_event_destroyed);

@@ -44,7 +44,7 @@ Result KEvent::Signal() {
 }

 Result KEvent::Clear() {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     R_SUCCEED_IF(m_readable_event_destroyed);

diff --git a/src/core/hle/kernel/k_event.h b/src/core/hle/kernel/k_event.h
index 48ce7d9a0..f522b0a84 100644
--- a/src/core/hle/kernel/k_event.h
+++ b/src/core/hle/kernel/k_event.h
@@ -16,7 +16,7 @@ class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObj
     KERNEL_AUTOOBJECT_TRAITS(KEvent, KAutoObject);

 public:
-    explicit KEvent(KernelCore& kernel_);
+    explicit KEvent(KernelCore& kernel);
     ~KEvent() override;

     void Initialize(KProcess* owner);
diff --git a/src/core/hle/kernel/k_port.cpp b/src/core/hle/kernel/k_port.cpp
index f73bc34d4..1621ca1d3 100644
--- a/src/core/hle/kernel/k_port.cpp
+++ b/src/core/hle/kernel/k_port.cpp
@@ -7,8 +7,8 @@

 namespace Kernel {

-KPort::KPort(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_server{kernel_}, m_client{kernel_} {}
+KPort::KPort(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {}

 KPort::~KPort() = default;

@@ -29,7 +29,7 @@ void KPort::Initialize(s32 max_sessions, bool is_light, uintptr_t name) {
 }

 void KPort::OnClientClosed() {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     if (m_state == State::Normal) {
         m_state = State::ClientClosed;
@@ -37,7 +37,7 @@ void KPort::OnClientClosed() {
 }

 void KPort::OnServerClosed() {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     if (m_state == State::Normal) {
         m_state = State::ServerClosed;
@@ -45,12 +45,12 @@ void KPort::OnServerClosed() {
 }

 bool KPort::IsServerClosed() const {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};
     return m_state == State::ServerClosed;
 }

 Result KPort::EnqueueSession(KServerSession* session) {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     R_UNLESS(m_state == State::Normal, ResultPortClosed);

diff --git a/src/core/hle/kernel/k_port.h b/src/core/hle/kernel/k_port.h
index f2a08547c..991be27ab 100644
--- a/src/core/hle/kernel/k_port.h
+++ b/src/core/hle/kernel/k_port.h
@@ -19,7 +19,7 @@ class KPort final : public KAutoObjectWithSlabHeapAndContainer<KPort, KAutoObjec
     KERNEL_AUTOOBJECT_TRAITS(KPort, KAutoObject);

 public:
-    explicit KPort(KernelCore& kernel_);
+    explicit KPort(KernelCore& kernel);
     ~KPort() override;

     static void PostDestroy(uintptr_t arg) {}
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 74a04aa66..4954a40db 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -126,7 +126,7 @@ u64 KProcess::GetTotalPhysicalMemoryAvailable() {
     const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) +
                        page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size +
                        main_thread_stack_size};
-    if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
+    if (const auto pool_size = m_kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
         capacity != pool_size) {
         LOG_WARNING(Kernel, "capacity {} != application pool size {}", capacity, pool_size);
     }
@@ -150,7 +150,7 @@ u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
 }

 bool KProcess::ReleaseUserException(KThread* thread) {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     if (exception_thread == thread) {
         exception_thread = nullptr;
@@ -164,7 +164,7 @@ bool KProcess::ReleaseUserException(KThread* thread) {
             next->EndWait(ResultSuccess);
         }

-        KScheduler::SetSchedulerUpdateNeeded(kernel);
+        KScheduler::SetSchedulerUpdateNeeded(m_kernel);

         return true;
     } else {
@@ -173,11 +173,11 @@ bool KProcess::ReleaseUserException(KThread* thread) {
 }

 void KProcess::PinCurrentThread(s32 core_id) {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());

     // Get the current thread.
     KThread* cur_thread =
-        kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
+        m_kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();

     // If the thread isn't terminated, pin it.
     if (!cur_thread->IsTerminationRequested()) {
@@ -186,27 +186,27 @@ void KProcess::PinCurrentThread(s32 core_id) {
         cur_thread->Pin(core_id);

         // An update is needed.
-        KScheduler::SetSchedulerUpdateNeeded(kernel);
+        KScheduler::SetSchedulerUpdateNeeded(m_kernel);
     }
 }

 void KProcess::UnpinCurrentThread(s32 core_id) {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());

     // Get the current thread.
     KThread* cur_thread =
-        kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
+        m_kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();

     // Unpin it.
     cur_thread->Unpin();
     UnpinThread(core_id, cur_thread);

     // An update is needed.
-    KScheduler::SetSchedulerUpdateNeeded(kernel);
+    KScheduler::SetSchedulerUpdateNeeded(m_kernel);
 }

 void KProcess::UnpinThread(KThread* thread) {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());

     // Get the thread's core id.
     const auto core_id = thread->GetActiveCore();
@@ -216,7 +216,7 @@ void KProcess::UnpinThread(KThread* thread) {
     thread->Unpin();

     // An update is needed.
-    KScheduler::SetSchedulerUpdateNeeded(kernel);
+    KScheduler::SetSchedulerUpdateNeeded(m_kernel);
 }

 Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
@@ -234,7 +234,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
     }

     if (shemen_info == nullptr) {
-        shemen_info = KSharedMemoryInfo::Allocate(kernel);
+        shemen_info = KSharedMemoryInfo::Allocate(m_kernel);
         R_UNLESS(shemen_info != nullptr, ResultOutOfMemory);

         shemen_info->Initialize(shmem);
@@ -265,7 +265,7 @@ void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr a

     if (shemen_info->Close()) {
         shared_memory_list.erase(iter);
-        KSharedMemoryInfo::Free(kernel, shemen_info);
+        KSharedMemoryInfo::Free(m_kernel, shemen_info);
     }

     // Close a reference to the shared memory.
@@ -298,7 +298,7 @@ u64 KProcess::GetFreeThreadCount() const {
 Result KProcess::Reset() {
     // Lock the process and the scheduler.
     KScopedLightLock lk(state_lock);
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     // Validate that we're in a state that we can reset.
     R_UNLESS(state != State::Terminated, ResultInvalidState);
@@ -313,7 +313,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
     // Lock ourselves and the scheduler.
     KScopedLightLock lk{state_lock};
     KScopedLightLock list_lk{list_lock};
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     // Validate our state.
     R_UNLESS(state != State::Terminating, ResultInvalidState);
@@ -366,7 +366,7 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
     // Initialize process address space
     if (const Result result{page_table.InitializeForProcess(
             metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application,
-            0x8000000, code_size, &kernel.GetAppSystemResource(), resource_limit)};
+            0x8000000, code_size, &m_kernel.GetAppSystemResource(), resource_limit)};
         result.IsError()) {
         R_RETURN(result);
     }
@@ -421,7 +421,7 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) {

     ChangeState(State::Running);

-    SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top);
+    SetupMainThread(m_kernel.System(), *this, main_thread_priority, main_thread_stack_top);
 }

 void KProcess::PrepareForTermination() {
@@ -432,7 +432,7 @@ void KProcess::PrepareForTermination() {
             if (thread->GetOwnerProcess() != this)
                 continue;

-            if (thread == GetCurrentThreadPointer(kernel))
+            if (thread == GetCurrentThreadPointer(m_kernel))
                 continue;

             // TODO(Subv): When are the other running/ready threads terminated?
@@ -443,7 +443,7 @@ void KProcess::PrepareForTermination() {
         }
     };

-    stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());
+    stop_threads(m_kernel.System().GlobalSchedulerContext().GetThreadList());

     this->DeleteThreadLocalRegion(plr_address);
     plr_address = 0;
@@ -471,7 +471,7 @@ void KProcess::Finalize() {
             shmem->Close();

             it = shared_memory_list.erase(it);
-            KSharedMemoryInfo::Free(kernel, info);
+            KSharedMemoryInfo::Free(m_kernel, info);
         }
     }

@@ -494,7 +494,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {

     // See if we can get a region from a partially used TLP.
     {
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl{m_kernel};

         if (auto it = partially_used_tlp_tree.begin(); it != partially_used_tlp_tree.end()) {
             tlr = it->Reserve();
@@ -512,12 +512,12 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
     }

     // Allocate a new page.
-    tlp = KThreadLocalPage::Allocate(kernel);
+    tlp = KThreadLocalPage::Allocate(m_kernel);
     R_UNLESS(tlp != nullptr, ResultOutOfMemory);
-    auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(kernel, tlp); });
+    auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(m_kernel, tlp); });

     // Initialize the new page.
-    R_TRY(tlp->Initialize(kernel, this));
+    R_TRY(tlp->Initialize(m_kernel, this));

     // Reserve a TLR.
     tlr = tlp->Reserve();
@@ -525,7 +525,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {

     // Insert into our tree.
     {
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl{m_kernel};
         if (tlp->IsAllUsed()) {
             fully_used_tlp_tree.insert(*tlp);
         } else {
@@ -544,7 +544,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {

     // Release the region.
     {
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl{m_kernel};

         // Try to find the page in the partially used list.
         auto it = partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
@@ -581,7 +581,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
     if (page_to_free != nullptr) {
         page_to_free->Finalize();

-        KThreadLocalPage::Free(kernel, page_to_free);
+        KThreadLocalPage::Free(m_kernel, page_to_free);
     }

     R_SUCCEED();
@@ -639,8 +639,8 @@ void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
         page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
     };

-    kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
-                                        code_set.memory.size());
+    m_kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
+                                          code_set.memory.size());

     ReprotectSegment(code_set.CodeSegment(), Svc::MemoryPermission::ReadExecute);
     ReprotectSegment(code_set.RODataSegment(), Svc::MemoryPermission::Read);
@@ -648,14 +648,14 @@ void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
 }

 bool KProcess::IsSignaled() const {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
     return is_signaled;
 }

-KProcess::KProcess(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()},
-      handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
-      state_lock{kernel_}, list_lock{kernel_} {}
+KProcess::KProcess(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, page_table{m_kernel.System()},
+      handle_table{m_kernel}, address_arbiter{m_kernel.System()}, condition_var{m_kernel.System()},
+      state_lock{m_kernel}, list_lock{m_kernel} {}

 KProcess::~KProcess() = default;

diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 8d65be17a..a19d9b09d 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -68,7 +68,7 @@ class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWor
     KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);

 public:
-    explicit KProcess(KernelCore& kernel_);
+    explicit KProcess(KernelCore& kernel);
     ~KProcess() override;

     enum class State {
diff --git a/src/core/hle/kernel/k_readable_event.cpp b/src/core/hle/kernel/k_readable_event.cpp
index eeac678e4..c30662666 100644
--- a/src/core/hle/kernel/k_readable_event.cpp
+++ b/src/core/hle/kernel/k_readable_event.cpp
@@ -11,7 +11,7 @@

 namespace Kernel {

-KReadableEvent::KReadableEvent(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
+KReadableEvent::KReadableEvent(KernelCore& kernel) : KSynchronizationObject{kernel} {}

 KReadableEvent::~KReadableEvent() = default;

@@ -25,7 +25,7 @@ void KReadableEvent::Initialize(KEvent* parent) {
 }

 bool KReadableEvent::IsSignaled() const {
-    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

     return m_is_signaled;
 }
@@ -33,7 +33,7 @@ bool KReadableEvent::IsSignaled() const {
 void KReadableEvent::Destroy() {
     if (m_parent) {
         {
-            KScopedSchedulerLock sl{kernel};
+            KScopedSchedulerLock sl{m_kernel};
             m_parent->OnReadableEventDestroyed();
         }
         m_parent->Close();
@@ -41,7 +41,7 @@ void KReadableEvent::Destroy() {
 }

 Result KReadableEvent::Signal() {
-    KScopedSchedulerLock lk{kernel};
+    KScopedSchedulerLock lk{m_kernel};

     if (!m_is_signaled) {
         m_is_signaled = true;
@@ -58,7 +58,7 @@ Result KReadableEvent::Clear() {
 }

 Result KReadableEvent::Reset() {
-    KScopedSchedulerLock lk{kernel};
+    KScopedSchedulerLock lk{m_kernel};

     R_UNLESS(m_is_signaled, ResultInvalidState);

diff --git a/src/core/hle/kernel/k_readable_event.h b/src/core/hle/kernel/k_readable_event.h
index 743f96bf5..d2ec36323 100644
--- a/src/core/hle/kernel/k_readable_event.h
+++ b/src/core/hle/kernel/k_readable_event.h
@@ -17,7 +17,7 @@ class KReadableEvent : public KSynchronizationObject {
     KERNEL_AUTOOBJECT_TRAITS(KReadableEvent, KSynchronizationObject);

 public:
-    explicit KReadableEvent(KernelCore& kernel_);
+    explicit KReadableEvent(KernelCore& kernel);
     ~KReadableEvent() override;

     void Initialize(KEvent* parent);
diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp
index 2847da291..e224e1622 100644
--- a/src/core/hle/kernel/k_resource_limit.cpp
+++ b/src/core/hle/kernel/k_resource_limit.cpp
@@ -11,8 +11,8 @@
 namespace Kernel {
 constexpr s64 DefaultTimeout = 10000000000; // 10 seconds

-KResourceLimit::KResourceLimit(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, lock{kernel_}, cond_var{kernel_} {}
+KResourceLimit::KResourceLimit(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, lock{kernel}, cond_var{kernel} {}
 KResourceLimit::~KResourceLimit() = default;

 void KResourceLimit::Initialize(const Core::Timing::CoreTiming* core_timing_) {
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index d6c214237..b631ec406 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -27,7 +27,7 @@ static void IncrementScheduledCount(Kernel::KThread* thread) {
     }
 }

-KScheduler::KScheduler(KernelCore& kernel_) : kernel{kernel_} {
+KScheduler::KScheduler(KernelCore& kernel) : m_kernel{kernel} {
     m_switch_fiber = std::make_shared<Common::Fiber>([this] {
         while (true) {
             ScheduleImplFiber();
@@ -47,7 +47,7 @@ void KScheduler::SetInterruptTaskRunnable() {
 void KScheduler::RequestScheduleOnInterrupt() {
     m_state.needs_scheduling = true;

-    if (CanSchedule(kernel)) {
+    if (CanSchedule(m_kernel)) {
         ScheduleOnInterrupt();
     }
 }
@@ -97,50 +97,50 @@ u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
 }

 void KScheduler::Schedule() {
-    ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
-    ASSERT(m_core_id == GetCurrentCoreId(kernel));
+    ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);
+    ASSERT(m_core_id == GetCurrentCoreId(m_kernel));

     ScheduleImpl();
 }

 void KScheduler::ScheduleOnInterrupt() {
-    GetCurrentThread(kernel).DisableDispatch();
+    GetCurrentThread(m_kernel).DisableDispatch();
     Schedule();
-    GetCurrentThread(kernel).EnableDispatch();
+    GetCurrentThread(m_kernel).EnableDispatch();
 }

 void KScheduler::PreemptSingleCore() {
-    GetCurrentThread(kernel).DisableDispatch();
+    GetCurrentThread(m_kernel).DisableDispatch();

-    auto* thread = GetCurrentThreadPointer(kernel);
-    auto& previous_scheduler = kernel.Scheduler(thread->GetCurrentCore());
+    auto* thread = GetCurrentThreadPointer(m_kernel);
+    auto& previous_scheduler = m_kernel.Scheduler(thread->GetCurrentCore());
     previous_scheduler.Unload(thread);

     Common::Fiber::YieldTo(thread->GetHostContext(), *m_switch_fiber);

-    GetCurrentThread(kernel).EnableDispatch();
+    GetCurrentThread(m_kernel).EnableDispatch();
 }

 void KScheduler::RescheduleCurrentCore() {
-    ASSERT(!kernel.IsPhantomModeForSingleCore());
-    ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
+    ASSERT(!m_kernel.IsPhantomModeForSingleCore());
+    ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);

-    GetCurrentThread(kernel).EnableDispatch();
+    GetCurrentThread(m_kernel).EnableDispatch();

     if (m_state.needs_scheduling.load()) {
         // Disable interrupts, and then check again if rescheduling is needed.
         // KScopedInterruptDisable intr_disable;

-        kernel.CurrentScheduler()->RescheduleCurrentCoreImpl();
+        m_kernel.CurrentScheduler()->RescheduleCurrentCoreImpl();
     }
 }

 void KScheduler::RescheduleCurrentCoreImpl() {
     // Check that scheduling is needed.
     if (m_state.needs_scheduling.load()) [[likely]] {
-        GetCurrentThread(kernel).DisableDispatch();
+        GetCurrentThread(m_kernel).DisableDispatch();
         Schedule();
-        GetCurrentThread(kernel).EnableDispatch();
+        GetCurrentThread(m_kernel).EnableDispatch();
     }
 }

@@ -153,14 +153,14 @@ void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core

     // Insert the main thread into the priority queue.
     // {
-    //     KScopedSchedulerLock lk{kernel};
-    //     GetPriorityQueue(kernel).PushBack(GetCurrentThreadPointer(kernel));
-    //     SetSchedulerUpdateNeeded(kernel);
+    //     KScopedSchedulerLock lk{m_kernel};
+    //     GetPriorityQueue(m_kernel).PushBack(GetCurrentThreadPointer(m_kernel));
+    //     SetSchedulerUpdateNeeded(m_kernel);
     // }

     // Bind interrupt handler.
     // kernel.GetInterruptManager().BindHandler(
-    //     GetSchedulerInterruptHandler(kernel), KInterruptName::Scheduler, m_core_id,
+    //     GetSchedulerInterruptHandler(m_kernel), KInterruptName::Scheduler, m_core_id,
     //     KInterruptController::PriorityLevel::Scheduler, false, false);

     // Set the current thread.
@@ -168,7 +168,7 @@ void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core
 }

 void KScheduler::Activate() {
-    ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
+    ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);

     // m_state.should_count_idle = KTargetSystem::IsDebugMode();
     m_is_active = true;
@@ -176,7 +176,7 @@ void KScheduler::Activate() {
 }

 void KScheduler::OnThreadStart() {
-    GetCurrentThread(kernel).EnableDispatch();
+    GetCurrentThread(m_kernel).EnableDispatch();
 }

 u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
@@ -184,7 +184,7 @@ u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
         prev_highest_thread != highest_thread) [[likely]] {
         if (prev_highest_thread != nullptr) [[likely]] {
             IncrementScheduledCount(prev_highest_thread);
-            prev_highest_thread->SetLastScheduledTick(kernel.System().CoreTiming().GetCPUTicks());
+            prev_highest_thread->SetLastScheduledTick(m_kernel.System().CoreTiming().GetCPUTicks());
         }
         if (m_state.should_count_idle) {
             if (highest_thread != nullptr) [[likely]] {
@@ -328,8 +328,8 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
 }

 void KScheduler::SwitchThread(KThread* next_thread) {
-    KProcess* const cur_process = GetCurrentProcessPointer(kernel);
-    KThread* const cur_thread = GetCurrentThreadPointer(kernel);
+    KProcess* const cur_process = GetCurrentProcessPointer(m_kernel);
+    KThread* const cur_thread = GetCurrentThreadPointer(m_kernel);

     // We never want to schedule a null thread, so use the idle thread if we don't have a next.
     if (next_thread == nullptr) {
@@ -351,7 +351,7 @@ void KScheduler::SwitchThread(KThread* next_thread) {

     // Update the CPU time tracking variables.
     const s64 prev_tick = m_last_context_switch_time;
-    const s64 cur_tick = kernel.System().CoreTiming().GetCPUTicks();
+    const s64 cur_tick = m_kernel.System().CoreTiming().GetCPUTicks();
     const s64 tick_diff = cur_tick - prev_tick;
     cur_thread->AddCpuTime(m_core_id, tick_diff);
     if (cur_process != nullptr) {
@@ -375,7 +375,7 @@ void KScheduler::SwitchThread(KThread* next_thread) {
     // }

     // Set the new thread.
-    SetCurrentThread(kernel, next_thread);
+    SetCurrentThread(m_kernel, next_thread);
     m_current_thread = next_thread;

     // Set the new Thread Local region.
@@ -388,7 +388,7 @@ void KScheduler::ScheduleImpl() {
     std::atomic_thread_fence(std::memory_order_seq_cst);

     // Load the appropriate thread pointers for scheduling.
-    KThread* const cur_thread{GetCurrentThreadPointer(kernel)};
+    KThread* const cur_thread{GetCurrentThreadPointer(m_kernel)};
     KThread* highest_priority_thread{m_state.highest_priority_thread};

     // Check whether there are runnable interrupt tasks.
@@ -493,7 +493,7 @@ void KScheduler::ScheduleImplFiber() {
 }

 void KScheduler::Unload(KThread* thread) {
-    auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+    auto& cpu_core = m_kernel.System().ArmInterface(m_core_id);
     cpu_core.SaveContext(thread->GetContext32());
     cpu_core.SaveContext(thread->GetContext64());
     // Save the TPIDR_EL0 system register in case it was modified.
@@ -508,7 +508,7 @@ void KScheduler::Unload(KThread* thread) {
 }

 void KScheduler::Reload(KThread* thread) {
-    auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+    auto& cpu_core = m_kernel.System().ArmInterface(m_core_id);
     cpu_core.LoadContext(thread->GetContext32());
     cpu_core.LoadContext(thread->GetContext64());
     cpu_core.SetTlsAddress(thread->GetTLSAddress());
@@ -891,7 +891,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {

 void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) {
     if (const u64 core_mask = cores_needing_scheduling & ~(1ULL << m_core_id); core_mask != 0) {
-        RescheduleCores(kernel, core_mask);
+        RescheduleCores(m_kernel, core_mask);
     }
 }

diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 3f13b8193..d85a0c040 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -149,7 +149,7 @@ private:
         KInterruptTaskManager* interrupt_task_manager{nullptr};
     };

-    KernelCore& kernel;
+    KernelCore& m_kernel;
     SchedulingState m_state;
     bool m_is_active{false};
     s32 m_core_id{0};
diff --git a/src/core/hle/kernel/k_server_port.cpp b/src/core/hle/kernel/k_server_port.cpp
index dc70ee848..a29d34bc1 100644
--- a/src/core/hle/kernel/k_server_port.cpp
+++ b/src/core/hle/kernel/k_server_port.cpp
@@ -12,7 +12,7 @@

 namespace Kernel {

-KServerPort::KServerPort(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
+KServerPort::KServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
 KServerPort::~KServerPort() = default;

 void KServerPort::Initialize(KPort* parent) {
@@ -35,7 +35,7 @@ void KServerPort::CleanupSessions() {
         // Get the last session in the list
         KServerSession* session = nullptr;
         {
-            KScopedSchedulerLock sl{kernel};
+            KScopedSchedulerLock sl{m_kernel};
             if (!m_session_list.empty()) {
                 session = std::addressof(m_session_list.front());
                 m_session_list.pop_front();
@@ -74,7 +74,7 @@ bool KServerPort::IsSignaled() const {
 void KServerPort::EnqueueSession(KServerSession* session) {
     ASSERT(!this->IsLight());

-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     // Add the session to our queue.
     m_session_list.push_back(*session);
@@ -86,7 +86,7 @@ void KServerPort::EnqueueSession(KServerSession* session) {
 KServerSession* KServerPort::AcceptSession() {
     ASSERT(!this->IsLight());

-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     // Return the first session in the list.
     if (m_session_list.empty()) {
diff --git a/src/core/hle/kernel/k_server_port.h b/src/core/hle/kernel/k_server_port.h
index 964767156..21c040e62 100644
--- a/src/core/hle/kernel/k_server_port.h
+++ b/src/core/hle/kernel/k_server_port.h
@@ -22,7 +22,7 @@ class KServerPort final : public KSynchronizationObject {
     KERNEL_AUTOOBJECT_TRAITS(KServerPort, KSynchronizationObject);

 public:
-    explicit KServerPort(KernelCore& kernel_);
+    explicit KServerPort(KernelCore& kernel);
     ~KServerPort() override;

     void Initialize(KPort* parent);
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index c68ec09ce..e9b4ef528 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -28,8 +28,8 @@ namespace Kernel {

 using ThreadQueueImplForKServerSessionRequest = KThreadQueue;

-KServerSession::KServerSession(KernelCore& kernel_)
-    : KSynchronizationObject{kernel_}, m_lock{kernel_} {}
+KServerSession::KServerSession(KernelCore& kernel)
+    : KSynchronizationObject{kernel}, m_lock{m_kernel} {}

 KServerSession::~KServerSession() = default;

@@ -56,7 +56,7 @@ void KServerSession::OnClientClosed() {

         // Get the next request.
         {
-            KScopedSchedulerLock sl{kernel};
+            KScopedSchedulerLock sl{m_kernel};

             if (m_current_request != nullptr && m_current_request != prev_request) {
                 // Set the request, open a reference as we process it.
@@ -135,7 +135,7 @@ void KServerSession::OnClientClosed() {
 }

 bool KServerSession::IsSignaled() const {
-    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

     // If the client is closed, we're always signaled.
     if (m_parent->IsClientClosed()) {
@@ -148,17 +148,17 @@ bool KServerSession::IsSignaled() const {

 Result KServerSession::OnRequest(KSessionRequest* request) {
     // Create the wait queue.
-    ThreadQueueImplForKServerSessionRequest wait_queue{kernel};
+    ThreadQueueImplForKServerSessionRequest wait_queue{m_kernel};

     {
         // Lock the scheduler.
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl{m_kernel};

         // Ensure that we can handle new requests.
         R_UNLESS(!m_parent->IsServerClosed(), ResultSessionClosed);

         // Check that we're not terminating.
-        R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested);
+        R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested);

         // Get whether we're empty.
         const bool was_empty = m_request_list.empty();
@@ -176,11 +176,11 @@ Result KServerSession::OnRequest(KSessionRequest* request) {
         R_SUCCEED_IF(request->GetEvent() != nullptr);

         // This is a synchronous request, so we should wait for our request to complete.
-        GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
-        GetCurrentThread(kernel).BeginWait(&wait_queue);
+        GetCurrentThread(m_kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
+        GetCurrentThread(m_kernel).BeginWait(&wait_queue);
     }

-    return GetCurrentThread(kernel).GetWaitResult();
+    return GetCurrentThread(m_kernel).GetWaitResult();
 }

 Result KServerSession::SendReply(bool is_hle) {
@@ -190,7 +190,7 @@ Result KServerSession::SendReply(bool is_hle) {
190 // Get the request. 190 // Get the request.
191 KSessionRequest* request; 191 KSessionRequest* request;
192 { 192 {
193 KScopedSchedulerLock sl{kernel}; 193 KScopedSchedulerLock sl{m_kernel};
194 194
195 // Get the current request. 195 // Get the current request.
196 request = m_current_request; 196 request = m_current_request;
@@ -222,8 +222,8 @@ Result KServerSession::SendReply(bool is_hle) {
222 // HLE servers write directly to a pointer to the thread command buffer. Therefore 222 // HLE servers write directly to a pointer to the thread command buffer. Therefore
223 // the reply has already been written in this case. 223 // the reply has already been written in this case.
224 } else { 224 } else {
225 Core::Memory::Memory& memory{kernel.System().Memory()}; 225 Core::Memory::Memory& memory{m_kernel.System().Memory()};
226 KThread* server_thread{GetCurrentThreadPointer(kernel)}; 226 KThread* server_thread{GetCurrentThreadPointer(m_kernel)};
227 UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); 227 UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
228 228
229 auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); 229 auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
@@ -264,7 +264,7 @@ Result KServerSession::SendReply(bool is_hle) {
264 event->Signal(); 264 event->Signal();
265 } else { 265 } else {
266 // End the client thread's wait. 266 // End the client thread's wait.
267 KScopedSchedulerLock sl{kernel}; 267 KScopedSchedulerLock sl{m_kernel};
268 268
269 if (!client_thread->IsTerminationRequested()) { 269 if (!client_thread->IsTerminationRequested()) {
270 client_thread->EndWait(client_result); 270 client_thread->EndWait(client_result);
@@ -285,7 +285,7 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext
285 KThread* client_thread; 285 KThread* client_thread;
286 286
287 { 287 {
288 KScopedSchedulerLock sl{kernel}; 288 KScopedSchedulerLock sl{m_kernel};
289 289
290 // Ensure that we can service the request. 290 // Ensure that we can service the request.
291 R_UNLESS(!m_parent->IsClientClosed(), ResultSessionClosed); 291 R_UNLESS(!m_parent->IsClientClosed(), ResultSessionClosed);
@@ -319,18 +319,18 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext
319 // bool recv_list_broken = false; 319 // bool recv_list_broken = false;
320 320
321 // Receive the message. 321 // Receive the message.
322 Core::Memory::Memory& memory{kernel.System().Memory()}; 322 Core::Memory::Memory& memory{m_kernel.System().Memory()};
323 if (out_context != nullptr) { 323 if (out_context != nullptr) {
324 // HLE request. 324 // HLE request.
325 u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(client_message))}; 325 u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(client_message))};
326 *out_context = 326 *out_context =
327 std::make_shared<Service::HLERequestContext>(kernel, memory, this, client_thread); 327 std::make_shared<Service::HLERequestContext>(m_kernel, memory, this, client_thread);
328 (*out_context)->SetSessionRequestManager(manager); 328 (*out_context)->SetSessionRequestManager(manager);
329 (*out_context) 329 (*out_context)
330 ->PopulateFromIncomingCommandBuffer(client_thread->GetOwnerProcess()->GetHandleTable(), 330 ->PopulateFromIncomingCommandBuffer(client_thread->GetOwnerProcess()->GetHandleTable(),
331 cmd_buf); 331 cmd_buf);
332 } else { 332 } else {
333 KThread* server_thread{GetCurrentThreadPointer(kernel)}; 333 KThread* server_thread{GetCurrentThreadPointer(m_kernel)};
334 UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); 334 UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
335 335
336 auto* src_msg_buffer = memory.GetPointer(client_message); 336 auto* src_msg_buffer = memory.GetPointer(client_message);
@@ -350,7 +350,7 @@ void KServerSession::CleanupRequests() {
350 // Get the next request. 350 // Get the next request.
351 KSessionRequest* request = nullptr; 351 KSessionRequest* request = nullptr;
352 { 352 {
353 KScopedSchedulerLock sl{kernel}; 353 KScopedSchedulerLock sl{m_kernel};
354 354
355 if (m_current_request) { 355 if (m_current_request) {
356 // Choose the current request if we have one. 356 // Choose the current request if we have one.
@@ -401,7 +401,7 @@ void KServerSession::CleanupRequests() {
401 event->Signal(); 401 event->Signal();
402 } else { 402 } else {
403 // End the client thread's wait. 403 // End the client thread's wait.
404 KScopedSchedulerLock sl{kernel}; 404 KScopedSchedulerLock sl{m_kernel};
405 405
406 if (!client_thread->IsTerminationRequested()) { 406 if (!client_thread->IsTerminationRequested()) {
407 client_thread->EndWait(ResultSessionClosed); 407 client_thread->EndWait(ResultSessionClosed);
diff --git a/src/core/hle/kernel/k_server_session.h b/src/core/hle/kernel/k_server_session.h
index e340e4dd8..5ee02f556 100644
--- a/src/core/hle/kernel/k_server_session.h
+++ b/src/core/hle/kernel/k_server_session.h
@@ -33,7 +33,7 @@ class KServerSession final : public KSynchronizationObject,
33 friend class ServiceThread; 33 friend class ServiceThread;
34 34
35public: 35public:
36 explicit KServerSession(KernelCore& kernel_); 36 explicit KServerSession(KernelCore& kernel);
37 ~KServerSession() override; 37 ~KServerSession() override;
38 38
39 void Destroy() override; 39 void Destroy() override;
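
The header change shows the motivation for the whole rename: once the stored reference is spelled `m_kernel`, a constructor parameter can use the natural name `kernel` without colliding with a member, and the old trailing-underscore workaround (`kernel_`) disappears. A before/after sketch with invented types:

struct KernelCore {};

// Before: member and parameter would share the name "kernel", so the
// parameter carried a trailing underscore to avoid shadowing the member.
class OldStyle {
public:
    explicit OldStyle(KernelCore& kernel_) : kernel{kernel_} {}
private:
    KernelCore& kernel;
};

// After: the m_ prefix frees the natural parameter name, and member
// accesses inside the class become visibly distinct (m_kernel).
class NewStyle {
public:
    explicit NewStyle(KernelCore& kernel) : m_kernel{kernel} {}
private:
    KernelCore& m_kernel;
};
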
diff --git a/src/core/hle/kernel/k_session.cpp b/src/core/hle/kernel/k_session.cpp
index 771ad68bf..44d7a8f02 100644
--- a/src/core/hle/kernel/k_session.cpp
+++ b/src/core/hle/kernel/k_session.cpp
@@ -9,8 +9,8 @@
9 9
10namespace Kernel { 10namespace Kernel {
11 11
12KSession::KSession(KernelCore& kernel_) 12KSession::KSession(KernelCore& kernel)
13 : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_server{kernel_}, m_client{kernel_} {} 13 : KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {}
14KSession::~KSession() = default; 14KSession::~KSession() = default;
15 15
16void KSession::Initialize(KClientPort* client_port, uintptr_t name) { 16void KSession::Initialize(KClientPort* client_port, uintptr_t name) {
@@ -34,7 +34,7 @@ void KSession::Initialize(KClientPort* client_port, uintptr_t name) {
34 34
35 // Set our owner process. 35 // Set our owner process.
36 //! FIXME: this is the wrong process! 36 //! FIXME: this is the wrong process!
37 m_process = kernel.ApplicationProcess(); 37 m_process = m_kernel.ApplicationProcess();
38 m_process->Open(); 38 m_process->Open();
39 39
40 // Set our port. 40 // Set our port.
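
One subtlety in these constructors: KServerSession's new initializer list (first hunk of this commit) reads `m_lock{m_kernel}`, a member inherited from the base, while KSession's reads the parameter directly (`m_server{kernel}`). Both are well-defined, because C++ initializes base classes before non-static data members, so by the time a member initializer runs, the base's `m_kernel` already holds the reference. A compilable sketch with invented names:

struct KernelCore {};

struct Base {
    explicit Base(KernelCore& kernel) : m_kernel{kernel} {}
    KernelCore& m_kernel;
};

struct Lock {
    explicit Lock(KernelCore& kernel) : m_kernel{kernel} {}
    KernelCore& m_kernel;
};

struct Derived : Base {
    // Base{kernel} runs first, so reading the inherited m_kernel in the
    // member initializer for m_lock is well-defined.
    explicit Derived(KernelCore& kernel) : Base{kernel}, m_lock{m_kernel} {}
    Lock m_lock;
};

int main() {
    KernelCore kernel;
    Derived d{kernel};
    return &d.m_lock.m_kernel == &kernel ? 0 : 1;
}
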
diff --git a/src/core/hle/kernel/k_session.h b/src/core/hle/kernel/k_session.h
index ab553a04c..f69bab088 100644
--- a/src/core/hle/kernel/k_session.h
+++ b/src/core/hle/kernel/k_session.h
@@ -18,7 +18,7 @@ class KSession final : public KAutoObjectWithSlabHeapAndContainer<KSession, KAut
18 KERNEL_AUTOOBJECT_TRAITS(KSession, KAutoObject); 18 KERNEL_AUTOOBJECT_TRAITS(KSession, KAutoObject);
19 19
20public: 20public:
21 explicit KSession(KernelCore& kernel_); 21 explicit KSession(KernelCore& kernel);
22 ~KSession() override; 22 ~KSession() override;
23 23
24 void Initialize(KClientPort* port, uintptr_t name); 24 void Initialize(KClientPort* port, uintptr_t name);
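
As the k_session.cpp hunk suggests, a KSession is essentially a pair of endpoints (`m_server`, `m_client`) built over shared state, with close-tracking queried from the opposite side (the IsClientClosed/IsServerClosed checks in the server-session hunks above). A rough sketch of that shape, with invented names and shared_ptr standing in for the kernel's own reference counting:

#include <memory>

enum class SessionState { Normal, ClientClosed, ServerClosed };

struct SessionShared {
    SessionState state = SessionState::Normal;
};

class ServerEndpoint {
public:
    explicit ServerEndpoint(std::shared_ptr<SessionShared> s) : m_shared{std::move(s)} {}
    bool IsClientClosed() const { return m_shared->state == SessionState::ClientClosed; }
private:
    std::shared_ptr<SessionShared> m_shared;
};

class ClientEndpoint {
public:
    explicit ClientEndpoint(std::shared_ptr<SessionShared> s) : m_shared{std::move(s)} {}
    void Close() { m_shared->state = SessionState::ClientClosed; }
private:
    std::shared_ptr<SessionShared> m_shared;
};

class Session {
public:
    // m_shared is declared first, so both endpoints see initialized state.
    Session()
        : m_shared{std::make_shared<SessionShared>()}, m_server{m_shared}, m_client{m_shared} {}
    ServerEndpoint& Server() { return m_server; }
    ClientEndpoint& Client() { return m_client; }
private:
    std::shared_ptr<SessionShared> m_shared;
    ServerEndpoint m_server;
    ClientEndpoint m_client;
};
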
diff --git a/src/core/hle/kernel/k_session_request.h b/src/core/hle/kernel/k_session_request.h
index 5003e5c1d..5685048ba 100644
--- a/src/core/hle/kernel/k_session_request.h
+++ b/src/core/hle/kernel/k_session_request.h
@@ -158,7 +158,7 @@ public:
158 }; 158 };
159 159
160public: 160public:
161 explicit KSessionRequest(KernelCore& kernel_) : KAutoObject(kernel_), m_mappings(kernel_) {} 161 explicit KSessionRequest(KernelCore& kernel) : KAutoObject(kernel), m_mappings(kernel) {}
162 162
163 static KSessionRequest* Create(KernelCore& kernel) { 163 static KSessionRequest* Create(KernelCore& kernel) {
164 KSessionRequest* req = KSessionRequest::Allocate(kernel); 164 KSessionRequest* req = KSessionRequest::Allocate(kernel);
@@ -170,13 +170,13 @@ public:
170 170
171 void Destroy() override { 171 void Destroy() override {
172 this->Finalize(); 172 this->Finalize();
173 KSessionRequest::Free(kernel, this); 173 KSessionRequest::Free(m_kernel, this);
174 } 174 }
175 175
176 void Initialize(KEvent* event, uintptr_t address, size_t size) { 176 void Initialize(KEvent* event, uintptr_t address, size_t size) {
177 m_mappings.Initialize(); 177 m_mappings.Initialize();
178 178
179 m_thread = GetCurrentThreadPointer(kernel); 179 m_thread = GetCurrentThreadPointer(m_kernel);
180 m_event = event; 180 m_event = event;
181 m_address = address; 181 m_address = address;
182 m_size = size; 182 m_size = size;
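
KSessionRequest::Create pairs a static `Allocate(kernel)` with an `Open()` reference, and `Destroy()` finalizes the object and returns it via `Free(m_kernel, this)`: requests live in a fixed slab rather than on the heap. A toy fixed-capacity slab illustrating just the Allocate/Free halves (a hypothetical `SlabPool`; the real helpers also handle reference counts and the kernel's object registry):

#include <cstddef>
#include <new>
#include <utility>

// Toy fixed-capacity slab: a free list threaded through unused slots.
template <typename T, std::size_t N>
class SlabPool {
    struct Slot {
        alignas(T) unsigned char storage[sizeof(T)]; // first member: object memory
        Slot* next;                                  // free-list link
    };

public:
    SlabPool() {
        for (std::size_t i = 0; i + 1 < N; ++i) {
            m_slots[i].next = &m_slots[i + 1];
        }
        m_slots[N - 1].next = nullptr;
        m_free = &m_slots[0];
    }

    template <typename... Args>
    T* Allocate(Args&&... args) {
        if (m_free == nullptr) {
            return nullptr; // slab exhausted
        }
        Slot* slot = m_free;
        m_free = slot->next;
        return new (slot->storage) T(std::forward<Args>(args)...);
    }

    void Free(T* obj) {
        obj->~T();
        // storage is the slot's first member, so the object's address is
        // also the slot's address.
        Slot* slot = reinterpret_cast<Slot*>(obj);
        slot->next = m_free;
        m_free = slot;
    }

private:
    Slot m_slots[N];
    Slot* m_free = nullptr;
};
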
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp
index bf134f7c8..b7b3b612b 100644
--- a/src/core/hle/kernel/k_shared_memory.cpp
+++ b/src/core/hle/kernel/k_shared_memory.cpp
@@ -12,7 +12,7 @@
12 12
13namespace Kernel { 13namespace Kernel {
14 14
15KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {} 15KSharedMemory::KSharedMemory(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
16KSharedMemory::~KSharedMemory() = default; 16KSharedMemory::~KSharedMemory() = default;
17 17
18Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory, KProcess* owner_process, 18Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory, KProcess* owner_process,
@@ -28,7 +28,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory, KProcess* ow
28 const size_t num_pages = Common::DivideUp(size, PageSize); 28 const size_t num_pages = Common::DivideUp(size, PageSize);
29 29
30 // Get the resource limit. 30 // Get the resource limit.
31 KResourceLimit* reslimit = kernel.GetSystemResourceLimit(); 31 KResourceLimit* reslimit = m_kernel.GetSystemResourceLimit();
32 32
33 // Reserve memory for ourselves. 33 // Reserve memory for ourselves.
34 KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemoryMax, 34 KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemoryMax,
@@ -40,11 +40,11 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory, KProcess* ow
40 //! HACK: Open continuous mapping from sysmodule pool. 40 //! HACK: Open continuous mapping from sysmodule pool.
41 auto option = KMemoryManager::EncodeOption(KMemoryManager::Pool::Secure, 41 auto option = KMemoryManager::EncodeOption(KMemoryManager::Pool::Secure,
42 KMemoryManager::Direction::FromBack); 42 KMemoryManager::Direction::FromBack);
43 m_physical_address = kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, option); 43 m_physical_address = m_kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, option);
44 R_UNLESS(m_physical_address != 0, ResultOutOfMemory); 44 R_UNLESS(m_physical_address != 0, ResultOutOfMemory);
45 45
46 //! Insert the result into our page group. 46 //! Insert the result into our page group.
47 m_page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager()); 47 m_page_group.emplace(m_kernel, &m_kernel.GetSystemSystemResource().GetBlockInfoManager());
48 m_page_group->AddBlock(m_physical_address, num_pages); 48 m_page_group->AddBlock(m_physical_address, num_pages);
49 49
50 // Commit our reservation. 50 // Commit our reservation.
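
The `KScopedResourceReservation` used above is a two-phase RAII guard: the constructor reserves memory against the resource limit, `Commit()` makes the reservation permanent, and any early exit lets the destructor roll the reservation back. A hedged sketch with a hypothetical `ResourceLimit` reduced to a counter:

#include <cstddef>

// Hypothetical stand-in for KResourceLimit: a plain capacity counter.
struct ResourceLimit {
    std::size_t used = 0;
    std::size_t max = 0;
    bool Reserve(std::size_t n) {
        if (used + n > max) {
            return false;
        }
        used += n;
        return true;
    }
    void Release(std::size_t n) { used -= n; }
};

// Reserve on construction; roll back on destruction unless committed.
class ScopedResourceReservation {
public:
    ScopedResourceReservation(ResourceLimit& limit, std::size_t n)
        : m_limit{limit}, m_size{n}, m_succeeded{limit.Reserve(n)} {}

    ~ScopedResourceReservation() {
        if (m_succeeded && !m_committed) {
            m_limit.Release(m_size); // early exit: undo the reservation
        }
    }

    bool Succeeded() const { return m_succeeded; }
    void Commit() { m_committed = true; } // keep the reservation permanently

private:
    ResourceLimit& m_limit;
    std::size_t m_size;
    bool m_succeeded;
    bool m_committed = false;
};
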
diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h
index 8c94ceb3a..b4c4125bb 100644
--- a/src/core/hle/kernel/k_shared_memory.h
+++ b/src/core/hle/kernel/k_shared_memory.h
@@ -23,7 +23,7 @@ class KSharedMemory final
23 KERNEL_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject); 23 KERNEL_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject);
24 24
25public: 25public:
26 explicit KSharedMemory(KernelCore& kernel_); 26 explicit KSharedMemory(KernelCore& kernel);
27 ~KSharedMemory() override; 27 ~KSharedMemory() override;
28 28
29 Result Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_, 29 Result Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
index dd912a82d..b7da3eee7 100644
--- a/src/core/hle/kernel/k_synchronization_object.cpp
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -17,9 +17,9 @@ namespace {
17 17
18class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait { 18class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait {
19public: 19public:
20 ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel_, KSynchronizationObject** o, 20 ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel, KSynchronizationObject** o,
21 KSynchronizationObject::ThreadListNode* n, s32 c) 21 KSynchronizationObject::ThreadListNode* n, s32 c)
22 : KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) {} 22 : KThreadQueueWithoutEndWait(kernel), m_objects(o), m_nodes(n), m_count(c) {}
23 23
24 void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object, 24 void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
25 Result wait_result) override { 25 Result wait_result) override {
@@ -144,13 +144,12 @@ Result KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
144 R_RETURN(thread->GetWaitResult()); 144 R_RETURN(thread->GetWaitResult());
145} 145}
146 146
147KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_) 147KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : KAutoObjectWithList{kernel} {}
148 : KAutoObjectWithList{kernel_} {}
149 148
150KSynchronizationObject::~KSynchronizationObject() = default; 149KSynchronizationObject::~KSynchronizationObject() = default;
151 150
152void KSynchronizationObject::NotifyAvailable(Result result) { 151void KSynchronizationObject::NotifyAvailable(Result result) {
153 KScopedSchedulerLock sl(kernel); 152 KScopedSchedulerLock sl(m_kernel);
154 153
155 // If we're not signaled, we've nothing to notify. 154 // If we're not signaled, we've nothing to notify.
156 if (!this->IsSignaled()) { 155 if (!this->IsSignaled()) {
@@ -168,7 +167,7 @@ std::vector<KThread*> KSynchronizationObject::GetWaitingThreadsForDebugging() co
168 167
169 // If debugging, dump the list of waiters. 168 // If debugging, dump the list of waiters.
170 { 169 {
171 KScopedSchedulerLock lock(kernel); 170 KScopedSchedulerLock lock(m_kernel);
172 for (auto* cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) { 171 for (auto* cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
173 threads.emplace_back(cur_node->thread); 172 threads.emplace_back(cur_node->thread);
174 } 173 }
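
NotifyAvailable above takes the scheduler lock and then re-checks IsSignaled() before walking the waiter list; the recheck is what makes a stale or racing notification harmless. A compact sketch of that check-then-walk shape, with std::mutex standing in for the scheduler lock and ints for threads (invented names throughout):

#include <mutex>
#include <vector>

class SyncObject {
public:
    void AddWaiter(int thread_id) {
        std::lock_guard lk{m_lock};
        m_waiters.push_back(thread_id);
    }

    void Signal() {
        std::lock_guard lk{m_lock};
        m_signaled = true;
        NotifyAvailableLocked();
    }

private:
    void NotifyAvailableLocked() {
        // If we're not signaled, we've nothing to notify (the same
        // early-out as in the diff above).
        if (!m_signaled) {
            return;
        }
        for (int id : m_waiters) {
            (void)id; // a real kernel would end each thread's wait here
        }
        m_waiters.clear();
    }

    std::mutex m_lock;
    bool m_signaled = false;
    std::vector<int> m_waiters;
};
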
diff --git a/src/core/hle/kernel/k_system_resource.h b/src/core/hle/kernel/k_system_resource.h
index aec058a95..d36aaa9bd 100644
--- a/src/core/hle/kernel/k_system_resource.h
+++ b/src/core/hle/kernel/k_system_resource.h
@@ -21,7 +21,7 @@ class KSystemResource : public KAutoObject {
21 KERNEL_AUTOOBJECT_TRAITS(KSystemResource, KAutoObject); 21 KERNEL_AUTOOBJECT_TRAITS(KSystemResource, KAutoObject);
22 22
23public: 23public:
24 explicit KSystemResource(KernelCore& kernel_) : KAutoObject(kernel_) {} 24 explicit KSystemResource(KernelCore& kernel) : KAutoObject(kernel) {}
25 25
26protected: 26protected:
27 void SetSecureResource() { 27 void SetSecureResource() {
@@ -87,8 +87,8 @@ private:
87class KSecureSystemResource final 87class KSecureSystemResource final
88 : public KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource> { 88 : public KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource> {
89public: 89public:
90 explicit KSecureSystemResource(KernelCore& kernel_) 90 explicit KSecureSystemResource(KernelCore& kernel)
91 : KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel_) { 91 : KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel) {
92 // Mark ourselves as being a secure resource. 92 // Mark ourselves as being a secure resource.
93 this->SetSecureResource(); 93 this->SetSecureResource();
94 } 94 }
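
KSecureSystemResource inherits from `KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>`, a CRTP-style mixin: the template receives the most-derived type so its static helpers can create and destroy exactly that class, while the second parameter selects the base to inherit from. A sketch of the shape, with invented names and plain new/delete standing in for the slab heap:

// CRTP-style mixin: Derived names the most-derived type; Base selects
// what to inherit from.
template <typename Derived, typename Base>
class WithSlabHeap : public Base {
public:
    using Base::Base;

    static Derived* Allocate() {
        return new Derived();
    }
    static void Free(Derived* obj) {
        delete obj;
    }
};

class SystemResource {
public:
    virtual ~SystemResource() = default;
protected:
    void SetSecure() { m_secure = true; }
private:
    bool m_secure = false;
};

class SecureSystemResource final
    : public WithSlabHeap<SecureSystemResource, SystemResource> {
public:
    SecureSystemResource() {
        this->SetSecure(); // mark ourselves as a secure resource
    }
};

int main() {
    auto* resource = SecureSystemResource::Allocate();
    SecureSystemResource::Free(resource);
    return 0;
}
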
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 26e3700e4..49a683e5f 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -77,14 +77,14 @@ struct ThreadLocalRegion {
77 77
78class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait { 78class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait {
79public: 79public:
80 explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel_) 80 explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel)
81 : KThreadQueueWithoutEndWait(kernel_) {} 81 : KThreadQueueWithoutEndWait(kernel) {}
82}; 82};
83 83
84class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue { 84class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue {
85public: 85public:
86 explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl) 86 explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel, KThread::WaiterList* wl)
87 : KThreadQueue(kernel_), m_wait_list(wl) {} 87 : KThreadQueue(kernel), m_wait_list(wl) {}
88 88
89 void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override { 89 void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
90 // Remove the thread from the wait list. 90 // Remove the thread from the wait list.
@@ -100,8 +100,8 @@ private:
100 100
101} // namespace 101} // namespace
102 102
103KThread::KThread(KernelCore& kernel_) 103KThread::KThread(KernelCore& kernel)
104 : KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {} 104 : KAutoObjectWithSlabHeapAndContainer{kernel}, activity_pause_lock{kernel} {}
105KThread::~KThread() = default; 105KThread::~KThread() = default;
106 106
107Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, 107Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
@@ -236,7 +236,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
236 SetInExceptionHandler(); 236 SetInExceptionHandler();
237 237
238 // Set thread ID. 238 // Set thread ID.
239 thread_id = kernel.CreateNewThreadID(); 239 thread_id = m_kernel.CreateNewThreadID();
240 240
241 // We initialized! 241 // We initialized!
242 initialized = true; 242 initialized = true;
@@ -343,7 +343,7 @@ void KThread::Finalize() {
343 // Release any waiters. 343 // Release any waiters.
344 { 344 {
345 ASSERT(waiting_lock_info == nullptr); 345 ASSERT(waiting_lock_info == nullptr);
346 KScopedSchedulerLock sl{kernel}; 346 KScopedSchedulerLock sl{m_kernel};
347 347
348 // Check that we have no kernel waiters. 348 // Check that we have no kernel waiters.
349 ASSERT(num_kernel_waiters == 0); 349 ASSERT(num_kernel_waiters == 0);
@@ -374,7 +374,7 @@ void KThread::Finalize() {
374 it = held_lock_info_list.erase(it); 374 it = held_lock_info_list.erase(it);
375 375
376 // Free the lock info. 376 // Free the lock info.
377 LockWithPriorityInheritanceInfo::Free(kernel, lock_info); 377 LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
378 } 378 }
379 } 379 }
380 380
@@ -390,7 +390,7 @@ bool KThread::IsSignaled() const {
390} 390}
391 391
392void KThread::OnTimer() { 392void KThread::OnTimer() {
393 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 393 ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
394 394
395 // If we're waiting, cancel the wait. 395 // If we're waiting, cancel the wait.
396 if (GetState() == ThreadState::Waiting) { 396 if (GetState() == ThreadState::Waiting) {
@@ -399,12 +399,12 @@ void KThread::OnTimer() {
399} 399}
400 400
401void KThread::StartTermination() { 401void KThread::StartTermination() {
402 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 402 ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
403 403
404 // Release user exception and unpin, if relevant. 404 // Release user exception and unpin, if relevant.
405 if (parent != nullptr) { 405 if (parent != nullptr) {
406 parent->ReleaseUserException(this); 406 parent->ReleaseUserException(this);
407 if (parent->GetPinnedThread(GetCurrentCoreId(kernel)) == this) { 407 if (parent->GetPinnedThread(GetCurrentCoreId(m_kernel)) == this) {
408 parent->UnpinCurrentThread(core_id); 408 parent->UnpinCurrentThread(core_id);
409 } 409 }
410 } 410 }
@@ -422,7 +422,7 @@ void KThread::StartTermination() {
422 KSynchronizationObject::NotifyAvailable(); 422 KSynchronizationObject::NotifyAvailable();
423 423
424 // Clear previous thread in KScheduler. 424 // Clear previous thread in KScheduler.
425 KScheduler::ClearPreviousThread(kernel, this); 425 KScheduler::ClearPreviousThread(m_kernel, this);
426 426
427 // Register terminated dpc flag. 427 // Register terminated dpc flag.
428 RegisterDpc(DpcFlag::Terminated); 428 RegisterDpc(DpcFlag::Terminated);
@@ -434,7 +434,7 @@ void KThread::FinishTermination() {
434 for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) { 434 for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
435 KThread* core_thread{}; 435 KThread* core_thread{};
436 do { 436 do {
437 core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread(); 437 core_thread = m_kernel.Scheduler(i).GetSchedulerCurrentThread();
438 } while (core_thread == this); 438 } while (core_thread == this);
439 } 439 }
440 } 440 }
@@ -449,7 +449,7 @@ void KThread::DoWorkerTaskImpl() {
449} 449}
450 450
451void KThread::Pin(s32 current_core) { 451void KThread::Pin(s32 current_core) {
452 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 452 ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
453 453
454 // Set ourselves as pinned. 454 // Set ourselves as pinned.
455 GetStackParameters().is_pinned = true; 455 GetStackParameters().is_pinned = true;
@@ -472,7 +472,7 @@ void KThread::Pin(s32 current_core) {
472 472
473 if (active_core != current_core || physical_affinity_mask.GetAffinityMask() != 473 if (active_core != current_core || physical_affinity_mask.GetAffinityMask() !=
474 original_physical_affinity_mask.GetAffinityMask()) { 474 original_physical_affinity_mask.GetAffinityMask()) {
475 KScheduler::OnThreadAffinityMaskChanged(kernel, this, original_physical_affinity_mask, 475 KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, original_physical_affinity_mask,
476 active_core); 476 active_core);
477 } 477 }
478 } 478 }
@@ -492,7 +492,7 @@ void KThread::Pin(s32 current_core) {
492} 492}
493 493
494void KThread::Unpin() { 494void KThread::Unpin() {
495 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 495 ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
496 496
497 // Set ourselves as unpinned. 497 // Set ourselves as unpinned.
498 GetStackParameters().is_pinned = false; 498 GetStackParameters().is_pinned = false;
@@ -520,7 +520,7 @@ void KThread::Unpin() {
520 std::countl_zero(physical_affinity_mask.GetAffinityMask()))); 520 std::countl_zero(physical_affinity_mask.GetAffinityMask())));
521 } 521 }
522 } 522 }
523 KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core); 523 KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core);
524 } 524 }
525 } 525 }
526 526
@@ -549,7 +549,7 @@ u16 KThread::GetUserDisableCount() const {
549 return {}; 549 return {};
550 } 550 }
551 551
552 auto& memory = kernel.System().Memory(); 552 auto& memory = m_kernel.System().Memory();
553 return memory.Read16(tls_address + offsetof(ThreadLocalRegion, disable_count)); 553 return memory.Read16(tls_address + offsetof(ThreadLocalRegion, disable_count));
554} 554}
555 555
@@ -559,7 +559,7 @@ void KThread::SetInterruptFlag() {
559 return; 559 return;
560 } 560 }
561 561
562 auto& memory = kernel.System().Memory(); 562 auto& memory = m_kernel.System().Memory();
563 memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1); 563 memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1);
564} 564}
565 565
@@ -569,12 +569,12 @@ void KThread::ClearInterruptFlag() {
569 return; 569 return;
570 } 570 }
571 571
572 auto& memory = kernel.System().Memory(); 572 auto& memory = m_kernel.System().Memory();
573 memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0); 573 memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0);
574} 574}
575 575
576Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { 576Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
577 KScopedSchedulerLock sl{kernel}; 577 KScopedSchedulerLock sl{m_kernel};
578 578
579 // Get the virtual mask. 579 // Get the virtual mask.
580 *out_ideal_core = virtual_ideal_core_id; 580 *out_ideal_core = virtual_ideal_core_id;
@@ -584,7 +584,7 @@ Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
584} 584}
585 585
586Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { 586Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
587 KScopedSchedulerLock sl{kernel}; 587 KScopedSchedulerLock sl{m_kernel};
588 ASSERT(num_core_migration_disables >= 0); 588 ASSERT(num_core_migration_disables >= 0);
589 589
590 // Select between core mask and original core mask. 590 // Select between core mask and original core mask.
@@ -607,7 +607,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
607 // Set the core mask. 607 // Set the core mask.
608 u64 p_affinity_mask = 0; 608 u64 p_affinity_mask = 0;
609 { 609 {
610 KScopedSchedulerLock sl(kernel); 610 KScopedSchedulerLock sl(m_kernel);
611 ASSERT(num_core_migration_disables >= 0); 611 ASSERT(num_core_migration_disables >= 0);
612 612
613 // If we're updating, set our ideal virtual core. 613 // If we're updating, set our ideal virtual core.
@@ -653,7 +653,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
653 std::countl_zero(physical_affinity_mask.GetAffinityMask())); 653 std::countl_zero(physical_affinity_mask.GetAffinityMask()));
654 SetActiveCore(new_core); 654 SetActiveCore(new_core);
655 } 655 }
656 KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core); 656 KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core);
657 } 657 }
658 } else { 658 } else {
659 // Otherwise, we edit the original affinity for restoration later. 659 // Otherwise, we edit the original affinity for restoration later.
@@ -663,12 +663,12 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
663 } 663 }
664 664
665 // Update the pinned waiter list. 665 // Update the pinned waiter list.
666 ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, std::addressof(pinned_waiter_list)); 666 ThreadQueueImplForKThreadSetProperty wait_queue_(m_kernel, std::addressof(pinned_waiter_list));
667 { 667 {
668 bool retry_update{}; 668 bool retry_update{};
669 do { 669 do {
670 // Lock the scheduler. 670 // Lock the scheduler.
671 KScopedSchedulerLock sl(kernel); 671 KScopedSchedulerLock sl(m_kernel);
672 672
673 // Don't do any further management if our termination has been requested. 673 // Don't do any further management if our termination has been requested.
674 R_SUCCEED_IF(IsTerminationRequested()); 674 R_SUCCEED_IF(IsTerminationRequested());
@@ -681,7 +681,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
681 s32 thread_core; 681 s32 thread_core;
682 for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); 682 for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
683 ++thread_core) { 683 ++thread_core) {
684 if (kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) { 684 if (m_kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) {
685 thread_is_current = true; 685 thread_is_current = true;
686 break; 686 break;
687 } 687 }
@@ -693,12 +693,12 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
693 // If the thread is pinned, we want to wait until it's not pinned. 693 // If the thread is pinned, we want to wait until it's not pinned.
694 if (GetStackParameters().is_pinned) { 694 if (GetStackParameters().is_pinned) {
695 // Verify that the current thread isn't terminating. 695 // Verify that the current thread isn't terminating.
696 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), 696 R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(),
697 ResultTerminationRequested); 697 ResultTerminationRequested);
698 698
699 // Wait until the thread isn't pinned any more. 699 // Wait until the thread isn't pinned any more.
700 pinned_waiter_list.push_back(GetCurrentThread(kernel)); 700 pinned_waiter_list.push_back(GetCurrentThread(m_kernel));
701 GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_)); 701 GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue_));
702 } else { 702 } else {
703 // If the thread isn't pinned, release the scheduler lock and retry until it's 703 // If the thread isn't pinned, release the scheduler lock and retry until it's
704 // not current. 704 // not current.
@@ -714,13 +714,13 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
714void KThread::SetBasePriority(s32 value) { 714void KThread::SetBasePriority(s32 value) {
715 ASSERT(Svc::HighestThreadPriority <= value && value <= Svc::LowestThreadPriority); 715 ASSERT(Svc::HighestThreadPriority <= value && value <= Svc::LowestThreadPriority);
716 716
717 KScopedSchedulerLock sl{kernel}; 717 KScopedSchedulerLock sl{m_kernel};
718 718
719 // Change our base priority. 719 // Change our base priority.
720 base_priority = value; 720 base_priority = value;
721 721
722 // Perform a priority restoration. 722 // Perform a priority restoration.
723 RestorePriority(kernel, this); 723 RestorePriority(m_kernel, this);
724} 724}
725 725
726KThread* KThread::GetLockOwner() const { 726KThread* KThread::GetLockOwner() const {
@@ -729,7 +729,7 @@ KThread* KThread::GetLockOwner() const {
729 729
730void KThread::IncreaseBasePriority(s32 priority_) { 730void KThread::IncreaseBasePriority(s32 priority_) {
731 ASSERT(Svc::HighestThreadPriority <= priority_ && priority_ <= Svc::LowestThreadPriority); 731 ASSERT(Svc::HighestThreadPriority <= priority_ && priority_ <= Svc::LowestThreadPriority);
732 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); 732 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
733 ASSERT(!this->GetStackParameters().is_pinned); 733 ASSERT(!this->GetStackParameters().is_pinned);
734 734
735 // Set our base priority. 735 // Set our base priority.
@@ -737,12 +737,12 @@ void KThread::IncreaseBasePriority(s32 priority_) {
737 base_priority = priority_; 737 base_priority = priority_;
738 738
739 // Perform a priority restoration. 739 // Perform a priority restoration.
740 RestorePriority(kernel, this); 740 RestorePriority(m_kernel, this);
741 } 741 }
742} 742}
743 743
744void KThread::RequestSuspend(SuspendType type) { 744void KThread::RequestSuspend(SuspendType type) {
745 KScopedSchedulerLock sl{kernel}; 745 KScopedSchedulerLock sl{m_kernel};
746 746
747 // Note the request in our flags. 747 // Note the request in our flags.
748 suspend_request_flags |= 748 suspend_request_flags |=
@@ -753,7 +753,7 @@ void KThread::RequestSuspend(SuspendType type) {
753} 753}
754 754
755void KThread::Resume(SuspendType type) { 755void KThread::Resume(SuspendType type) {
756 KScopedSchedulerLock sl{kernel}; 756 KScopedSchedulerLock sl{m_kernel};
757 757
758 // Clear the request in our flags. 758 // Clear the request in our flags.
759 suspend_request_flags &= 759 suspend_request_flags &=
@@ -764,7 +764,7 @@ void KThread::Resume(SuspendType type) {
764} 764}
765 765
766void KThread::WaitCancel() { 766void KThread::WaitCancel() {
767 KScopedSchedulerLock sl{kernel}; 767 KScopedSchedulerLock sl{m_kernel};
768 768
769 // Check if we're waiting and cancellable. 769 // Check if we're waiting and cancellable.
770 if (this->GetState() == ThreadState::Waiting && cancellable) { 770 if (this->GetState() == ThreadState::Waiting && cancellable) {
@@ -777,7 +777,7 @@ void KThread::WaitCancel() {
777} 777}
778 778
779void KThread::TrySuspend() { 779void KThread::TrySuspend() {
780 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 780 ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
781 ASSERT(IsSuspendRequested()); 781 ASSERT(IsSuspendRequested());
782 782
783 // Ensure that we have no waiters. 783 // Ensure that we have no waiters.
@@ -791,7 +791,7 @@ void KThread::TrySuspend() {
791} 791}
792 792
793void KThread::UpdateState() { 793void KThread::UpdateState() {
794 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 794 ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
795 795
796 // Set our suspend flags in state. 796 // Set our suspend flags in state.
797 const ThreadState old_state = thread_state.load(std::memory_order_relaxed); 797 const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
@@ -801,37 +801,37 @@ void KThread::UpdateState() {
801 801
802 // Note the state change in scheduler. 802 // Note the state change in scheduler.
803 if (new_state != old_state) { 803 if (new_state != old_state) {
804 KScheduler::OnThreadStateChanged(kernel, this, old_state); 804 KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
805 } 805 }
806} 806}
807 807
808void KThread::Continue() { 808void KThread::Continue() {
809 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 809 ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
810 810
811 // Clear our suspend flags in state. 811 // Clear our suspend flags in state.
812 const ThreadState old_state = thread_state.load(std::memory_order_relaxed); 812 const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
813 thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed); 813 thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed);
814 814
815 // Note the state change in scheduler. 815 // Note the state change in scheduler.
816 KScheduler::OnThreadStateChanged(kernel, this, old_state); 816 KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
817} 817}
818 818
819void KThread::CloneFpuStatus() { 819void KThread::CloneFpuStatus() {
820 // We shouldn't reach here when starting kernel threads. 820 // We shouldn't reach here when starting kernel threads.
821 ASSERT(this->GetOwnerProcess() != nullptr); 821 ASSERT(this->GetOwnerProcess() != nullptr);
822 ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(kernel)); 822 ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(m_kernel));
823 823
824 if (this->GetOwnerProcess()->Is64BitProcess()) { 824 if (this->GetOwnerProcess()->Is64BitProcess()) {
825 // Clone FPSR and FPCR. 825 // Clone FPSR and FPCR.
826 ThreadContext64 cur_ctx{}; 826 ThreadContext64 cur_ctx{};
827 kernel.System().CurrentArmInterface().SaveContext(cur_ctx); 827 m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx);
828 828
829 this->GetContext64().fpcr = cur_ctx.fpcr; 829 this->GetContext64().fpcr = cur_ctx.fpcr;
830 this->GetContext64().fpsr = cur_ctx.fpsr; 830 this->GetContext64().fpsr = cur_ctx.fpsr;
831 } else { 831 } else {
832 // Clone FPSCR. 832 // Clone FPSCR.
833 ThreadContext32 cur_ctx{}; 833 ThreadContext32 cur_ctx{};
834 kernel.System().CurrentArmInterface().SaveContext(cur_ctx); 834 m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx);
835 835
836 this->GetContext32().fpscr = cur_ctx.fpscr; 836 this->GetContext32().fpscr = cur_ctx.fpscr;
837 } 837 }
@@ -844,7 +844,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
844 // Set the activity. 844 // Set the activity.
845 { 845 {
846 // Lock the scheduler. 846 // Lock the scheduler.
847 KScopedSchedulerLock sl(kernel); 847 KScopedSchedulerLock sl(m_kernel);
848 848
849 // Verify our state. 849 // Verify our state.
850 const auto cur_state = this->GetState(); 850 const auto cur_state = this->GetState();
@@ -871,13 +871,13 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
871 871
872 // If the thread is now paused, update the pinned waiter list. 872 // If the thread is now paused, update the pinned waiter list.
873 if (activity == Svc::ThreadActivity::Paused) { 873 if (activity == Svc::ThreadActivity::Paused) {
874 ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, 874 ThreadQueueImplForKThreadSetProperty wait_queue_(m_kernel,
875 std::addressof(pinned_waiter_list)); 875 std::addressof(pinned_waiter_list));
876 876
877 bool thread_is_current; 877 bool thread_is_current;
878 do { 878 do {
879 // Lock the scheduler. 879 // Lock the scheduler.
880 KScopedSchedulerLock sl(kernel); 880 KScopedSchedulerLock sl(m_kernel);
881 881
882 // Don't do any further management if our termination has been requested. 882 // Don't do any further management if our termination has been requested.
883 R_SUCCEED_IF(this->IsTerminationRequested()); 883 R_SUCCEED_IF(this->IsTerminationRequested());
@@ -888,17 +888,17 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
888 // Check whether the thread is pinned. 888 // Check whether the thread is pinned.
889 if (this->GetStackParameters().is_pinned) { 889 if (this->GetStackParameters().is_pinned) {
890 // Verify that the current thread isn't terminating. 890 // Verify that the current thread isn't terminating.
891 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), 891 R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(),
892 ResultTerminationRequested); 892 ResultTerminationRequested);
893 893
894 // Wait until the thread isn't pinned any more. 894 // Wait until the thread isn't pinned any more.
895 pinned_waiter_list.push_back(GetCurrentThread(kernel)); 895 pinned_waiter_list.push_back(GetCurrentThread(m_kernel));
896 GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_)); 896 GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue_));
897 } else { 897 } else {
898 // Check if the thread is currently running. 898 // Check if the thread is currently running.
899 // If it is, we'll need to retry. 899 // If it is, we'll need to retry.
900 for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) { 900 for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
901 if (kernel.Scheduler(i).GetSchedulerCurrentThread() == this) { 901 if (m_kernel.Scheduler(i).GetSchedulerCurrentThread() == this) {
902 thread_is_current = true; 902 thread_is_current = true;
903 break; 903 break;
904 } 904 }
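
The hunk above is one instance of a recurring loop in k_thread.cpp: under the scheduler lock, scan every core's current thread; if this thread is pinned, park the caller on pinned_waiter_list, otherwise drop the lock and rescan until the thread is no longer current anywhere. A reduced sketch of the rescan half, assuming an invented per-core `g_current` array (the real code re-takes the scheduler lock each pass rather than relying on atomics):

#include <array>
#include <atomic>
#include <cstddef>
#include <thread>

constexpr std::size_t NUM_CPU_CORES = 4;

struct Thread; // opaque in this sketch

// Invented per-core slot: which thread each core is running right now.
std::array<std::atomic<Thread*>, NUM_CPU_CORES> g_current{};

// Rescan until `target` is no longer current on any core, mirroring the
// retry loops in KThread::SetCoreMask and KThread::SetActivity.
void WaitUntilNotCurrent(Thread* target) {
    bool retry = true;
    while (retry) {
        retry = false;
        for (std::size_t core = 0; core < NUM_CPU_CORES; ++core) {
            if (g_current[core].load(std::memory_order_acquire) == target) {
                retry = true;
                break;
            }
        }
        if (retry) {
            std::this_thread::yield();
        }
    }
}
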
@@ -917,7 +917,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
917 // Get the context. 917 // Get the context.
918 { 918 {
919 // Lock the scheduler. 919 // Lock the scheduler.
920 KScopedSchedulerLock sl{kernel}; 920 KScopedSchedulerLock sl{m_kernel};
921 921
922 // Verify that we're suspended. 922 // Verify that we're suspended.
923 R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState); 923 R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
@@ -946,7 +946,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
946} 946}
947 947
948void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) { 948void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) {
949 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); 949 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
950 950
951 // Set ourselves as the lock's owner. 951 // Set ourselves as the lock's owner.
952 lock_info->SetOwner(this); 952 lock_info->SetOwner(this);
@@ -957,7 +957,7 @@ void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) {
957 957
958KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key_, 958KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key_,
959 bool is_kernel_address_key_) { 959 bool is_kernel_address_key_) {
960 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); 960 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
961 961
962 // Try to find an existing held lock. 962 // Try to find an existing held lock.
963 for (auto& held_lock : held_lock_info_list) { 963 for (auto& held_lock : held_lock_info_list) {
@@ -971,7 +971,7 @@ KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_ke
971} 971}
972 972
973void KThread::AddWaiterImpl(KThread* thread) { 973void KThread::AddWaiterImpl(KThread* thread) {
974 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); 974 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
975 ASSERT(thread->GetConditionVariableTree() == nullptr); 975 ASSERT(thread->GetConditionVariableTree() == nullptr);
976 976
977 // Get the thread's address key. 977 // Get the thread's address key.
@@ -981,7 +981,7 @@ void KThread::AddWaiterImpl(KThread* thread) {
981 // Keep track of how many kernel waiters we have. 981 // Keep track of how many kernel waiters we have.
982 if (is_kernel_address_key_) { 982 if (is_kernel_address_key_) {
983 ASSERT((num_kernel_waiters++) >= 0); 983 ASSERT((num_kernel_waiters++) >= 0);
984 KScheduler::SetSchedulerUpdateNeeded(kernel); 984 KScheduler::SetSchedulerUpdateNeeded(m_kernel);
985 } 985 }
986 986
987 // Get the relevant lock info. 987 // Get the relevant lock info.
@@ -989,7 +989,7 @@ void KThread::AddWaiterImpl(KThread* thread) {
989 if (lock_info == nullptr) { 989 if (lock_info == nullptr) {
990 // Create a new lock for the address key. 990 // Create a new lock for the address key.
991 lock_info = 991 lock_info =
992 LockWithPriorityInheritanceInfo::Create(kernel, address_key_, is_kernel_address_key_); 992 LockWithPriorityInheritanceInfo::Create(m_kernel, address_key_, is_kernel_address_key_);
993 993
994 // Add the new lock to our list. 994 // Add the new lock to our list.
995 this->AddHeldLock(lock_info); 995 this->AddHeldLock(lock_info);
@@ -1000,12 +1000,12 @@ void KThread::AddWaiterImpl(KThread* thread) {
1000} 1000}
1001 1001
1002void KThread::RemoveWaiterImpl(KThread* thread) { 1002void KThread::RemoveWaiterImpl(KThread* thread) {
1003 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); 1003 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
1004 1004
1005 // Keep track of how many kernel waiters we have. 1005 // Keep track of how many kernel waiters we have.
1006 if (thread->GetIsKernelAddressKey()) { 1006 if (thread->GetIsKernelAddressKey()) {
1007 ASSERT((num_kernel_waiters--) > 0); 1007 ASSERT((num_kernel_waiters--) > 0);
1008 KScheduler::SetSchedulerUpdateNeeded(kernel); 1008 KScheduler::SetSchedulerUpdateNeeded(m_kernel);
1009 } 1009 }
1010 1010
1011 // Get the info for the lock the thread is waiting on. 1011 // Get the info for the lock the thread is waiting on.
@@ -1015,7 +1015,7 @@ void KThread::RemoveWaiterImpl(KThread* thread) {
1015 // Remove the waiter. 1015 // Remove the waiter.
1016 if (lock_info->RemoveWaiter(thread)) { 1016 if (lock_info->RemoveWaiter(thread)) {
1017 held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info)); 1017 held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info));
1018 LockWithPriorityInheritanceInfo::Free(kernel, lock_info); 1018 LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
1019 } 1019 }
1020} 1020}
1021 1021
@@ -1076,7 +1076,7 @@ void KThread::AddWaiter(KThread* thread) {
1076 1076
1077 // If the thread has a higher priority than us, we should inherit. 1077 // If the thread has a higher priority than us, we should inherit.
1078 if (thread->GetPriority() < this->GetPriority()) { 1078 if (thread->GetPriority() < this->GetPriority()) {
1079 RestorePriority(kernel, this); 1079 RestorePriority(m_kernel, this);
1080 } 1080 }
1081} 1081}
1082 1082
@@ -1087,12 +1087,12 @@ void KThread::RemoveWaiter(KThread* thread) {
1087 // lower priority. 1087 // lower priority.
1088 if (this->GetPriority() == thread->GetPriority() && 1088 if (this->GetPriority() == thread->GetPriority() &&
1089 this->GetPriority() < this->GetBasePriority()) { 1089 this->GetPriority() < this->GetBasePriority()) {
1090 RestorePriority(kernel, this); 1090 RestorePriority(m_kernel, this);
1091 } 1091 }
1092} 1092}
1093 1093
1094KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key_) { 1094KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key_) {
1095 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); 1095 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
1096 1096
1097 // Get the relevant lock info. 1097 // Get the relevant lock info.
1098 auto* lock_info = this->FindHeldLock(key, is_kernel_address_key_); 1098 auto* lock_info = this->FindHeldLock(key, is_kernel_address_key_);
@@ -1108,7 +1108,7 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
1108 if (lock_info->GetIsKernelAddressKey()) { 1108 if (lock_info->GetIsKernelAddressKey()) {
1109 num_kernel_waiters -= lock_info->GetWaiterCount(); 1109 num_kernel_waiters -= lock_info->GetWaiterCount();
1110 ASSERT(num_kernel_waiters >= 0); 1110 ASSERT(num_kernel_waiters >= 0);
1111 KScheduler::SetSchedulerUpdateNeeded(kernel); 1111 KScheduler::SetSchedulerUpdateNeeded(m_kernel);
1112 } 1112 }
1113 1113
1114 ASSERT(lock_info->GetWaiterCount() > 0); 1114 ASSERT(lock_info->GetWaiterCount() > 0);
@@ -1120,7 +1120,7 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
1120 *out_has_waiters = false; 1120 *out_has_waiters = false;
1121 1121
1122 // Free the lock info, since it has no waiters. 1122 // Free the lock info, since it has no waiters.
1123 LockWithPriorityInheritanceInfo::Free(kernel, lock_info); 1123 LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
1124 } else { 1124 } else {
1125 // There are additional waiters on the lock. 1125 // There are additional waiters on the lock.
1126 *out_has_waiters = true; 1126 *out_has_waiters = true;
@@ -1142,7 +1142,7 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
1142 // to lower priority. 1142 // to lower priority.
1143 if (this->GetPriority() == next_lock_owner->GetPriority() && 1143 if (this->GetPriority() == next_lock_owner->GetPriority() &&
1144 this->GetPriority() < this->GetBasePriority()) { 1144 this->GetPriority() < this->GetBasePriority()) {
1145 RestorePriority(kernel, this); 1145 RestorePriority(m_kernel, this);
1146 // NOTE: No need to restore priority on the next lock owner, because it was already the 1146 // NOTE: No need to restore priority on the next lock owner, because it was already the
1147 // highest priority waiter on the lock. 1147 // highest priority waiter on the lock.
1148 } 1148 }
@@ -1153,18 +1153,18 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
1153 1153
1154Result KThread::Run() { 1154Result KThread::Run() {
1155 while (true) { 1155 while (true) {
1156 KScopedSchedulerLock lk{kernel}; 1156 KScopedSchedulerLock lk{m_kernel};
1157 1157
1158 // If either this thread or the current thread are requesting termination, note it. 1158 // If either this thread or the current thread are requesting termination, note it.
1159 R_UNLESS(!IsTerminationRequested(), ResultTerminationRequested); 1159 R_UNLESS(!IsTerminationRequested(), ResultTerminationRequested);
1160 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested); 1160 R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested);
1161 1161
1162 // Ensure our thread state is correct. 1162 // Ensure our thread state is correct.
1163 R_UNLESS(GetState() == ThreadState::Initialized, ResultInvalidState); 1163 R_UNLESS(GetState() == ThreadState::Initialized, ResultInvalidState);
1164 1164
1165 // If the current thread has been asked to suspend, suspend it and retry. 1165 // If the current thread has been asked to suspend, suspend it and retry.
1166 if (GetCurrentThread(kernel).IsSuspended()) { 1166 if (GetCurrentThread(m_kernel).IsSuspended()) {
1167 GetCurrentThread(kernel).UpdateState(); 1167 GetCurrentThread(m_kernel).UpdateState();
1168 continue; 1168 continue;
1169 } 1169 }
1170 1170
@@ -1184,7 +1184,7 @@ Result KThread::Run() {
1184} 1184}
1185 1185
1186void KThread::Exit() { 1186void KThread::Exit() {
1187 ASSERT(this == GetCurrentThreadPointer(kernel)); 1187 ASSERT(this == GetCurrentThreadPointer(m_kernel));
1188 1188
1189 // Release the thread resource hint, running thread count from parent. 1189 // Release the thread resource hint, running thread count from parent.
1190 if (parent != nullptr) { 1190 if (parent != nullptr) {
@@ -1195,7 +1195,7 @@ void KThread::Exit() {
1195 1195
1196 // Perform termination. 1196 // Perform termination.
1197 { 1197 {
1198 KScopedSchedulerLock sl{kernel}; 1198 KScopedSchedulerLock sl{m_kernel};
1199 1199
1200 // Disallow all suspension. 1200 // Disallow all suspension.
1201 suspend_allowed_flags = 0; 1201 suspend_allowed_flags = 0;
@@ -1208,21 +1208,21 @@ void KThread::Exit() {
1208 StartTermination(); 1208 StartTermination();
1209 1209
1210 // Register the thread as a work task. 1210 // Register the thread as a work task.
1211 KWorkerTaskManager::AddTask(kernel, KWorkerTaskManager::WorkerType::Exit, this); 1211 KWorkerTaskManager::AddTask(m_kernel, KWorkerTaskManager::WorkerType::Exit, this);
1212 } 1212 }
1213 1213
1214 UNREACHABLE_MSG("KThread::Exit() would return"); 1214 UNREACHABLE_MSG("KThread::Exit() would return");
1215} 1215}
1216 1216
1217Result KThread::Terminate() { 1217Result KThread::Terminate() {
1218 ASSERT(this != GetCurrentThreadPointer(kernel)); 1218 ASSERT(this != GetCurrentThreadPointer(m_kernel));
1219 1219
1220 // Request the thread terminate if it hasn't already. 1220 // Request the thread terminate if it hasn't already.
1221 if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) { 1221 if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) {
1222 // If the thread isn't terminated, wait for it to terminate. 1222 // If the thread isn't terminated, wait for it to terminate.
1223 s32 index; 1223 s32 index;
1224 KSynchronizationObject* objects[] = {this}; 1224 KSynchronizationObject* objects[] = {this};
1225 R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1, 1225 R_TRY(KSynchronizationObject::Wait(m_kernel, std::addressof(index), objects, 1,
1226 Svc::WaitInfinite)); 1226 Svc::WaitInfinite));
1227 } 1227 }
1228 1228
@@ -1230,9 +1230,9 @@ Result KThread::Terminate() {
1230} 1230}
1231 1231
1232ThreadState KThread::RequestTerminate() { 1232ThreadState KThread::RequestTerminate() {
1233 ASSERT(this != GetCurrentThreadPointer(kernel)); 1233 ASSERT(this != GetCurrentThreadPointer(m_kernel));
1234 1234
1235 KScopedSchedulerLock sl{kernel}; 1235 KScopedSchedulerLock sl{m_kernel};
1236 1236
1237 // Determine if this is the first termination request. 1237 // Determine if this is the first termination request.
1238 const bool first_request = [&]() -> bool { 1238 const bool first_request = [&]() -> bool {
@@ -1268,10 +1268,10 @@ ThreadState KThread::RequestTerminate() {
1268 1268
1269 // If the thread is runnable, send a termination interrupt to other cores. 1269 // If the thread is runnable, send a termination interrupt to other cores.
1270 if (this->GetState() == ThreadState::Runnable) { 1270 if (this->GetState() == ThreadState::Runnable) {
1271 if (const u64 core_mask = 1271 if (const u64 core_mask = physical_affinity_mask.GetAffinityMask() &
1272 physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel)); 1272 ~(1ULL << GetCurrentCoreId(m_kernel));
1273 core_mask != 0) { 1273 core_mask != 0) {
1274 Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask); 1274 Kernel::KInterruptManager::SendInterProcessorInterrupt(m_kernel, core_mask);
1275 } 1275 }
1276 } 1276 }
1277 1277
@@ -1285,15 +1285,15 @@ ThreadState KThread::RequestTerminate() {
1285} 1285}
1286 1286
1287Result KThread::Sleep(s64 timeout) { 1287Result KThread::Sleep(s64 timeout) {
1288 ASSERT(!kernel.GlobalSchedulerContext().IsLocked()); 1288 ASSERT(!m_kernel.GlobalSchedulerContext().IsLocked());
1289 ASSERT(this == GetCurrentThreadPointer(kernel)); 1289 ASSERT(this == GetCurrentThreadPointer(m_kernel));
1290 ASSERT(timeout > 0); 1290 ASSERT(timeout > 0);
1291 1291
1292 ThreadQueueImplForKThreadSleep wait_queue_(kernel); 1292 ThreadQueueImplForKThreadSleep wait_queue_(m_kernel);
1293 KHardwareTimer* timer{}; 1293 KHardwareTimer* timer{};
1294 { 1294 {
1295 // Setup the scheduling lock and sleep. 1295 // Setup the scheduling lock and sleep.
1296 KScopedSchedulerLockAndSleep slp(kernel, std::addressof(timer), this, timeout); 1296 KScopedSchedulerLockAndSleep slp(m_kernel, std::addressof(timer), this, timeout);
1297 1297
1298 // Check if the thread should terminate. 1298 // Check if the thread should terminate.
1299 if (this->IsTerminationRequested()) { 1299 if (this->IsTerminationRequested()) {
@@ -1311,7 +1311,7 @@ Result KThread::Sleep(s64 timeout) {
1311} 1311}
1312 1312
1313void KThread::RequestDummyThreadWait() { 1313void KThread::RequestDummyThreadWait() {
1314 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); 1314 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
1315 ASSERT(this->IsDummyThread()); 1315 ASSERT(this->IsDummyThread());
1316 1316
1317 // We will block when the scheduler lock is released. 1317 // We will block when the scheduler lock is released.
@@ -1319,7 +1319,7 @@ void KThread::RequestDummyThreadWait() {
1319} 1319}
1320 1320
1321void KThread::DummyThreadBeginWait() { 1321void KThread::DummyThreadBeginWait() {
1322 if (!this->IsDummyThread() || kernel.IsPhantomModeForSingleCore()) { 1322 if (!this->IsDummyThread() || m_kernel.IsPhantomModeForSingleCore()) {
1323 // Occurs in single core mode. 1323 // Occurs in single core mode.
1324 return; 1324 return;
1325 } 1325 }
@@ -1329,7 +1329,7 @@ void KThread::DummyThreadBeginWait() {
1329} 1329}
1330 1330
1331void KThread::DummyThreadEndWait() { 1331void KThread::DummyThreadEndWait() {
1332 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); 1332 ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
1333 ASSERT(this->IsDummyThread()); 1333 ASSERT(this->IsDummyThread());
1334 1334
1335 // Wake up the waiting thread. 1335 // Wake up the waiting thread.
@@ -1347,7 +1347,7 @@ void KThread::BeginWait(KThreadQueue* queue) {
1347 1347
1348void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_) { 1348void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_) {
1349 // Lock the scheduler. 1349 // Lock the scheduler.
1350 KScopedSchedulerLock sl(kernel); 1350 KScopedSchedulerLock sl(m_kernel);
1351 1351
1352 // If we're waiting, notify our queue that we're available. 1352 // If we're waiting, notify our queue that we're available.
1353 if (GetState() == ThreadState::Waiting) { 1353 if (GetState() == ThreadState::Waiting) {
@@ -1357,7 +1357,7 @@ void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wa
1357 1357
1358void KThread::EndWait(Result wait_result_) { 1358void KThread::EndWait(Result wait_result_) {
1359 // Lock the scheduler. 1359 // Lock the scheduler.
1360 KScopedSchedulerLock sl(kernel); 1360 KScopedSchedulerLock sl(m_kernel);
1361 1361
1362 // If we're waiting, notify our queue that we're available. 1362 // If we're waiting, notify our queue that we're available.
1363 if (GetState() == ThreadState::Waiting) { 1363 if (GetState() == ThreadState::Waiting) {
@@ -1373,7 +1373,7 @@ void KThread::EndWait(Result wait_result_) {
1373 1373
1374void KThread::CancelWait(Result wait_result_, bool cancel_timer_task) { 1374void KThread::CancelWait(Result wait_result_, bool cancel_timer_task) {
1375 // Lock the scheduler. 1375 // Lock the scheduler.
1376 KScopedSchedulerLock sl(kernel); 1376 KScopedSchedulerLock sl(m_kernel);
1377 1377
1378 // If we're waiting, notify our queue that we're available. 1378 // If we're waiting, notify our queue that we're available.
1379 if (GetState() == ThreadState::Waiting) { 1379 if (GetState() == ThreadState::Waiting) {
@@ -1382,7 +1382,7 @@ void KThread::CancelWait(Result wait_result_, bool cancel_timer_task) {
1382} 1382}
1383 1383
1384void KThread::SetState(ThreadState state) { 1384void KThread::SetState(ThreadState state) {
1385 KScopedSchedulerLock sl{kernel}; 1385 KScopedSchedulerLock sl{m_kernel};
1386 1386
1387 // Clear debugging state 1387 // Clear debugging state
1388 SetMutexWaitAddressForDebugging({}); 1388 SetMutexWaitAddressForDebugging({});
@@ -1393,7 +1393,7 @@ void KThread::SetState(ThreadState state) {
1393 static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)), 1393 static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)),
1394 std::memory_order_relaxed); 1394 std::memory_order_relaxed);
1395 if (thread_state.load(std::memory_order_relaxed) != old_state) { 1395 if (thread_state.load(std::memory_order_relaxed) != old_state) {
1396 KScheduler::OnThreadStateChanged(kernel, this, old_state); 1396 KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
1397 } 1397 }
1398} 1398}
1399 1399
@@ -1427,20 +1427,20 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
1427 1427
1428KScopedDisableDispatch::~KScopedDisableDispatch() { 1428KScopedDisableDispatch::~KScopedDisableDispatch() {
1429 // If we are shutting down the kernel, none of this is relevant anymore. 1429 // If we are shutting down the kernel, none of this is relevant anymore.
1430 if (kernel.IsShuttingDown()) { 1430 if (m_kernel.IsShuttingDown()) {
1431 return; 1431 return;
1432 } 1432 }
1433 1433
1434 if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) { 1434 if (GetCurrentThread(m_kernel).GetDisableDispatchCount() <= 1) {
1435 auto* scheduler = kernel.CurrentScheduler(); 1435 auto* scheduler = m_kernel.CurrentScheduler();
1436 1436
1437 if (scheduler && !kernel.IsPhantomModeForSingleCore()) { 1437 if (scheduler && !m_kernel.IsPhantomModeForSingleCore()) {
1438 scheduler->RescheduleCurrentCore(); 1438 scheduler->RescheduleCurrentCore();
1439 } else { 1439 } else {
1440 KScheduler::RescheduleCurrentHLEThread(kernel); 1440 KScheduler::RescheduleCurrentHLEThread(m_kernel);
1441 } 1441 }
1442 } else { 1442 } else {
1443 GetCurrentThread(kernel).EnableDispatch(); 1443 GetCurrentThread(m_kernel).EnableDispatch();
1444 } 1444 }
1445} 1445}
1446 1446
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index f4cb861a9..e541ea079 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -128,7 +128,7 @@ public:
128 static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1; 128 static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1;
129 static constexpr s32 DummyThreadPriority = Svc::LowestThreadPriority + 2; 129 static constexpr s32 DummyThreadPriority = Svc::LowestThreadPriority + 2;
130 130
131 explicit KThread(KernelCore& kernel_); 131 explicit KThread(KernelCore& kernel);
132 ~KThread() override; 132 ~KThread() override;
133 133
134public: 134public:
@@ -494,12 +494,12 @@ public:
494 } 494 }
495 495
496 void DisableDispatch() { 496 void DisableDispatch() {
497 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); 497 ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() >= 0);
498 this->GetStackParameters().disable_count++; 498 this->GetStackParameters().disable_count++;
499 } 499 }
500 500
501 void EnableDispatch() { 501 void EnableDispatch() {
502 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0); 502 ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() > 0);
503 this->GetStackParameters().disable_count--; 503 this->GetStackParameters().disable_count--;
504 } 504 }
505 505
@@ -970,9 +970,9 @@ public:
970 970
971class KScopedDisableDispatch { 971class KScopedDisableDispatch {
972public: 972public:
973 [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} { 973 [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel) : m_kernel{kernel} {
974 // If we are shutting down the kernel, none of this is relevant anymore. 974 // If we are shutting down the kernel, none of this is relevant anymore.
975 if (kernel.IsShuttingDown()) { 975 if (m_kernel.IsShuttingDown()) {
976 return; 976 return;
977 } 977 }
978 GetCurrentThread(kernel).DisableDispatch(); 978 GetCurrentThread(kernel).DisableDispatch();
@@ -981,7 +981,7 @@ public:
981 ~KScopedDisableDispatch(); 981 ~KScopedDisableDispatch();
982 982
983private: 983private:
984 KernelCore& kernel; 984 KernelCore& m_kernel;
985}; 985};
986 986
987inline void KTimerTask::OnTimer() { 987inline void KTimerTask::OnTimer() {
diff --git a/src/core/hle/kernel/k_thread_queue.h b/src/core/hle/kernel/k_thread_queue.h
index 8ec2f900b..117af0919 100644
--- a/src/core/hle/kernel/k_thread_queue.h
+++ b/src/core/hle/kernel/k_thread_queue.h
@@ -31,7 +31,7 @@ private:
31 31
32class KThreadQueueWithoutEndWait : public KThreadQueue { 32class KThreadQueueWithoutEndWait : public KThreadQueue {
33public: 33public:
34 explicit KThreadQueueWithoutEndWait(KernelCore& kernel_) : KThreadQueue(kernel_) {} 34 explicit KThreadQueueWithoutEndWait(KernelCore& kernel) : KThreadQueue(kernel) {}
35 35
36 void EndWait(KThread* waiting_thread, Result wait_result) override final; 36 void EndWait(KThread* waiting_thread, Result wait_result) override final;
37}; 37};
diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp
index c25cc2e39..ceec364af 100644
--- a/src/core/hle/kernel/k_transfer_memory.cpp
+++ b/src/core/hle/kernel/k_transfer_memory.cpp
@@ -8,23 +8,23 @@
8 8
9namespace Kernel { 9namespace Kernel {
10 10
11KTransferMemory::KTransferMemory(KernelCore& kernel_) 11KTransferMemory::KTransferMemory(KernelCore& kernel)
12 : KAutoObjectWithSlabHeapAndContainer{kernel_} {} 12 : KAutoObjectWithSlabHeapAndContainer{kernel} {}
13 13
14KTransferMemory::~KTransferMemory() = default; 14KTransferMemory::~KTransferMemory() = default;
15 15
16Result KTransferMemory::Initialize(VAddr address_, std::size_t size_, 16Result KTransferMemory::Initialize(VAddr address, std::size_t size,
17 Svc::MemoryPermission owner_perm_) { 17 Svc::MemoryPermission owner_perm) {
18 // Set members. 18 // Set members.
19 m_owner = GetCurrentProcessPointer(kernel); 19 m_owner = GetCurrentProcessPointer(m_kernel);
20 20
21 // TODO(bunnei): Lock for transfer memory 21 // TODO(bunnei): Lock for transfer memory
22 22
23 // Set remaining tracking members. 23 // Set remaining tracking members.
24 m_owner->Open(); 24 m_owner->Open();
25 m_owner_perm = owner_perm_; 25 m_owner_perm = owner_perm;
26 m_address = address_; 26 m_address = address;
27 m_size = size_; 27 m_size = size;
28 m_is_initialized = true; 28 m_is_initialized = true;
29 29
30 R_SUCCEED(); 30 R_SUCCEED();
diff --git a/src/core/hle/kernel/k_transfer_memory.h b/src/core/hle/kernel/k_transfer_memory.h
index 9a37bd903..3d4d795a5 100644
--- a/src/core/hle/kernel/k_transfer_memory.h
+++ b/src/core/hle/kernel/k_transfer_memory.h
@@ -23,10 +23,10 @@ class KTransferMemory final
23 KERNEL_AUTOOBJECT_TRAITS(KTransferMemory, KAutoObject); 23 KERNEL_AUTOOBJECT_TRAITS(KTransferMemory, KAutoObject);
24 24
25public: 25public:
26 explicit KTransferMemory(KernelCore& kernel_); 26 explicit KTransferMemory(KernelCore& kernel);
27 ~KTransferMemory() override; 27 ~KTransferMemory() override;
28 28
29 Result Initialize(VAddr address_, std::size_t size_, Svc::MemoryPermission owner_perm_); 29 Result Initialize(VAddr address, std::size_t size, Svc::MemoryPermission owner_perm);
30 30
31 void Finalize() override; 31 void Finalize() override;
32 32
diff --git a/src/core/hle/kernel/k_worker_task.h b/src/core/hle/kernel/k_worker_task.h
index ef591d831..9a230c03c 100644
--- a/src/core/hle/kernel/k_worker_task.h
+++ b/src/core/hle/kernel/k_worker_task.h
@@ -9,7 +9,7 @@ namespace Kernel {
9 9
10class KWorkerTask : public KSynchronizationObject { 10class KWorkerTask : public KSynchronizationObject {
11public: 11public:
12 explicit KWorkerTask(KernelCore& kernel_); 12 explicit KWorkerTask(KernelCore& kernel);
13 13
14 void DoWorkerTask(); 14 void DoWorkerTask();
15}; 15};
diff --git a/src/core/hle/kernel/k_worker_task_manager.cpp b/src/core/hle/kernel/k_worker_task_manager.cpp
index 04042bf8f..8ead39591 100644
--- a/src/core/hle/kernel/k_worker_task_manager.cpp
+++ b/src/core/hle/kernel/k_worker_task_manager.cpp
@@ -10,7 +10,7 @@
10 10
11namespace Kernel { 11namespace Kernel {
12 12
13KWorkerTask::KWorkerTask(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} 13KWorkerTask::KWorkerTask(KernelCore& kernel) : KSynchronizationObject{kernel} {}
14 14
15void KWorkerTask::DoWorkerTask() { 15void KWorkerTask::DoWorkerTask() {
16 if (auto* const thread = this->DynamicCast<KThread*>(); thread != nullptr) { 16 if (auto* const thread = this->DynamicCast<KThread*>(); thread != nullptr) {
diff --git a/src/core/hle/kernel/k_worker_task_manager.h b/src/core/hle/kernel/k_worker_task_manager.h
index f6618883e..8745a4ce2 100644
--- a/src/core/hle/kernel/k_worker_task_manager.h
+++ b/src/core/hle/kernel/k_worker_task_manager.h
@@ -20,7 +20,7 @@ public:
20 20
21 KWorkerTaskManager(); 21 KWorkerTaskManager();
22 22
23 static void AddTask(KernelCore& kernel_, WorkerType type, KWorkerTask* task); 23 static void AddTask(KernelCore& kernel, WorkerType type, KWorkerTask* task);
24 24
25private: 25private:
26 void AddTask(KernelCore& kernel, KWorkerTask* task); 26 void AddTask(KernelCore& kernel, KWorkerTask* task);
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h
index b9f5066de..d1bbc7670 100644
--- a/src/core/hle/kernel/slab_helpers.h
+++ b/src/core/hle/kernel/slab_helpers.h
@@ -66,7 +66,7 @@ private:
66 } 66 }
67 67
68public: 68public:
69 explicit KAutoObjectWithSlabHeap(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {} 69 explicit KAutoObjectWithSlabHeap(KernelCore& kernel) : Base(kernel) {}
70 virtual ~KAutoObjectWithSlabHeap() = default; 70 virtual ~KAutoObjectWithSlabHeap() = default;
71 71
72 virtual void Destroy() override { 72 virtual void Destroy() override {
@@ -76,7 +76,7 @@ public:
76 arg = this->GetPostDestroyArgument(); 76 arg = this->GetPostDestroyArgument();
77 this->Finalize(); 77 this->Finalize();
78 } 78 }
79 Free(kernel, static_cast<Derived*>(this)); 79 Free(Base::m_kernel, static_cast<Derived*>(this));
80 if (is_initialized) { 80 if (is_initialized) {
81 Derived::PostDestroy(arg); 81 Derived::PostDestroy(arg);
82 } 82 }
@@ -90,7 +90,7 @@ public:
90 } 90 }
91 91
92 size_t GetSlabIndex() const { 92 size_t GetSlabIndex() const {
93 return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this)); 93 return SlabHeap<Derived>(Base::m_kernel).GetObjectIndex(static_cast<const Derived*>(this));
94 } 94 }
95 95
96public: 96public:
@@ -125,9 +125,6 @@ public:
125 static size_t GetNumRemaining(KernelCore& kernel) { 125 static size_t GetNumRemaining(KernelCore& kernel) {
126 return kernel.SlabHeap<Derived>().GetNumRemaining(); 126 return kernel.SlabHeap<Derived>().GetNumRemaining();
127 } 127 }
128
129protected:
130 KernelCore& kernel;
131}; 128};
132 129
133template <typename Derived, typename Base> 130template <typename Derived, typename Base>
@@ -144,18 +141,18 @@ private:
144 } 141 }
145 142
146public: 143public:
147 KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel_) : Base(kernel_) {} 144 KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel) : Base(kernel) {}
148 virtual ~KAutoObjectWithSlabHeapAndContainer() {} 145 virtual ~KAutoObjectWithSlabHeapAndContainer() {}
149 146
150 virtual void Destroy() override { 147 virtual void Destroy() override {
151 const bool is_initialized = this->IsInitialized(); 148 const bool is_initialized = this->IsInitialized();
152 uintptr_t arg = 0; 149 uintptr_t arg = 0;
153 if (is_initialized) { 150 if (is_initialized) {
154 Base::kernel.ObjectListContainer().Unregister(this); 151 Base::m_kernel.ObjectListContainer().Unregister(this);
155 arg = this->GetPostDestroyArgument(); 152 arg = this->GetPostDestroyArgument();
156 this->Finalize(); 153 this->Finalize();
157 } 154 }
158 Free(Base::kernel, static_cast<Derived*>(this)); 155 Free(Base::m_kernel, static_cast<Derived*>(this));
159 if (is_initialized) { 156 if (is_initialized) {
160 Derived::PostDestroy(arg); 157 Derived::PostDestroy(arg);
161 } 158 }
@@ -169,7 +166,7 @@ public:
169 } 166 }
170 167
171 size_t GetSlabIndex() const { 168 size_t GetSlabIndex() const {
172 return SlabHeap<Derived>(Base::kernel).GetObjectIndex(static_cast<const Derived*>(this)); 169 return SlabHeap<Derived>(Base::m_kernel).GetObjectIndex(static_cast<const Derived*>(this));
173 } 170 }
174 171
175public: 172public: