Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.cpp | 24
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.h   | 27
-rw-r--r--  src/core/hle/kernel/k_transfer_memory.cpp         | 14
-rw-r--r--  src/core/hle/kernel/k_transfer_memory.h           | 20
-rw-r--r--  src/core/hle/kernel/physical_core.cpp              | 41
-rw-r--r--  src/core/hle/kernel/physical_core.h                | 36
-rw-r--r--  src/core/hle/kernel/slab_helpers.h                 | 13
7 files changed, 81 insertions, 94 deletions
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
index 40fd0c038..dd912a82d 100644
--- a/src/core/hle/kernel/k_synchronization_object.cpp
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -71,26 +71,26 @@ void KSynchronizationObject::Finalize() {
     KAutoObject::Finalize();
 }
 
-Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
+Result KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
                                     KSynchronizationObject** objects, const s32 num_objects,
                                     s64 timeout) {
     // Allocate space on stack for thread nodes.
     std::vector<ThreadListNode> thread_nodes(num_objects);
 
     // Prepare for wait.
-    KThread* thread = GetCurrentThreadPointer(kernel_ctx);
+    KThread* thread = GetCurrentThreadPointer(kernel);
     KHardwareTimer* timer{};
-    ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel_ctx, objects,
-                                                            thread_nodes.data(), num_objects);
+    ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel, objects, thread_nodes.data(),
+                                                            num_objects);
 
     {
         // Setup the scheduling lock and sleep.
-        KScopedSchedulerLockAndSleep slp(kernel_ctx, std::addressof(timer), thread, timeout);
+        KScopedSchedulerLockAndSleep slp(kernel, std::addressof(timer), thread, timeout);
 
         // Check if the thread should terminate.
         if (thread->IsTerminationRequested()) {
             slp.CancelSleep();
-            return ResultTerminationRequested;
+            R_THROW(ResultTerminationRequested);
         }
 
         // Check if any of the objects are already signaled.
@@ -100,21 +100,21 @@ Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
             if (objects[i]->IsSignaled()) {
                 *out_index = i;
                 slp.CancelSleep();
-                return ResultSuccess;
+                R_THROW(ResultSuccess);
             }
         }
 
         // Check if the timeout is zero.
         if (timeout == 0) {
             slp.CancelSleep();
-            return ResultTimedOut;
+            R_THROW(ResultTimedOut);
         }
 
         // Check if waiting was canceled.
         if (thread->IsWaitCancelled()) {
             slp.CancelSleep();
             thread->ClearWaitCancelled();
-            return ResultCancelled;
+            R_THROW(ResultCancelled);
         }
 
         // Add the waiters.
@@ -141,7 +141,7 @@ Result KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
     *out_index = thread->GetSyncedIndex();
 
     // Get the wait result.
-    return thread->GetWaitResult();
+    R_RETURN(thread->GetWaitResult());
 }
 
 KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
@@ -158,7 +158,7 @@ void KSynchronizationObject::NotifyAvailable(Result result) {
     }
 
     // Iterate over each thread.
-    for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
+    for (auto* cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
         cur_node->thread->NotifyAvailable(this, result);
     }
 }
@@ -169,7 +169,7 @@ std::vector<KThread*> KSynchronizationObject::GetWaitingThreadsForDebugging() co
     // If debugging, dump the list of waiters.
     {
         KScopedSchedulerLock lock(kernel);
-        for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
+        for (auto* cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
            threads.emplace_back(cur_node->thread);
         }
     }
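The functional change in this file is that plain return statements become the kernel's result macros (R_THROW, R_RETURN, R_SUCCEED), alongside the kernel_ctx → kernel parameter rename. Below is a minimal, hedged sketch of how such early-return result macros behave; the macro definitions and the ResultTimedOut value are simplified stand-ins for illustration, not yuzu's actual result.h implementation.

    #include <cstdint>

    // Simplified stand-ins for the result macros seen in the diff above.
    // The real macros may carry extra bookkeeping; these only model the
    // early-return control flow.
    using Result = std::uint32_t;
    constexpr Result ResultSuccess = 0;
    constexpr Result ResultTimedOut = 0xEA01; // hypothetical value, for illustration only

    #define R_SUCCEED() return ResultSuccess   // finish with success
    #define R_THROW(res) return (res)          // bail out with the given result
    #define R_RETURN(expr) return (expr)       // propagate a callee's result

    // Mirrors the shape of the timeout check in KSynchronizationObject::Wait.
    Result WaitSketch(std::int64_t timeout) {
        if (timeout == 0) {
            R_THROW(ResultTimedOut);
        }
        R_SUCCEED();
    }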
diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h
index 8d8122ab7..d55a2673d 100644
--- a/src/core/hle/kernel/k_synchronization_object.h
+++ b/src/core/hle/kernel/k_synchronization_object.h
@@ -24,31 +24,30 @@ public:
         KThread* thread{};
     };
 
-    [[nodiscard]] static Result Wait(KernelCore& kernel, s32* out_index,
-                                     KSynchronizationObject** objects, const s32 num_objects,
-                                     s64 timeout);
+    static Result Wait(KernelCore& kernel, s32* out_index, KSynchronizationObject** objects,
+                       const s32 num_objects, s64 timeout);
 
     void Finalize() override;
 
-    [[nodiscard]] virtual bool IsSignaled() const = 0;
+    virtual bool IsSignaled() const = 0;
 
-    [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;
+    std::vector<KThread*> GetWaitingThreadsForDebugging() const;
 
     void LinkNode(ThreadListNode* node_) {
         // Link the node to the list.
-        if (thread_list_tail == nullptr) {
-            thread_list_head = node_;
+        if (m_thread_list_tail == nullptr) {
+            m_thread_list_head = node_;
         } else {
-            thread_list_tail->next = node_;
+            m_thread_list_tail->next = node_;
         }
 
-        thread_list_tail = node_;
+        m_thread_list_tail = node_;
     }
 
     void UnlinkNode(ThreadListNode* node_) {
         // Unlink the node from the list.
         ThreadListNode* prev_ptr =
-            reinterpret_cast<ThreadListNode*>(std::addressof(thread_list_head));
+            reinterpret_cast<ThreadListNode*>(std::addressof(m_thread_list_head));
         ThreadListNode* prev_val = nullptr;
         ThreadListNode *prev, *tail_prev;
 
@@ -59,8 +58,8 @@ public:
             prev_val = prev_ptr;
         } while (prev_ptr != node_);
 
-        if (thread_list_tail == node_) {
-            thread_list_tail = tail_prev;
+        if (m_thread_list_tail == node_) {
+            m_thread_list_tail = tail_prev;
         }
 
         prev->next = node_->next;
@@ -78,8 +77,8 @@ protected:
     }
 
 private:
-    ThreadListNode* thread_list_head{};
-    ThreadListNode* thread_list_tail{};
+    ThreadListNode* m_thread_list_head{};
+    ThreadListNode* m_thread_list_tail{};
 };
 
 } // namespace Kernel
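Aside from the m_ prefix on members, this header's LinkNode/UnlinkNode pair implements an intrusive singly linked list with head and tail pointers, where each waiting thread supplies its own node. A minimal, self-contained sketch of the same tail-append and traversal pattern follows, using illustrative names rather than the kernel types; the reinterpret_cast trick UnlinkNode uses to treat the head pointer as a node is deliberately omitted.

    #include <cassert>

    struct Node {
        Node* next{};   // intrusive link, analogous to ThreadListNode::next
        int payload{};
    };

    struct WaitList {
        Node* head{};
        Node* tail{};

        // Append at the tail, as LinkNode does for a newly waiting thread.
        void Link(Node* node) {
            if (tail == nullptr) {
                head = node;
            } else {
                tail->next = node;
            }
            tail = node;
        }

        // Walk the list, as NotifyAvailable walks m_thread_list_head.
        int Count() const {
            int n = 0;
            for (auto* cur = head; cur != nullptr; cur = cur->next) {
                ++n;
            }
            return n;
        }
    };

    int main() {
        WaitList list;
        Node a{}, b{};
        list.Link(&a);
        list.Link(&b);
        assert(list.Count() == 2);
    }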
diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp
index faa5c73b5..c25cc2e39 100644
--- a/src/core/hle/kernel/k_transfer_memory.cpp
+++ b/src/core/hle/kernel/k_transfer_memory.cpp
@@ -16,18 +16,18 @@ KTransferMemory::~KTransferMemory() = default;
 Result KTransferMemory::Initialize(VAddr address_, std::size_t size_,
                                    Svc::MemoryPermission owner_perm_) {
     // Set members.
-    owner = GetCurrentProcessPointer(kernel);
+    m_owner = GetCurrentProcessPointer(kernel);
 
     // TODO(bunnei): Lock for transfer memory
 
     // Set remaining tracking members.
-    owner->Open();
-    owner_perm = owner_perm_;
-    address = address_;
-    size = size_;
-    is_initialized = true;
+    m_owner->Open();
+    m_owner_perm = owner_perm_;
+    m_address = address_;
+    m_size = size_;
+    m_is_initialized = true;
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 void KTransferMemory::Finalize() {
diff --git a/src/core/hle/kernel/k_transfer_memory.h b/src/core/hle/kernel/k_transfer_memory.h
index 85d508ee7..9a37bd903 100644
--- a/src/core/hle/kernel/k_transfer_memory.h
+++ b/src/core/hle/kernel/k_transfer_memory.h
@@ -31,33 +31,33 @@ public:
     void Finalize() override;
 
     bool IsInitialized() const override {
-        return is_initialized;
+        return m_is_initialized;
     }
 
     uintptr_t GetPostDestroyArgument() const override {
-        return reinterpret_cast<uintptr_t>(owner);
+        return reinterpret_cast<uintptr_t>(m_owner);
     }
 
     static void PostDestroy(uintptr_t arg);
 
     KProcess* GetOwner() const override {
-        return owner;
+        return m_owner;
     }
 
     VAddr GetSourceAddress() const {
-        return address;
+        return m_address;
     }
 
     size_t GetSize() const {
-        return is_initialized ? size : 0;
+        return m_is_initialized ? m_size : 0;
     }
 
 private:
-    KProcess* owner{};
-    VAddr address{};
-    Svc::MemoryPermission owner_perm{};
-    size_t size{};
-    bool is_initialized{};
+    KProcess* m_owner{};
+    VAddr m_address{};
+    Svc::MemoryPermission m_owner_perm{};
+    size_t m_size{};
+    bool m_is_initialized{};
 };
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index 3044922ac..2e0c36129 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -10,14 +10,14 @@
 
 namespace Kernel {
 
-PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_)
-    : core_index{core_index_}, system{system_}, scheduler{scheduler_} {
+PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system, KScheduler& scheduler)
+    : m_core_index{core_index}, m_system{system}, m_scheduler{scheduler} {
 #if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
     // TODO(bunnei): Initialization relies on a core being available. We may later replace this with
     // a 32-bit instance of Dynarmic. This should be abstracted out to a CPU manager.
     auto& kernel = system.Kernel();
-    arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
-        system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
+    m_arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
+        system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), m_core_index);
 #else
 #error Platform not supported yet.
 #endif
@@ -25,13 +25,13 @@ PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KSche
 
 PhysicalCore::~PhysicalCore() = default;
 
-void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
+void PhysicalCore::Initialize(bool is_64_bit) {
 #if defined(ARCHITECTURE_x86_64) || defined(ARCHITECTURE_arm64)
-    auto& kernel = system.Kernel();
+    auto& kernel = m_system.Kernel();
     if (!is_64_bit) {
         // We already initialized a 64-bit core, replace with a 32-bit one.
-        arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
-            system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
+        m_arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
+            m_system, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), m_core_index);
     }
 #else
 #error Platform not supported yet.
@@ -39,31 +39,30 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
 }
 
 void PhysicalCore::Run() {
-    arm_interface->Run();
-    arm_interface->ClearExclusiveState();
+    m_arm_interface->Run();
+    m_arm_interface->ClearExclusiveState();
 }
 
 void PhysicalCore::Idle() {
-    std::unique_lock lk{guard};
-    on_interrupt.wait(lk, [this] { return is_interrupted; });
+    std::unique_lock lk{m_guard};
+    m_on_interrupt.wait(lk, [this] { return m_is_interrupted; });
 }
 
 bool PhysicalCore::IsInterrupted() const {
-    return is_interrupted;
+    return m_is_interrupted;
 }
 
 void PhysicalCore::Interrupt() {
-    std::unique_lock lk{guard};
-    is_interrupted = true;
-    arm_interface->SignalInterrupt();
-    on_interrupt.notify_all();
+    std::unique_lock lk{m_guard};
+    m_is_interrupted = true;
+    m_arm_interface->SignalInterrupt();
+    m_on_interrupt.notify_all();
 }
 
 void PhysicalCore::ClearInterrupt() {
-    std::unique_lock lk{guard};
-    is_interrupted = false;
-    arm_interface->ClearInterrupt();
-    on_interrupt.notify_all();
+    std::unique_lock lk{m_guard};
+    m_is_interrupted = false;
+    m_arm_interface->ClearInterrupt();
 }
 
 } // namespace Kernel
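Idle, Interrupt, and ClearInterrupt form a standard mutex plus condition-variable handshake: Idle blocks until the interrupt flag is set, Interrupt sets it and wakes the sleeping core, and after this change ClearInterrupt only resets the flag, since nothing ever waits for the flag to become false. A self-contained sketch of that pattern with illustrative names (not the PhysicalCore class itself):

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    class InterruptGate {
    public:
        // Block until an interrupt is pending, like PhysicalCore::Idle.
        void Idle() {
            std::unique_lock lk{m_guard};
            m_on_interrupt.wait(lk, [this] { return m_is_interrupted; });
        }

        // Raise the flag and wake any idle waiter, like PhysicalCore::Interrupt.
        void Interrupt() {
            std::unique_lock lk{m_guard};
            m_is_interrupted = true;
            m_on_interrupt.notify_all();
        }

        // Reset the flag; no notify is needed because nothing waits for
        // "not interrupted" (this matches the notify_all() dropped above).
        void ClearInterrupt() {
            std::unique_lock lk{m_guard};
            m_is_interrupted = false;
        }

    private:
        std::mutex m_guard;
        std::condition_variable m_on_interrupt;
        bool m_is_interrupted{};
    };

    int main() {
        InterruptGate gate;
        std::thread waiter([&] { gate.Idle(); });
        gate.Interrupt();
        waiter.join();
        gate.ClearInterrupt();
    }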
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index fb8e7933e..5cb398fdc 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -47,46 +47,38 @@ public:
     bool IsInterrupted() const;
 
     bool IsInitialized() const {
-        return arm_interface != nullptr;
+        return m_arm_interface != nullptr;
     }
 
     Core::ARM_Interface& ArmInterface() {
-        return *arm_interface;
+        return *m_arm_interface;
     }
 
     const Core::ARM_Interface& ArmInterface() const {
-        return *arm_interface;
-    }
-
-    bool IsMainCore() const {
-        return core_index == 0;
-    }
-
-    bool IsSystemCore() const {
-        return core_index == 3;
+        return *m_arm_interface;
     }
 
     std::size_t CoreIndex() const {
-        return core_index;
+        return m_core_index;
     }
 
     Kernel::KScheduler& Scheduler() {
-        return scheduler;
+        return m_scheduler;
     }
 
     const Kernel::KScheduler& Scheduler() const {
-        return scheduler;
+        return m_scheduler;
    }
 
 private:
-    const std::size_t core_index;
-    Core::System& system;
-    Kernel::KScheduler& scheduler;
+    const std::size_t m_core_index;
+    Core::System& m_system;
+    Kernel::KScheduler& m_scheduler;
 
-    std::mutex guard;
-    std::condition_variable on_interrupt;
-    std::unique_ptr<Core::ARM_Interface> arm_interface;
-    bool is_interrupted{};
+    std::mutex m_guard;
+    std::condition_variable m_on_interrupt;
+    std::unique_ptr<Core::ARM_Interface> m_arm_interface;
+    bool m_is_interrupted{};
 };
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h
index 0228ce188..b9f5066de 100644
--- a/src/core/hle/kernel/slab_helpers.h
+++ b/src/core/hle/kernel/slab_helpers.h
@@ -132,7 +132,7 @@ protected:
 
 template <typename Derived, typename Base>
 class KAutoObjectWithSlabHeapAndContainer : public Base {
-    static_assert(std::is_base_of<KAutoObjectWithList, Base>::value);
+    static_assert(std::is_base_of_v<KAutoObjectWithList, Base>);
 
 private:
     static Derived* Allocate(KernelCore& kernel) {
@@ -144,18 +144,18 @@ private:
     }
 
 public:
-    KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {}
+    KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel_) : Base(kernel_) {}
     virtual ~KAutoObjectWithSlabHeapAndContainer() {}
 
     virtual void Destroy() override {
         const bool is_initialized = this->IsInitialized();
         uintptr_t arg = 0;
         if (is_initialized) {
-            kernel.ObjectListContainer().Unregister(this);
+            Base::kernel.ObjectListContainer().Unregister(this);
             arg = this->GetPostDestroyArgument();
             this->Finalize();
         }
-        Free(kernel, static_cast<Derived*>(this));
+        Free(Base::kernel, static_cast<Derived*>(this));
         if (is_initialized) {
             Derived::PostDestroy(arg);
         }
@@ -169,7 +169,7 @@ public:
     }
 
     size_t GetSlabIndex() const {
-        return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this));
+        return SlabHeap<Derived>(Base::kernel).GetObjectIndex(static_cast<const Derived*>(this));
     }
 
 public:
@@ -209,9 +209,6 @@ public:
     static size_t GetNumRemaining(KernelCore& kernel) {
        return kernel.SlabHeap<Derived>().GetNumRemaining();
     }
-
-protected:
-    KernelCore& kernel;
 };
 
 } // namespace Kernel
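Removing the duplicated KernelCore& member means the template now reaches the reference stored in its base class through Base::kernel. That qualification is required by C++ two-phase lookup: members of a dependent base are invisible to unqualified name lookup inside a class template, so Base:: (or this->) must be spelled out. A minimal sketch of that rule with hypothetical names; it assumes only that the base class (KAutoObjectWithList in the diff) exposes a member named kernel:

    #include <iostream>

    struct KernelRef {
        const char* name = "kernel";
    };

    // Stand-in for the base that actually owns the reference.
    struct BaseWithKernel {
        explicit BaseWithKernel(KernelRef& k) : kernel{k} {}
        KernelRef& kernel;
    };

    template <typename Base>
    struct WithContainer : public Base {
        explicit WithContainer(KernelRef& k) : Base(k) {}

        void Destroy() {
            // Plain `kernel` would not compile here: the base depends on the
            // template parameter, so the member must be qualified.
            std::cout << Base::kernel.name << '\n';
        }
    };

    int main() {
        KernelRef k;
        WithContainer<BaseWithKernel> obj{k};
        obj.Destroy();
    }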