Diffstat (limited to 'src/core/hle/kernel')
 src/core/hle/kernel/handle_table.cpp             |  48
 src/core/hle/kernel/handle_table.h               |  40
 src/core/hle/kernel/hle_ipc.h                    |   4
 src/core/hle/kernel/k_auto_object.h              |  28
 src/core/hle/kernel/k_class_token.cpp            |   7
 src/core/hle/kernel/k_class_token.h              | 131
 src/core/hle/kernel/k_condition_variable.cpp     |  30
 src/core/hle/kernel/k_slab_heap.h                |   8
 src/core/hle/kernel/k_synchronization_object.cpp |  10
 src/core/hle/kernel/k_synchronization_object.h   |  11
 src/core/hle/kernel/kernel.cpp                   |  72
 src/core/hle/kernel/kernel.h                     |  11
 src/core/hle/kernel/process.cpp                  |  15
 src/core/hle/kernel/slab_helpers.h               |  32
 src/core/hle/kernel/svc.cpp                      | 268
 src/core/hle/kernel/time_manager.cpp             |  16
16 files changed, 442 insertions, 289 deletions
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index f96d34078..8eec8a3b5 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -72,6 +72,33 @@ ResultVal<Handle> HandleTable::Create(std::shared_ptr<Object> obj) {
     return MakeResult<Handle>(handle);
 }
 
+ResultCode HandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
+    ASSERT(obj != nullptr);
+
+    const u16 slot = next_free_slot;
+    if (slot >= table_size) {
+        LOG_ERROR(Kernel, "Unable to allocate Handle, too many slots in use.");
+        return ResultHandleTableFull;
+    }
+    next_free_slot = generations[slot];
+
+    const u16 generation = next_generation++;
+
+    // Overflow count so it fits in the 15 bits dedicated to the generation in the handle.
+    // Horizon OS uses zero to represent an invalid handle, so skip to 1.
+    if (next_generation >= (1 << 15)) {
+        next_generation = 1;
+    }
+
+    generations[slot] = generation;
+    objects_new[slot] = obj;
+    obj->Open();
+
+    *out_handle = generation | (slot << 15);
+
+    return RESULT_SUCCESS;
+}
+
 ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
     std::shared_ptr<Object> object = GetGeneric(handle);
     if (object == nullptr) {
@@ -81,30 +108,36 @@ ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
     return Create(std::move(object));
 }
 
-ResultCode HandleTable::Close(Handle handle) {
+bool HandleTable::Remove(Handle handle) {
     if (!IsValid(handle)) {
         LOG_ERROR(Kernel, "Handle is not valid! handle={:08X}", handle);
-        return ResultInvalidHandle;
+        return {};
     }
 
     const u16 slot = GetSlot(handle);
 
-    if (objects[slot].use_count() == 1) {
-        objects[slot]->Finalize();
+    if (objects[slot]) {
+        objects[slot]->Close();
+    }
+
+    if (objects_new[slot]) {
+        objects_new[slot]->Close();
     }
 
     objects[slot] = nullptr;
+    objects_new[slot] = nullptr;
 
     generations[slot] = next_free_slot;
     next_free_slot = slot;
-    return RESULT_SUCCESS;
+
+    return true;
 }
 
 bool HandleTable::IsValid(Handle handle) const {
     const std::size_t slot = GetSlot(handle);
     const u16 generation = GetGeneration(handle);
-
-    return slot < table_size && objects[slot] != nullptr && generations[slot] == generation;
+    const bool is_object_valid = (objects[slot] != nullptr) || (objects_new[slot] != nullptr);
+    return slot < table_size && is_object_valid && generations[slot] == generation;
 }
 
 std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
@@ -124,6 +157,7 @@ void HandleTable::Clear() {
     for (u16 i = 0; i < table_size; ++i) {
        generations[i] = static_cast<u16>(i + 1);
        objects[i] = nullptr;
+        objects_new[i] = nullptr;
    }
    next_free_slot = 0;
 }
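A quick illustration of the handle layout used by the new Add(): the 15-bit generation sits in the low bits and the slot index in the high bits. This is a standalone sketch; MakeHandle/GetSlot/GetGeneration below are inferred from `*out_handle = generation | (slot << 15)` and from `objects_new[handle >> 15]` in handle_table.h, and are not copied from the commit.

    #include <cstdint>

    using Handle = uint32_t;

    // Assumed split, mirroring "*out_handle = generation | (slot << 15)".
    constexpr Handle MakeHandle(uint16_t slot, uint16_t generation) {
        return static_cast<Handle>(generation) | (static_cast<Handle>(slot) << 15);
    }
    constexpr uint16_t GetSlot(Handle handle) {
        return static_cast<uint16_t>(handle >> 15);
    }
    constexpr uint16_t GetGeneration(Handle handle) {
        return static_cast<uint16_t>(handle & 0x7FFF); // generation 0 is reserved as "invalid"
    }

    static_assert(GetSlot(MakeHandle(3, 42)) == 3);
    static_assert(GetGeneration(MakeHandle(3, 42)) == 42);

Because the generation counter skips zero and wraps at 1 << 15, a stale handle whose slot has been reused fails the `generations[slot] == generation` check in IsValid().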
diff --git a/src/core/hle/kernel/handle_table.h b/src/core/hle/kernel/handle_table.h
index c9dab8cdd..555fb20e5 100644
--- a/src/core/hle/kernel/handle_table.h
+++ b/src/core/hle/kernel/handle_table.h
@@ -9,6 +9,8 @@
 #include <memory>
 
 #include "common/common_types.h"
+#include "core/hle/kernel/k_auto_object.h"
+#include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/result.h"
 
@@ -87,7 +89,7 @@ public:
      * @return `RESULT_SUCCESS` or one of the following errors:
      *         - `ERR_INVALID_HANDLE`: an invalid handle was passed in.
      */
-    ResultCode Close(Handle handle);
+    bool Remove(Handle handle);
 
     /// Checks if a handle is valid and points to an existing object.
     bool IsValid(Handle handle) const;
@@ -108,12 +110,48 @@ public:
         return DynamicObjectCast<T>(GetGeneric(handle));
     }
 
+    template <typename T = KAutoObject>
+    KScopedAutoObject<T> GetObject(Handle handle) const {
+        if (handle == CurrentThread) {
+            return kernel.CurrentScheduler()->GetCurrentThread()->DynamicCast<T*>();
+        } else if (handle == CurrentProcess) {
+            return kernel.CurrentProcess()->DynamicCast<T*>();
+        }
+
+        if (!IsValid(handle)) {
+            return nullptr;
+        }
+
+        auto* obj = objects_new[static_cast<u16>(handle >> 15)];
+        return obj->DynamicCast<T*>();
+    }
+
+    template <typename T = KAutoObject>
+    KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
+        if (!IsValid(handle)) {
+            return nullptr;
+        }
+        auto* obj = objects_new[static_cast<u16>(handle >> 15)];
+        return obj->DynamicCast<T*>();
+    }
+
     /// Closes all handles held in this table.
     void Clear();
 
+    // NEW IMPL
+
+    template <typename T>
+    ResultCode Add(Handle* out_handle, T* obj) {
+        static_assert(std::is_base_of<KAutoObject, T>::value);
+        return this->Add(out_handle, obj, obj->GetTypeObj().GetClassToken());
+    }
+
+    ResultCode Add(Handle* out_handle, KAutoObject* obj, u16 type);
+
 private:
     /// Stores the Object referenced by the handle or null if the slot is empty.
     std::array<std::shared_ptr<Object>, MAX_COUNT> objects;
+    std::array<KAutoObject*, MAX_COUNT> objects_new{};
 
     /**
      * The value of `next_generation` when the handle was created, used to check for validity. For
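The new Add()/GetObject() pair is meant to be used together: Add() stores a raw KAutoObject pointer and Opens it, while GetObject() returns a KScopedAutoObject that holds its own reference for the duration of the scope. A hypothetical call site, following the pattern visible in process.cpp and svc.cpp later in this change (handle_table and thread are assumed to exist at the call site):

    // Hypothetical usage sketch, not code from this commit.
    Handle thread_handle{};
    if (handle_table.Add(&thread_handle, thread).IsSuccess()) {
        // GetObject() resolves pseudo-handles and takes a scoped reference.
        KScopedAutoObject owned = handle_table.GetObject<KThread>(thread_handle);
        if (owned.IsNotNull()) {
            const u64 tid = owned->GetId(); // reference is dropped when 'owned' leaves scope
        }
    }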
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index 75617bff0..398f1c467 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -291,8 +291,8 @@ private:
     // TODO(yuriks): Check common usage of this and optimize size accordingly
     boost::container::small_vector<Handle, 8> move_handles;
     boost::container::small_vector<Handle, 8> copy_handles;
-    boost::container::small_vector<std::shared_ptr<Object>, 8> move_objects;
-    boost::container::small_vector<std::shared_ptr<Object>, 8> copy_objects;
+    boost::container::small_vector<Object*, 8> move_objects;
+    boost::container::small_vector<Object*, 8> copy_objects;
     boost::container::small_vector<std::shared_ptr<SessionRequestHandler>, 8> domain_objects;
 
     std::optional<IPC::CommandHeader> command_header;
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index 567dad204..64c012d44 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -11,9 +11,11 @@
 #include "common/common_types.h"
 #include "common/intrusive_red_black_tree.h"
 #include "core/hle/kernel/k_class_token.h"
+#include "core/hle/kernel/object.h"
 
 namespace Kernel {
 
+class KernelCore;
 class Process;
 
 #define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS)                                                \
@@ -46,7 +48,7 @@ public:
                                                                                                    \
 private:
 
-class KAutoObject {
+class KAutoObject : public Object {
 protected:
     class TypeObj {
     private:
@@ -84,11 +86,14 @@ private:
 private:
     std::atomic<u32> m_ref_count;
 
+protected:
+    KernelCore& kernel;
+
 public:
     static KAutoObject* Create(KAutoObject* ptr);
 
 public:
-    constexpr explicit KAutoObject() : m_ref_count(0) {}
+    explicit KAutoObject(KernelCore& kernel_) : Object{kernel_}, m_ref_count(0), kernel(kernel_) {}
     virtual ~KAutoObject() {}
 
     // Destroy is responsible for destroying the auto object's resources when ref_count hits zero.
@@ -97,9 +102,7 @@ public:
     }
 
     // Finalize is responsible for cleaning up resource, but does not destroy the object.
-    virtual void Finalize() {
-        UNIMPLEMENTED();
-    }
+    virtual void Finalize() {}
 
     virtual Process* GetOwner() const {
         return nullptr;
@@ -179,7 +182,12 @@ private:
 private:
     Common::IntrusiveRedBlackTreeNode list_node;
 
+protected:
+    KernelCore& kernel;
+
 public:
+    explicit KAutoObjectWithList(KernelCore& kernel_) : KAutoObject(kernel_), kernel(kernel_) {}
+
     static int Compare(const KAutoObjectWithList& lhs, const KAutoObjectWithList& rhs) {
         const u64 lid = lhs.GetId();
         const u64 rid = rhs.GetId();
@@ -208,7 +216,7 @@ private:
     friend class KScopedAutoObject;
 
 private:
-    T* m_obj;
+    T* m_obj{};
 
 private:
     constexpr void Swap(KScopedAutoObject& rhs) {
@@ -216,8 +224,8 @@ private:
     }
 
 public:
-    constexpr KScopedAutoObject() : m_obj(nullptr) { // ...
-    }
+    constexpr KScopedAutoObject() = default;
+
     constexpr KScopedAutoObject(T* o) : m_obj(o) {
         if (m_obj != nullptr) {
             m_obj->Open();
@@ -273,6 +281,10 @@ public:
         return m_obj;
     }
 
+    constexpr T* GetPointerUnsafe() const {
+        return m_obj;
+    }
+
     constexpr T* ReleasePointerUnsafe() {
         T* ret = m_obj;
         m_obj = nullptr;
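KScopedAutoObject is an RAII guard over the intrusive reference count: the pointer constructor calls Open(), and (assumption, since only the constructor appears in this hunk) the destructor is expected to call the matching Close(). A simplified, self-contained sketch of that idea, not the class from the commit:

    // Minimal sketch of the scoped-reference pattern, assuming an Open()/Close() refcount API.
    template <typename T>
    class ScopedRef {
    public:
        constexpr ScopedRef() = default;
        constexpr explicit ScopedRef(T* o) : m_obj(o) {
            if (m_obj != nullptr) {
                m_obj->Open(); // take a reference while this scope holds the object
            }
        }
        ~ScopedRef() {
            if (m_obj != nullptr) {
                m_obj->Close(); // drop the reference when the scope ends
            }
        }
        T* operator->() const { return m_obj; }
        bool IsNull() const { return m_obj == nullptr; }

    private:
        T* m_obj{};
    };

    // Example type with the assumed interface.
    struct RefCounted {
        int refs = 0;
        void Open() { ++refs; }
        void Close() { --refs; }
    };
    // Usage: { RefCounted obj; { ScopedRef<RefCounted> guard(&obj); } /* refs back to 0 */ }

GetPointerUnsafe()/ReleasePointerUnsafe() exist for call sites (such as svc.cpp and k_condition_variable.cpp below) that need the raw pointer to outlive the scope; the caller then becomes responsible for the eventual Close().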
diff --git a/src/core/hle/kernel/k_class_token.cpp b/src/core/hle/kernel/k_class_token.cpp
new file mode 100644
index 000000000..ec2f8ccad
--- /dev/null
+++ b/src/core/hle/kernel/k_class_token.cpp
@@ -0,0 +1,7 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/k_class_token.h"
+
+namespace Kernel {} // namespace Kernel
diff --git a/src/core/hle/kernel/k_class_token.h b/src/core/hle/kernel/k_class_token.h
new file mode 100644
index 000000000..89b80a341
--- /dev/null
+++ b/src/core/hle/kernel/k_class_token.h
@@ -0,0 +1,131 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <atomic>
+
+#include "common/assert.h"
+#include "common/bit_util.h"
+#include "common/common_types.h"
+
+namespace Kernel {
+
+class KAutoObject;
+
+class KClassTokenGenerator {
+public:
+    using TokenBaseType = u16;
+
+public:
+    static constexpr size_t BaseClassBits = 8;
+    static constexpr size_t FinalClassBits = (sizeof(TokenBaseType) * CHAR_BIT) - BaseClassBits;
+    // One bit per base class.
+    static constexpr size_t NumBaseClasses = BaseClassBits;
+    // Final classes are permutations of three bits.
+    static constexpr size_t NumFinalClasses = [] {
+        TokenBaseType index = 0;
+        for (size_t i = 0; i < FinalClassBits; i++) {
+            for (size_t j = i + 1; j < FinalClassBits; j++) {
+                for (size_t k = j + 1; k < FinalClassBits; k++) {
+                    index++;
+                }
+            }
+        }
+        return index;
+    }();
+
+private:
+    template <TokenBaseType Index>
+    static constexpr inline TokenBaseType BaseClassToken = BIT(Index);
+
+    template <TokenBaseType Index>
+    static constexpr inline TokenBaseType FinalClassToken = [] {
+        TokenBaseType index = 0;
+        for (size_t i = 0; i < FinalClassBits; i++) {
+            for (size_t j = i + 1; j < FinalClassBits; j++) {
+                for (size_t k = j + 1; k < FinalClassBits; k++) {
+                    if ((index++) == Index) {
+                        return static_cast<TokenBaseType>(((1ULL << i) | (1ULL << j) | (1ULL << k))
+                                                          << BaseClassBits);
+                    }
+                }
+            }
+        }
+    }();
+
+    template <typename T>
+    static constexpr inline TokenBaseType GetClassToken() {
+        static_assert(std::is_base_of<KAutoObject, T>::value);
+        if constexpr (std::is_same<T, KAutoObject>::value) {
+            static_assert(T::ObjectType == ObjectType::KAutoObject);
+            return 0;
+        } else if constexpr (!std::is_final<T>::value) {
+            static_assert(ObjectType::BaseClassesStart <= T::ObjectType &&
+                          T::ObjectType < ObjectType::BaseClassesEnd);
+            constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) -
+                                        static_cast<TokenBaseType>(ObjectType::BaseClassesStart);
+            return BaseClassToken<ClassIndex> | GetClassToken<typename T::BaseClass>();
+        } else if constexpr (ObjectType::FinalClassesStart <= T::ObjectType &&
+                             T::ObjectType < ObjectType::FinalClassesEnd) {
+            constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) -
+                                        static_cast<TokenBaseType>(ObjectType::FinalClassesStart);
+            return FinalClassToken<ClassIndex> | GetClassToken<typename T::BaseClass>();
+        } else {
+            static_assert(!std::is_same<T, T>::value, "GetClassToken: Invalid Type");
+        }
+    };
+
+public:
+    enum class ObjectType {
+        KAutoObject,
+
+        BaseClassesStart,
+
+        KSynchronizationObject = BaseClassesStart,
+        KReadableEvent,
+
+        BaseClassesEnd,
+
+        FinalClassesStart = BaseClassesEnd,
+
+        KInterruptEvent = FinalClassesStart,
+        KDebug,
+        KThread,
+        KServerPort,
+        KServerSession,
+        KClientPort,
+        KClientSession,
+        Process,
+        KResourceLimit,
+        KLightSession,
+        KPort,
+        KSession,
+        KSharedMemory,
+        KEvent,
+        KWritableEvent,
+        KLightClientSession,
+        KLightServerSession,
+        KTransferMemory,
+        KDeviceAddressSpace,
+        KSessionRequest,
+        KCodeMemory,
+
+        // NOTE: True order for these has not been determined yet.
+        KAlpha,
+        KBeta,
+
+        FinalClassesEnd = FinalClassesStart + NumFinalClasses,
+    };
+
+    template <typename T>
+    static constexpr inline TokenBaseType ClassToken = GetClassToken<T>();
+};
+
+using ClassTokenType = KClassTokenGenerator::TokenBaseType;
+
+template <typename T>
+static constexpr inline ClassTokenType ClassToken = KClassTokenGenerator::ClassToken<T>;
+
+} // namespace Kernel
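The token scheme in this header gives every non-final base class a single bit in the low byte, and every final class a distinct 3-of-8 bit pattern in the high byte; a class token is the OR of its own pattern with its base classes' bits. The nested loops in NumFinalClasses therefore count the 3-element subsets of the 8 FinalClassBits, i.e. C(8,3) = 56. A tiny check of that arithmetic (the token-matching code itself is not part of this diff, so the mask-test remark is an assumption about how the tokens are intended to be used):

    // C(n,3) for the final-class capacity; with FinalClassBits == 8 this is 56.
    constexpr int Choose3(int n) {
        return n * (n - 1) * (n - 2) / 6;
    }
    static_assert(Choose3(8) == 56);

    // Because a derived class's token contains all of its bases' bits, an
    // "is-a BaseClass" test can presumably be a simple mask check:
    //   (derived_token & base_token) == base_token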
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 170d8fa0d..930f78974 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -7,6 +7,7 @@
 #include "core/arm/exclusive_monitor.h"
 #include "core/core.h"
 #include "core/hle/kernel/k_condition_variable.h"
+#include "core/hle/kernel/k_linked_list.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/k_synchronization_object.h"
@@ -107,8 +108,8 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
 
     // Wait for the address.
     {
-        std::shared_ptr<KThread> owner_thread;
-        ASSERT(!owner_thread);
+        KScopedAutoObject<KThread> owner_thread;
+        ASSERT(owner_thread.IsNull());
         {
             KScopedSchedulerLock sl(kernel);
             cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
@@ -126,8 +127,10 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
             R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS);
 
             // Get the lock owner thread.
-            owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(handle);
-            R_UNLESS(owner_thread, ResultInvalidHandle);
+            owner_thread =
+                kernel.CurrentProcess()->GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(
+                    handle);
+            R_UNLESS(owner_thread.IsNotNull(), ResultInvalidHandle);
 
             // Update the lock.
             cur_thread->SetAddressKey(addr, value);
@@ -137,7 +140,7 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
                 cur_thread->SetMutexWaitAddressForDebugging(addr);
             }
         }
-        ASSERT(owner_thread);
+        ASSERT(owner_thread.IsNotNull());
     }
 
     // Remove the thread as a waiter from the lock owner.
@@ -182,13 +185,16 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
         thread->Wakeup();
     } else {
         // Get the previous owner.
-        auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<KThread>(
-            prev_tag & ~Svc::HandleWaitMask);
+        KThread* owner_thread =
+            kernel.CurrentProcess()->GetHandleTable()
+                .GetObjectWithoutPseudoHandle<KThread>(
+                    static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
+                .ReleasePointerUnsafe();
 
         if (owner_thread) {
             // Add the thread as a waiter on the owner.
             owner_thread->AddWaiter(thread);
-            thread_to_close = owner_thread.get();
+            thread_to_close = owner_thread;
         } else {
             // The lock was tagged with a thread that doesn't exist.
             thread->SetSyncedObject(nullptr, ResultInvalidState);
@@ -208,9 +214,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
     // Prepare for signaling.
     constexpr int MaxThreads = 16;
 
-    // TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using
-    // std::shared_ptr.
-    std::vector<std::shared_ptr<KThread>> thread_list;
+    KLinkedList<KThread> thread_list;
     std::array<KThread*, MaxThreads> thread_array;
     s32 num_to_close{};
 
@@ -228,7 +232,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
                 if (num_to_close < MaxThreads) {
                     thread_array[num_to_close++] = thread;
                 } else {
-                    thread_list.push_back(SharedFrom(thread));
+                    thread_list.push_back(*thread);
                 }
             }
 
@@ -251,7 +255,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
 
     // Close threads in the list.
    for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) {
-        (*it)->Close();
+        (*it).Close();
    }
 }
 
diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h
index aa4471d2f..a3948cd27 100644
--- a/src/core/hle/kernel/k_slab_heap.h
+++ b/src/core/hle/kernel/k_slab_heap.h
@@ -148,6 +148,14 @@ public:
         return obj;
     }
 
+    T* AllocateWithKernel(KernelCore& kernel) {
+        T* obj = static_cast<T*>(AllocateImpl());
+        if (obj != nullptr) {
+            new (obj) T(kernel);
+        }
+        return obj;
+    }
+
     void Free(T* obj) {
         FreeImpl(obj);
     }
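AllocateWithKernel() takes raw storage from the slab and constructs the object in place with placement new, forwarding the KernelCore& that the new KAutoObject constructors require. A minimal standalone sketch of that allocate-then-construct pattern (FakeKernel, Widget, and the malloc/free backing store are illustrative stand-ins, not types from the commit):

    #include <new>      // placement new
    #include <cstdlib>  // malloc/free stand in for the slab backing store

    struct FakeKernel {};                 // stand-in for KernelCore
    struct Widget {
        explicit Widget(FakeKernel& k) : kernel(k) {}
        FakeKernel& kernel;
    };

    Widget* AllocateWithKernel(FakeKernel& kernel) {
        void* raw = std::malloc(sizeof(Widget));  // stand-in for AllocateImpl()
        if (raw == nullptr) {
            return nullptr;
        }
        return new (raw) Widget(kernel);          // construct in place, as the diff does
    }

    void Free(Widget* obj) {
        obj->~Widget();    // in this standalone sketch, destroy before releasing the memory
        std::free(obj);
    }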
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
index 82f72a0fe..460b8a714 100644
--- a/src/core/hle/kernel/k_synchronization_object.cpp
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -13,6 +13,11 @@
 
 namespace Kernel {
 
+void KSynchronizationObject::Finalize() {
+    this->OnFinalizeSynchronizationObject();
+    KAutoObject::Finalize();
+}
+
 ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
                                         KSynchronizationObject** objects, const s32 num_objects,
                                         s64 timeout) {
@@ -130,10 +135,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
     return wait_result;
 }
 
-KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : Object{kernel} {}
-
-KSynchronizationObject::KSynchronizationObject(KernelCore& kernel, std::string&& name)
-    : Object{kernel, std::move(name)} {}
+KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : KAutoObjectWithList{kernel} {}
 
 KSynchronizationObject::~KSynchronizationObject() = default;
 
diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h
index 5803718fd..c8e840d89 100644
--- a/src/core/hle/kernel/k_synchronization_object.h
+++ b/src/core/hle/kernel/k_synchronization_object.h
@@ -6,7 +6,7 @@
 
 #include <vector>
 
-#include "core/hle/kernel/object.h"
+#include "core/hle/kernel/k_auto_object.h"
 #include "core/hle/result.h"
 
 namespace Kernel {
@@ -16,7 +16,9 @@ class Synchronization;
 class KThread;
 
 /// Class that represents a Kernel object that a thread can be waiting on
-class KSynchronizationObject : public Object {
+class KSynchronizationObject : public KAutoObjectWithList {
+    KERNEL_AUTOOBJECT_TRAITS(KSynchronizationObject, KAutoObject);
+
 public:
     struct ThreadListNode {
         ThreadListNode* next{};
@@ -27,15 +29,18 @@ public:
                                        KSynchronizationObject** objects, const s32 num_objects,
                                        s64 timeout);
 
+    virtual void Finalize() override;
+
     [[nodiscard]] virtual bool IsSignaled() const = 0;
 
     [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;
 
 protected:
     explicit KSynchronizationObject(KernelCore& kernel);
-    explicit KSynchronizationObject(KernelCore& kernel, std::string&& name);
     virtual ~KSynchronizationObject();
 
+    virtual void OnFinalizeSynchronizationObject() {}
+
     void NotifyAvailable(ResultCode result);
     void NotifyAvailable() {
         return this->NotifyAvailable(RESULT_SUCCESS);
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 17fe1d59f..d1359e434 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -28,6 +28,7 @@
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/client_port.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/init/init_slab_setup.h"
 #include "core/hle/kernel/k_memory_layout.h"
 #include "core/hle/kernel/k_memory_manager.h"
 #include "core/hle/kernel/k_resource_limit.h"
@@ -51,7 +52,8 @@ namespace Kernel {
 
 struct KernelCore::Impl {
     explicit Impl(Core::System& system, KernelCore& kernel)
-        : time_manager{system}, global_handle_table{kernel}, system{system} {}
+        : time_manager{system}, global_handle_table{kernel},
+          object_list_container{kernel}, system{system} {}
 
     void SetMulticore(bool is_multicore) {
         this->is_multicore = is_multicore;
@@ -69,9 +71,12 @@ struct KernelCore::Impl {
         // Derive the initial memory layout from the emulated board
         KMemoryLayout memory_layout;
         DeriveInitialMemoryLayout(memory_layout);
+        Init::InitializeSlabHeaps(system, memory_layout);
+
+        // Initialize kernel memory and resources.
         InitializeMemoryLayout(memory_layout);
         InitializeSystemResourceLimit(kernel, system.CoreTiming(), memory_layout);
-        InitializeSlabHeaps();
+        InitializePageSlab();
         InitializeSchedulers();
         InitializeSuspendThreads();
         InitializePreemption(kernel);
@@ -99,7 +104,7 @@ struct KernelCore::Impl {
 
         for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
             if (suspend_threads[i]) {
-                suspend_threads[i].reset();
+                suspend_threads[i]->Close();
             }
         }
 
@@ -189,15 +194,12 @@ struct KernelCore::Impl {
     }
 
     void InitializeSuspendThreads() {
-        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
-            std::string name = "Suspend Thread Id:" + std::to_string(i);
-            std::function<void(void*)> init_func = Core::CpuManager::GetSuspendThreadStartFunc();
-            void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
-            auto thread_res = KThread::CreateThread(
-                system, ThreadType::HighPriority, std::move(name), 0, 0, 0, static_cast<u32>(i), 0,
-                nullptr, std::move(init_func), init_func_parameter);
-
-            suspend_threads[i] = std::move(thread_res).Unwrap();
+        for (s32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            suspend_threads[core_id] = KThread::CreateWithKernel(system.Kernel());
+            ASSERT(KThread::InitializeHighPriorityThread(system, suspend_threads[core_id], {}, {},
+                                                         core_id)
+                       .IsSuccess());
+            suspend_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
         }
     }
 
@@ -232,12 +234,15 @@ struct KernelCore::Impl {
 
     // Gets the dummy KThread for the caller, allocating a new one if this is the first time
     KThread* GetHostDummyThread() {
-        const thread_local auto thread =
-            KThread::CreateThread(
-                system, ThreadType::Main, fmt::format("DummyThread:{}", GetHostThreadId()), 0,
-                KThread::DefaultThreadPriority, 0, static_cast<u32>(3), 0, nullptr)
-                .Unwrap();
-        return thread.get();
+        auto make_thread = [this]() {
+            KThread* thread = KThread::CreateWithKernel(system.Kernel());
+            ASSERT(KThread::InitializeDummyThread(thread).IsSuccess());
+            thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));
+            return thread;
+        };
+
+        thread_local auto thread = make_thread();
+        return thread;
     }
 
     /// Registers a CPU core thread by allocating a host thread ID for it
@@ -371,7 +376,8 @@ struct KernelCore::Impl {
         const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit();
 
         // Determine the size of the slab region.
-        const size_t slab_region_size = Common::AlignUp(KernelSlabHeapSize, PageSize);
+        const size_t slab_region_size =
+            Common::AlignUp(Init::CalculateTotalSlabHeapSize(), PageSize);
         ASSERT(slab_region_size <= resource_region_size);
 
         // Setup the slab region.
@@ -587,7 +593,7 @@ struct KernelCore::Impl {
                                                          "Time:SharedMemory");
     }
 
-    void InitializeSlabHeaps() {
+    void InitializePageSlab() {
         // Allocate slab heaps
         user_slab_heap_pages = std::make_unique<KSlabHeap<Page>>();
 
@@ -596,7 +602,7 @@ struct KernelCore::Impl {
         // Reserve slab heaps
         ASSERT(
             system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size));
-        // Initialize slab heaps
+        // Initialize slab heap
         user_slab_heap_pages->Initialize(
             system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase),
             user_slab_heap_size);
@@ -621,6 +627,8 @@ struct KernelCore::Impl {
     // stores all the objects in place.
     HandleTable global_handle_table;
 
+    KAutoObjectWithListContainer object_list_container;
+
    /// Map of named ports managed by the kernel, which can be retrieved using
    /// the ConnectToPort SVC.
    NamedPortTable named_ports;
@@ -648,7 +656,7 @@ struct KernelCore::Impl {
    // the release of itself
    std::unique_ptr<Common::ThreadWorker> service_thread_manager;
 
-    std::array<std::shared_ptr<KThread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
+    std::array<KThread*, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
    std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
    std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
 
@@ -687,8 +695,8 @@ std::shared_ptr<KResourceLimit> KernelCore::GetSystemResourceLimit() const {
     return impl->system_resource_limit;
 }
 
-std::shared_ptr<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
-    return impl->global_handle_table.Get<KThread>(handle);
+KScopedAutoObject<KThread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
+    return impl->global_handle_table.GetObject<KThread>(handle);
 }
 
 void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) {
@@ -781,6 +789,14 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
     return *impl->exclusive_monitor;
 }
 
+KAutoObjectWithListContainer& KernelCore::ObjectListContainer() {
+    return impl->object_list_container;
+}
+
+const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const {
+    return impl->object_list_container;
+}
+
 void KernelCore::InvalidateAllInstructionCaches() {
     for (auto& physical_core : impl->cores) {
         physical_core.ArmInterface().ClearInstructionCache();
@@ -960,4 +976,12 @@ void KernelCore::SetIsPhantomModeForSingleCore(bool value) {
     impl->SetIsPhantomModeForSingleCore(value);
 }
 
+Core::System& KernelCore::System() {
+    return impl->system;
+}
+
+const Core::System& KernelCore::System() const {
+    return impl->system;
+}
+
 } // namespace Kernel
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index a500e63bc..3f5c2aec7 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -13,6 +13,7 @@
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/memory_types.h"
 #include "core/hle/kernel/object.h"
+#include "core/hle/kernel/k_auto_object.h"
 
 namespace Core {
 class CPUInterruptHandler;
@@ -30,6 +31,7 @@ namespace Kernel {
 class ClientPort;
 class GlobalSchedulerContext;
 class HandleTable;
+class KAutoObjectWithListContainer;
 class KMemoryManager;
 class KResourceLimit;
 class KScheduler;
@@ -86,7 +88,7 @@ public:
     std::shared_ptr<KResourceLimit> GetSystemResourceLimit() const;
 
     /// Retrieves a shared pointer to a Thread instance within the thread wakeup handle table.
-    std::shared_ptr<KThread> RetrieveThreadFromGlobalHandleTable(Handle handle) const;
+    KScopedAutoObject<KThread> RetrieveThreadFromGlobalHandleTable(Handle handle) const;
 
     /// Adds the given shared pointer to an internal list of active processes.
     void AppendNewProcess(std::shared_ptr<Process> process);
@@ -143,6 +145,10 @@ public:
 
     const Core::ExclusiveMonitor& GetExclusiveMonitor() const;
 
+    KAutoObjectWithListContainer& ObjectListContainer();
+
+    const KAutoObjectWithListContainer& ObjectListContainer() const;
+
     std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts();
 
     const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts() const;
@@ -243,6 +249,9 @@ public:
     bool IsPhantomModeForSingleCore() const;
     void SetIsPhantomModeForSingleCore(bool value);
 
+    Core::System& System();
+    const Core::System& System() const;
+
 private:
     friend class Object;
     friend class Process;
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index e35deb8e2..796dca5ef 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -40,14 +40,15 @@ namespace {
 void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, VAddr stack_top) {
     const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart();
     ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1));
-    auto thread_res =
-        KThread::CreateUserThread(system, ThreadType::User, "main", entry_point, priority, 0,
-                                  owner_process.GetIdealCoreId(), stack_top, &owner_process);
 
-    std::shared_ptr<KThread> thread = std::move(thread_res).Unwrap();
+    KThread* thread = KThread::CreateWithKernel(system.Kernel());
+    ASSERT(KThread::InitializeUserThread(system, thread, entry_point, 0, stack_top, priority,
+                                         owner_process.GetIdealCoreId(), &owner_process)
+               .IsSuccess());
 
     // Register 1 must be a handle to the main thread
-    const Handle thread_handle = owner_process.GetHandleTable().Create(thread).Unwrap();
+    Handle thread_handle{};
+    owner_process.GetHandleTable().Add(&thread_handle, thread);
     thread->GetContext32().cpu_registers[0] = 0;
     thread->GetContext64().cpu_registers[0] = 0;
     thread->GetContext32().cpu_registers[1] = thread_handle;
@@ -337,12 +338,12 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) {
 void Process::PrepareForTermination() {
     ChangeStatus(ProcessStatus::Exiting);
 
-    const auto stop_threads = [this](const std::vector<std::shared_ptr<KThread>>& thread_list) {
+    const auto stop_threads = [this](const std::vector<KThread*>& thread_list) {
         for (auto& thread : thread_list) {
             if (thread->GetOwnerProcess() != this)
                 continue;
 
-            if (thread.get() == kernel.CurrentScheduler()->GetCurrentThread())
+            if (thread == kernel.CurrentScheduler()->GetCurrentThread())
                 continue;
 
             // TODO(Subv): When are the other running/ready threads terminated?
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h
index d95095da3..ae9d097da 100644
--- a/src/core/hle/kernel/slab_helpers.h
+++ b/src/core/hle/kernel/slab_helpers.h
@@ -14,6 +14,7 @@
 #include "core/hle/kernel/k_auto_object_container.h"
 #include "core/hle/kernel/k_light_lock.h"
 #include "core/hle/kernel/k_slab_heap.h"
+#include "core/hle/kernel/kernel.h"
 
 namespace Kernel {
 
@@ -66,13 +67,17 @@ class KAutoObjectWithSlabHeapAndContainer : public Base {
 
 private:
     static inline KSlabHeap<Derived> s_slab_heap;
-    static inline KAutoObjectWithListContainer s_container;
+    KernelCore& m_kernel;
 
 private:
     static Derived* Allocate() {
         return s_slab_heap.Allocate();
     }
 
+    static Derived* AllocateWithKernel(KernelCore& kernel) {
+        return s_slab_heap.AllocateWithKernel(kernel);
+    }
+
     static void Free(Derived* obj) {
         s_slab_heap.Free(obj);
     }
@@ -80,19 +85,20 @@ private:
 public:
     class ListAccessor : public KAutoObjectWithListContainer::ListAccessor {
     public:
-        ListAccessor() : KAutoObjectWithListContainer::ListAccessor(s_container) {}
+        ListAccessor()
+            : KAutoObjectWithListContainer::ListAccessor(m_kernel.ObjectListContainer()) {}
         ~ListAccessor() = default;
     };
 
 public:
-    constexpr KAutoObjectWithSlabHeapAndContainer() : Base() {}
+    KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel) : Base(kernel), m_kernel(kernel) {}
     virtual ~KAutoObjectWithSlabHeapAndContainer() {}
 
     virtual void Destroy() override {
         const bool is_initialized = this->IsInitialized();
         uintptr_t arg = 0;
         if (is_initialized) {
-            s_container.Unregister(this);
+            m_kernel.ObjectListContainer().Unregister(this);
             arg = this->GetPostDestroyArgument();
             this->Finalize();
         }
@@ -114,21 +120,29 @@ public:
     }
 
 public:
-    static void InitializeSlabHeap(void* memory, size_t memory_size) {
+    static void InitializeSlabHeap(KernelCore& kernel, void* memory, size_t memory_size) {
         s_slab_heap.Initialize(memory, memory_size);
-        s_container.Initialize();
+        kernel.ObjectListContainer().Initialize();
     }
 
     static Derived* Create() {
         Derived* obj = Allocate();
-        if (AMS_LIKELY(obj != nullptr)) {
+        if (obj != nullptr) {
+            KAutoObject::Create(obj);
+        }
+        return obj;
+    }
+
+    static Derived* CreateWithKernel(KernelCore& kernel) {
+        Derived* obj = AllocateWithKernel(kernel);
+        if (obj != nullptr) {
             KAutoObject::Create(obj);
         }
         return obj;
     }
 
-    static void Register(Derived* obj) {
-        return s_container.Register(obj);
+    static void Register(KernelCore& kernel, Derived* obj) {
+        return kernel.ObjectListContainer().Register(obj);
     }
 
     static size_t GetObjectSize() {
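Slab-backed objects are now created through CreateWithKernel(), then initialized, registered with the kernel-owned object list, and reference-counted, as the kernel.cpp and process.cpp hunks above show. A condensed, hypothetical outline of that lifecycle (KSomeObject, its Initialize(), and handle_table are placeholders; each real Derived type in this commit has its own Initialize* entry point):

    // Hypothetical call site following the pattern used elsewhere in this commit.
    KSomeObject* obj = KSomeObject::CreateWithKernel(kernel); // slab-allocate + placement-construct
    ASSERT(obj->Initialize().IsSuccess());                    // type-specific setup (placeholder)
    KSomeObject::Register(kernel, obj);                       // track in kernel.ObjectListContainer()

    Handle handle{};
    handle_table.Add(&handle, obj); // the table takes its own reference via Open()

    // If this code path keeps no long-lived raw pointer of its own, it would then
    // Close() its creation reference; the object destroys itself once the count hits zero.
    // (Whether a given call site does this depends on its ownership needs.)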
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 395962885..813450115 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp | |||
| @@ -355,7 +355,7 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) { | |||
| 355 | KScopedSchedulerLock lock(kernel); | 355 | KScopedSchedulerLock lock(kernel); |
| 356 | thread->SetState(ThreadState::Waiting); | 356 | thread->SetState(ThreadState::Waiting); |
| 357 | thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); | 357 | thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); |
| 358 | session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming()); | 358 | session->SendSyncRequest(thread, system.Memory(), system.CoreTiming()); |
| 359 | } | 359 | } |
| 360 | 360 | ||
| 361 | KSynchronizationObject* dummy{}; | 361 | KSynchronizationObject* dummy{}; |
| @@ -368,18 +368,13 @@ static ResultCode SendSyncRequest32(Core::System& system, Handle handle) { | |||
| 368 | 368 | ||
| 369 | /// Get the ID for the specified thread. | 369 | /// Get the ID for the specified thread. |
| 370 | static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) { | 370 | static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) { |
| 371 | LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle); | ||
| 372 | |||
| 373 | // Get the thread from its handle. | 371 | // Get the thread from its handle. |
| 374 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | 372 | KScopedAutoObject thread = |
| 375 | const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); | 373 | system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle); |
| 376 | if (!thread) { | 374 | R_UNLESS(thread.IsNotNull(), ResultInvalidHandle); |
| 377 | LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle); | ||
| 378 | return ResultInvalidHandle; | ||
| 379 | } | ||
| 380 | 375 | ||
| 381 | // Get the thread's id. | 376 | // Get the thread's id. |
| 382 | *out_thread_id = thread->GetThreadID(); | 377 | *out_thread_id = thread->GetId(); |
| 383 | return RESULT_SUCCESS; | 378 | return RESULT_SUCCESS; |
| 384 | } | 379 | } |
| 385 | 380 | ||
| @@ -396,30 +391,7 @@ static ResultCode GetThreadId32(Core::System& system, u32* out_thread_id_low, | |||
| 396 | 391 | ||
| 397 | /// Gets the ID of the specified process or a specified thread's owning process. | 392 | /// Gets the ID of the specified process or a specified thread's owning process. |
| 398 | static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle handle) { | 393 | static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle handle) { |
| 399 | LOG_DEBUG(Kernel_SVC, "called handle=0x{:08X}", handle); | 394 | __debugbreak(); |
| 400 | |||
| 401 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | ||
| 402 | const std::shared_ptr<Process> process = handle_table.Get<Process>(handle); | ||
| 403 | if (process) { | ||
| 404 | *process_id = process->GetProcessID(); | ||
| 405 | return RESULT_SUCCESS; | ||
| 406 | } | ||
| 407 | |||
| 408 | const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle); | ||
| 409 | if (thread) { | ||
| 410 | const Process* const owner_process = thread->GetOwnerProcess(); | ||
| 411 | if (!owner_process) { | ||
| 412 | LOG_ERROR(Kernel_SVC, "Non-existent owning process encountered."); | ||
| 413 | return ResultInvalidHandle; | ||
| 414 | } | ||
| 415 | |||
| 416 | *process_id = owner_process->GetProcessID(); | ||
| 417 | return RESULT_SUCCESS; | ||
| 418 | } | ||
| 419 | |||
| 420 | // NOTE: This should also handle debug objects before returning. | ||
| 421 | |||
| 422 | LOG_ERROR(Kernel_SVC, "Handle does not exist, handle=0x{:08X}", handle); | ||
| 423 | return ResultInvalidHandle; | 395 | return ResultInvalidHandle; |
| 424 | } | 396 | } |
| 425 | 397 | ||
| @@ -460,14 +432,30 @@ static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr ha | |||
| 460 | 432 | ||
| 461 | for (u64 i = 0; i < handle_count; ++i) { | 433 | for (u64 i = 0; i < handle_count; ++i) { |
| 462 | const Handle handle = memory.Read32(handles_address + i * sizeof(Handle)); | 434 | const Handle handle = memory.Read32(handles_address + i * sizeof(Handle)); |
| 463 | const auto object = handle_table.Get<KSynchronizationObject>(handle); | ||
| 464 | 435 | ||
| 465 | if (object == nullptr) { | 436 | bool succeeded{}; |
| 466 | LOG_ERROR(Kernel_SVC, "Object is a nullptr"); | 437 | { |
| 467 | return ResultInvalidHandle; | 438 | auto object = handle_table.Get<KSynchronizationObject>(handle); |
| 439 | if (object) { | ||
| 440 | objects[i] = object.get(); | ||
| 441 | succeeded = true; | ||
| 442 | } | ||
| 468 | } | 443 | } |
| 469 | 444 | ||
| 470 | objects[i] = object.get(); | 445 | // TODO(bunnei): WORKAROUND WHILE WE HAVE TWO HANDLE TABLES |
| 446 | if (!succeeded) { | ||
| 447 | { | ||
| 448 | auto object = handle_table.GetObject<KSynchronizationObject>(handle); | ||
| 449 | |||
| 450 | if (object.IsNull()) { | ||
| 451 | LOG_ERROR(Kernel_SVC, "Object is a nullptr"); | ||
| 452 | return ResultInvalidHandle; | ||
| 453 | } | ||
| 454 | |||
| 455 | objects[i] = object.GetPointerUnsafe(); | ||
| 456 | succeeded = true; | ||
| 457 | } | ||
| 458 | } | ||
| 471 | } | 459 | } |
| 472 | return KSynchronizationObject::Wait(kernel, index, objects.data(), | 460 | return KSynchronizationObject::Wait(kernel, index, objects.data(), |
| 473 | static_cast<s32>(objects.size()), nano_seconds); | 461 | static_cast<s32>(objects.size()), nano_seconds); |
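The loop above is the temporary dual lookup flagged by the TODO: during the migration a handle may still live in the legacy shared_ptr table or already in the new KAutoObject table, so both are consulted before giving up. A self-contained toy of that fallback shape; every name here is illustrative, not yuzu's API:

#include <cstdint>
#include <memory>
#include <unordered_map>

using Handle = std::uint32_t;
struct SyncObject {};

// Toy model of the two-handle-table workaround: consult the legacy shared_ptr
// table first, then the migrated table.
struct DualTable {
    std::unordered_map<Handle, std::shared_ptr<SyncObject>> legacy;
    std::unordered_map<Handle, SyncObject*> migrated;

    SyncObject* Lookup(Handle handle) const {
        if (const auto it = legacy.find(handle); it != legacy.end()) {
            return it->second.get();
        }
        if (const auto it = migrated.find(handle); it != migrated.end()) {
            return it->second;
        }
        return nullptr; // the SVC maps this to ResultInvalidHandle
    }
};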
| @@ -481,19 +469,7 @@ static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u | |||
| 481 | 469 | ||
| 482 | /// Resumes a thread waiting on WaitSynchronization | 470 | /// Resumes a thread waiting on WaitSynchronization |
| 483 | static ResultCode CancelSynchronization(Core::System& system, Handle thread_handle) { | 471 | static ResultCode CancelSynchronization(Core::System& system, Handle thread_handle) { |
| 484 | LOG_TRACE(Kernel_SVC, "called thread=0x{:X}", thread_handle); | 472 | __debugbreak(); |
| 485 | |||
| 486 | // Get the thread from its handle. | ||
| 487 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | ||
| 488 | std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); | ||
| 489 | |||
| 490 | if (!thread) { | ||
| 491 | LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle); | ||
| 492 | return ResultInvalidHandle; | ||
| 493 | } | ||
| 494 | |||
| 495 | // Cancel the thread's wait. | ||
| 496 | thread->WaitCancel(); | ||
| 497 | return RESULT_SUCCESS; | 473 | return RESULT_SUCCESS; |
| 498 | } | 474 | } |
| 499 | 475 | ||
| @@ -899,9 +875,10 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha | |||
| 899 | return ResultInvalidCombination; | 875 | return ResultInvalidCombination; |
| 900 | } | 876 | } |
| 901 | 877 | ||
| 902 | const auto thread = system.Kernel().CurrentProcess()->GetHandleTable().Get<KThread>( | 878 | KScopedAutoObject thread = |
| 903 | static_cast<Handle>(handle)); | 879 | system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>( |
| 904 | if (!thread) { | 880 | static_cast<Handle>(handle)); |
| 881 | if (thread.IsNull()) { | ||
| 905 | LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", | 882 | LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", |
| 906 | static_cast<Handle>(handle)); | 883 | static_cast<Handle>(handle)); |
| 907 | return ResultInvalidHandle; | 884 | return ResultInvalidHandle; |
| @@ -910,7 +887,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha | |||
| 910 | const auto& core_timing = system.CoreTiming(); | 887 | const auto& core_timing = system.CoreTiming(); |
| 911 | const auto& scheduler = *system.Kernel().CurrentScheduler(); | 888 | const auto& scheduler = *system.Kernel().CurrentScheduler(); |
| 912 | const auto* const current_thread = scheduler.GetCurrentThread(); | 889 | const auto* const current_thread = scheduler.GetCurrentThread(); |
| 913 | const bool same_thread = current_thread == thread.get(); | 890 | const bool same_thread = current_thread == thread.GetPointerUnsafe(); |
| 914 | 891 | ||
| 915 | const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks(); | 892 | const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks(); |
| 916 | u64 out_ticks = 0; | 893 | u64 out_ticks = 0; |
| @@ -1055,45 +1032,7 @@ static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size | |||
| 1055 | /// Sets the thread activity | 1032 | /// Sets the thread activity |
| 1056 | static ResultCode SetThreadActivity(Core::System& system, Handle thread_handle, | 1033 | static ResultCode SetThreadActivity(Core::System& system, Handle thread_handle, |
| 1057 | ThreadActivity thread_activity) { | 1034 | ThreadActivity thread_activity) { |
| 1058 | LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", thread_handle, | 1035 | __debugbreak(); |
| 1059 | thread_activity); | ||
| 1060 | |||
| 1061 | // Validate the activity. | ||
| 1062 | constexpr auto IsValidThreadActivity = [](ThreadActivity activity) { | ||
| 1063 | return activity == ThreadActivity::Runnable || activity == ThreadActivity::Paused; | ||
| 1064 | }; | ||
| 1065 | if (!IsValidThreadActivity(thread_activity)) { | ||
| 1066 | LOG_ERROR(Kernel_SVC, "Invalid thread activity value provided (activity={})", | ||
| 1067 | thread_activity); | ||
| 1068 | return ResultInvalidEnumValue; | ||
| 1069 | } | ||
| 1070 | |||
| 1071 | // Get the thread from its handle. | ||
| 1072 | auto& kernel = system.Kernel(); | ||
| 1073 | const auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); | ||
| 1074 | const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); | ||
| 1075 | if (!thread) { | ||
| 1076 | LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={:08X})", thread_handle); | ||
| 1077 | return ResultInvalidHandle; | ||
| 1078 | } | ||
| 1079 | |||
| 1080 | // Check that the activity is being set on a non-current thread for the current process. | ||
| 1081 | if (thread->GetOwnerProcess() != kernel.CurrentProcess()) { | ||
| 1082 | LOG_ERROR(Kernel_SVC, "Invalid owning process for the created thread."); | ||
| 1083 | return ResultInvalidHandle; | ||
| 1084 | } | ||
| 1085 | if (thread.get() == GetCurrentThreadPointer(kernel)) { | ||
| 1086 | LOG_ERROR(Kernel_SVC, "Thread is busy"); | ||
| 1087 | return ResultBusy; | ||
| 1088 | } | ||
| 1089 | |||
| 1090 | // Set the activity. | ||
| 1091 | const auto set_result = thread->SetActivity(thread_activity); | ||
| 1092 | if (set_result.IsError()) { | ||
| 1093 | LOG_ERROR(Kernel_SVC, "Failed to set thread activity."); | ||
| 1094 | return set_result; | ||
| 1095 | } | ||
| 1096 | |||
| 1097 | return RESULT_SUCCESS; | 1036 | return RESULT_SUCCESS; |
| 1098 | } | 1037 | } |
| 1099 | 1038 | ||
| @@ -1107,36 +1046,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Hand | |||
| 1107 | LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context, | 1046 | LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context, |
| 1108 | thread_handle); | 1047 | thread_handle); |
| 1109 | 1048 | ||
| 1110 | // Get the thread from its handle. | 1049 | __debugbreak(); |
| 1111 | const auto* current_process = system.Kernel().CurrentProcess(); | ||
| 1112 | const std::shared_ptr<KThread> thread = | ||
| 1113 | current_process->GetHandleTable().Get<KThread>(thread_handle); | ||
| 1114 | if (!thread) { | ||
| 1115 | LOG_ERROR(Kernel_SVC, "Invalid thread handle provided (handle={})", thread_handle); | ||
| 1116 | return ResultInvalidHandle; | ||
| 1117 | } | ||
| 1118 | |||
| 1119 | // Require the handle be to a non-current thread in the current process. | ||
| 1120 | if (thread->GetOwnerProcess() != current_process) { | ||
| 1121 | LOG_ERROR(Kernel_SVC, "Thread owning process is not the current process."); | ||
| 1122 | return ResultInvalidHandle; | ||
| 1123 | } | ||
| 1124 | if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) { | ||
| 1125 | LOG_ERROR(Kernel_SVC, "Current thread is busy."); | ||
| 1126 | return ResultBusy; | ||
| 1127 | } | ||
| 1128 | |||
| 1129 | // Get the thread context. | ||
| 1130 | std::vector<u8> context; | ||
| 1131 | const auto context_result = thread->GetThreadContext3(context); | ||
| 1132 | if (context_result.IsError()) { | ||
| 1133 | LOG_ERROR(Kernel_SVC, "Unable to successfully retrieve thread context (result: {})", | ||
| 1134 | context_result.raw); | ||
| 1135 | return context_result; | ||
| 1136 | } | ||
| 1137 | |||
| 1138 | // Copy the thread context to user space. | ||
| 1139 | system.Memory().WriteBlock(out_context, context.data(), context.size()); | ||
| 1140 | 1050 | ||
| 1141 | return RESULT_SUCCESS; | 1051 | return RESULT_SUCCESS; |
| 1142 | } | 1052 | } |
| @@ -1164,30 +1074,26 @@ static ResultCode GetThreadPriority32(Core::System& system, u32* out_priority, H | |||
| 1164 | } | 1074 | } |
| 1165 | 1075 | ||
| 1166 | /// Sets the priority for the specified thread | 1076 | /// Sets the priority for the specified thread |
| 1167 | static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 priority) { | 1077 | static ResultCode SetThreadPriority(Core::System& system, Handle thread_handle, u32 priority) { |
| 1168 | LOG_TRACE(Kernel_SVC, "called"); | 1078 | // Get the current process. |
| 1079 | Process& process = *system.Kernel().CurrentProcess(); | ||
| 1169 | 1080 | ||
| 1170 | // Validate the priority. | 1081 | // Validate the priority. |
| 1171 | if (HighestThreadPriority > priority || priority > LowestThreadPriority) { | 1082 | R_UNLESS(HighestThreadPriority <= priority && priority <= LowestThreadPriority, |
| 1172 | LOG_ERROR(Kernel_SVC, "Invalid thread priority specified (priority={})", priority); | 1083 | ResultInvalidPriority); |
| 1173 | return ResultInvalidPriority; | 1084 | R_UNLESS(process.CheckThreadPriority(priority), ResultInvalidPriority); |
| 1174 | } | ||
| 1175 | 1085 | ||
| 1176 | // Get the thread from its handle. | 1086 | // Get the thread from its handle. |
| 1177 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | 1087 | KScopedAutoObject thread = process.GetHandleTable().GetObject<KThread>(thread_handle); |
| 1178 | const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle); | 1088 | R_UNLESS(thread.IsNotNull(), ResultInvalidHandle); |
| 1179 | if (!thread) { | ||
| 1180 | LOG_ERROR(Kernel_SVC, "Invalid handle provided (handle={:08X})", handle); | ||
| 1181 | return ResultInvalidHandle; | ||
| 1182 | } | ||
| 1183 | 1089 | ||
| 1184 | // Set the thread priority. | 1090 | // Set the thread priority. |
| 1185 | thread->SetBasePriority(priority); | 1091 | thread->SetBasePriority(priority); |
| 1186 | return RESULT_SUCCESS; | 1092 | return RESULT_SUCCESS; |
| 1187 | } | 1093 | } |
| 1188 | 1094 | ||
| 1189 | static ResultCode SetThreadPriority32(Core::System& system, Handle handle, u32 priority) { | 1095 | static ResultCode SetThreadPriority32(Core::System& system, Handle thread_handle, u32 priority) { |
| 1190 | return SetThreadPriority(system, handle, priority); | 1096 | return SetThreadPriority(system, thread_handle, priority); |
| 1191 | } | 1097 | } |
| 1192 | 1098 | ||
| 1193 | /// Get which CPU core is executing the current thread | 1099 | /// Get which CPU core is executing the current thread |
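The rewritten SetThreadPriority leans on R_UNLESS(cond, result), which bails out of the enclosing function with the given result code when the condition does not hold. A minimal sketch of how such a macro can be written; the actual definitions used by yuzu and Atmosphère may carry extra bookkeeping:

// Sketch only: an R_UNLESS-style early-return macro.
#define R_UNLESS(cond, result) \
    do {                       \
        if (!(cond)) {         \
            return (result);   \
        }                      \
    } while (0)

// Used as in the hunk above, e.g.:
//   R_UNLESS(thread.IsNotNull(), ResultInvalidHandle);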
| @@ -1480,7 +1386,7 @@ static void ExitProcess32(Core::System& system) { | |||
| 1480 | ExitProcess(system); | 1386 | ExitProcess(system); |
| 1481 | } | 1387 | } |
| 1482 | 1388 | ||
| 1483 | static constexpr bool IsValidCoreId(int32_t core_id) { | 1389 | static constexpr bool IsValidVirtualCoreId(int32_t core_id) { |
| 1484 | return (0 <= core_id && core_id < static_cast<int32_t>(Core::Hardware::NUM_CPU_CORES)); | 1390 | return (0 <= core_id && core_id < static_cast<int32_t>(Core::Hardware::NUM_CPU_CORES)); |
| 1485 | } | 1391 | } |
| 1486 | 1392 | ||
| @@ -1500,7 +1406,7 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e | |||
| 1500 | } | 1406 | } |
| 1501 | 1407 | ||
| 1502 | // Validate arguments. | 1408 | // Validate arguments. |
| 1503 | if (!IsValidCoreId(core_id)) { | 1409 | if (!IsValidVirtualCoreId(core_id)) { |
| 1504 | LOG_ERROR(Kernel_SVC, "Invalid Core ID specified (id={})", core_id); | 1410 | LOG_ERROR(Kernel_SVC, "Invalid Core ID specified (id={})", core_id); |
| 1505 | return ResultInvalidCoreId; | 1411 | return ResultInvalidCoreId; |
| 1506 | } | 1412 | } |
| @@ -1822,8 +1728,11 @@ static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high) | |||
| 1822 | static ResultCode CloseHandle(Core::System& system, Handle handle) { | 1728 | static ResultCode CloseHandle(Core::System& system, Handle handle) { |
| 1823 | LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle); | 1729 | LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle); |
| 1824 | 1730 | ||
| 1825 | auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | 1731 | // Remove the handle. |
| 1826 | return handle_table.Close(handle); | 1732 | R_UNLESS(system.Kernel().CurrentProcess()->GetHandleTable().Remove(handle), |
| 1733 | ResultInvalidHandle); | ||
| 1734 | |||
| 1735 | return RESULT_SUCCESS; | ||
| 1827 | } | 1736 | } |
| 1828 | 1737 | ||
| 1829 | static ResultCode CloseHandle32(Core::System& system, Handle handle) { | 1738 | static ResultCode CloseHandle32(Core::System& system, Handle handle) { |
| @@ -1925,23 +1834,7 @@ static ResultCode CreateTransferMemory32(Core::System& system, Handle* handle, u | |||
| 1925 | 1834 | ||
| 1926 | static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id, | 1835 | static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id, |
| 1927 | u64* out_affinity_mask) { | 1836 | u64* out_affinity_mask) { |
| 1928 | LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle); | 1837 | __debugbreak(); |
| 1929 | |||
| 1930 | // Get the thread from its handle. | ||
| 1931 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | ||
| 1932 | const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); | ||
| 1933 | if (!thread) { | ||
| 1934 | LOG_ERROR(Kernel_SVC, "Invalid thread handle specified (handle={:08X})", thread_handle); | ||
| 1935 | return ResultInvalidHandle; | ||
| 1936 | } | ||
| 1937 | |||
| 1938 | // Get the core mask. | ||
| 1939 | const auto result = thread->GetCoreMask(out_core_id, out_affinity_mask); | ||
| 1940 | if (result.IsError()) { | ||
| 1941 | LOG_ERROR(Kernel_SVC, "Unable to successfully retrieve core mask (result={})", result.raw); | ||
| 1942 | return result; | ||
| 1943 | } | ||
| 1944 | |||
| 1945 | return RESULT_SUCCESS; | 1838 | return RESULT_SUCCESS; |
| 1946 | } | 1839 | } |
| 1947 | 1840 | ||
| @@ -1956,58 +1849,33 @@ static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle | |||
| 1956 | 1849 | ||
| 1957 | static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id, | 1850 | static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id, |
| 1958 | u64 affinity_mask) { | 1851 | u64 affinity_mask) { |
| 1959 | LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core_id=0x{:X}, affinity_mask=0x{:016X}", | ||
| 1960 | thread_handle, core_id, affinity_mask); | ||
| 1961 | |||
| 1962 | const auto& current_process = *system.Kernel().CurrentProcess(); | ||
| 1963 | |||
| 1964 | // Determine the core id/affinity mask. | 1852 | // Determine the core id/affinity mask. |
| 1965 | if (core_id == Svc::IdealCoreUseProcessValue) { | 1853 | if (core_id == IdealCoreUseProcessValue) { |
| 1966 | core_id = current_process.GetIdealCoreId(); | 1854 | core_id = system.Kernel().CurrentProcess()->GetIdealCoreId(); |
| 1967 | affinity_mask = (1ULL << core_id); | 1855 | affinity_mask = (1ULL << core_id); |
| 1968 | } else { | 1856 | } else { |
| 1969 | // Validate the affinity mask. | 1857 | // Validate the affinity mask. |
| 1970 | const u64 process_core_mask = current_process.GetCoreMask(); | 1858 | const u64 process_core_mask = system.Kernel().CurrentProcess()->GetCoreMask(); |
| 1971 | if ((affinity_mask | process_core_mask) != process_core_mask) { | 1859 | R_UNLESS((affinity_mask | process_core_mask) == process_core_mask, ResultInvalidCoreId); |
| 1972 | LOG_ERROR(Kernel_SVC, | 1860 | R_UNLESS(affinity_mask != 0, ResultInvalidCombination); |
| 1973 | "Affinity mask does match the process core mask (affinity mask={:016X}, core " | ||
| 1974 | "mask={:016X})", | ||
| 1975 | affinity_mask, process_core_mask); | ||
| 1976 | return ResultInvalidCoreId; | ||
| 1977 | } | ||
| 1978 | if (affinity_mask == 0) { | ||
| 1979 | LOG_ERROR(Kernel_SVC, "Affinity mask is zero."); | ||
| 1980 | return ResultInvalidCombination; | ||
| 1981 | } | ||
| 1982 | 1861 | ||
| 1983 | // Validate the core id. | 1862 | // Validate the core id. |
| 1984 | if (IsValidCoreId(core_id)) { | 1863 | if (IsValidVirtualCoreId(core_id)) { |
| 1985 | if (((1ULL << core_id) & affinity_mask) == 0) { | 1864 | R_UNLESS(((1ULL << core_id) & affinity_mask) != 0, ResultInvalidCombination); |
| 1986 | LOG_ERROR(Kernel_SVC, "Invalid core ID (ID={})", core_id); | ||
| 1987 | return ResultInvalidCombination; | ||
| 1988 | } | ||
| 1989 | } else { | 1865 | } else { |
| 1990 | if (core_id != IdealCoreNoUpdate && core_id != IdealCoreDontCare) { | 1866 | R_UNLESS(core_id == IdealCoreNoUpdate || core_id == IdealCoreDontCare, |
| 1991 | LOG_ERROR(Kernel_SVC, "Invalid core ID (ID={})", core_id); | 1867 | ResultInvalidCoreId); |
| 1992 | return ResultInvalidCoreId; | ||
| 1993 | } | ||
| 1994 | } | 1868 | } |
| 1995 | } | 1869 | } |
| 1996 | 1870 | ||
| 1997 | // Get the thread from its handle. | 1871 | // Get the thread from its handle. |
| 1998 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | 1872 | KScopedAutoObject thread = |
| 1999 | const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); | 1873 | system.Kernel().CurrentProcess()->GetHandleTable().GetObject<KThread>(thread_handle); |
| 2000 | if (!thread) { | 1874 | R_UNLESS(thread.IsNotNull(), ResultInvalidHandle); |
| 2001 | LOG_ERROR(Kernel_SVC, "Invalid thread handle (handle={:08X})", thread_handle); | ||
| 2002 | return ResultInvalidHandle; | ||
| 2003 | } | ||
| 2004 | 1875 | ||
| 2005 | // Set the core mask. | 1876 | // Set the core mask. |
| 2006 | const auto set_result = thread->SetCoreMask(core_id, affinity_mask); | 1877 | R_TRY(thread->SetCoreMask(core_id, affinity_mask)); |
| 2007 | if (set_result.IsError()) { | 1878 | |
| 2008 | LOG_ERROR(Kernel_SVC, "Unable to successfully set core mask (result={})", set_result.raw); | ||
| 2009 | return set_result; | ||
| 2010 | } | ||
| 2011 | return RESULT_SUCCESS; | 1879 | return RESULT_SUCCESS; |
| 2012 | } | 1880 | } |
| 2013 | 1881 | ||
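SetThreadCoreMask also uses R_TRY(expr), which evaluates an expression producing a ResultCode and returns it immediately on failure, replacing the deleted call-check-log-return block. A hedged sketch, assuming ResultCode exposes IsError():

// Sketch only: an R_TRY-style propagation macro; the real definition may differ.
#define R_TRY(expr)                        \
    do {                                   \
        const auto r_try_result = (expr);  \
        if (r_try_result.IsError()) {      \
            return r_try_result;           \
        }                                  \
    } while (0)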
| @@ -2105,7 +1973,7 @@ static ResultCode CreateEvent(Core::System& system, Handle* out_write, Handle* o | |||
| 2105 | *out_write = *write_create_result; | 1973 | *out_write = *write_create_result; |
| 2106 | 1974 | ||
| 2107 | // Add the writable event to the handle table. | 1975 | // Add the writable event to the handle table. |
| 2108 | auto handle_guard = SCOPE_GUARD({ handle_table.Close(*write_create_result); }); | 1976 | auto handle_guard = SCOPE_GUARD({ handle_table.Remove(*write_create_result); }); |
| 2109 | 1977 | ||
| 2110 | // Add the readable event to the handle table. | 1978 | // Add the readable event to the handle table. |
| 2111 | const auto read_create_result = handle_table.Create(event->GetReadableEvent()); | 1979 | const auto read_create_result = handle_table.Create(event->GetReadableEvent()); |
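CreateEvent keeps its SCOPE_GUARD so the writable handle is rolled back if inserting the readable one fails, now via Remove instead of Close. A generic scope-guard sketch with hypothetical names, showing the cancel-on-success pattern such a guard typically supports:

#include <utility>

// Generic scope-guard sketch: runs the callback on scope exit unless Cancel()
// is called, which is how a half-created handle pair can be rolled back when
// the second insertion fails.
template <typename F>
class ScopeGuard {
public:
    explicit ScopeGuard(F&& f) : func{std::forward<F>(f)} {}
    ~ScopeGuard() {
        if (active) {
            func();
        }
    }
    void Cancel() { active = false; } // call once both handles were added

    ScopeGuard(const ScopeGuard&) = delete;
    ScopeGuard& operator=(const ScopeGuard&) = delete;

private:
    F func;
    bool active{true};
};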
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp index fd0630019..59ebfc51f 100644 --- a/src/core/hle/kernel/time_manager.cpp +++ b/src/core/hle/kernel/time_manager.cpp | |||
| @@ -15,16 +15,12 @@ | |||
| 15 | namespace Kernel { | 15 | namespace Kernel { |
| 16 | 16 | ||
| 17 | TimeManager::TimeManager(Core::System& system_) : system{system_} { | 17 | TimeManager::TimeManager(Core::System& system_) : system{system_} { |
| 18 | time_manager_event_type = Core::Timing::CreateEvent( | 18 | time_manager_event_type = |
| 19 | "Kernel::TimeManagerCallback", | 19 | Core::Timing::CreateEvent("Kernel::TimeManagerCallback", |
| 20 | [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { | 20 | [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { |
| 21 | std::shared_ptr<KThread> thread; | 21 | KThread* thread = reinterpret_cast<KThread*>(thread_handle); |
| 22 | { | 22 | thread->Wakeup(); |
| 23 | std::lock_guard lock{mutex}; | 23 | }); |
| 24 | thread = SharedFrom<KThread>(reinterpret_cast<KThread*>(thread_handle)); | ||
| 25 | } | ||
| 26 | thread->Wakeup(); | ||
| 27 | }); | ||
| 28 | } | 24 | } |
| 29 | 25 | ||
| 30 | void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) { | 26 | void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) { |
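With the shared_ptr reconstruction and the mutex gone, the timer callback recovers the KThread directly from the integer user-data slot and wakes it. A small, self-contained toy of that pointer-through-uintptr_t pattern; all names below are illustrative:

#include <cstdint>
#include <functional>

// Toy sketch of the pattern above: the object pointer rides through the
// integer user-data slot of a scheduled timer event.
struct Waiter {
    void Wakeup() { /* resume the waiting thread */ }
};

using TimedCallback = std::function<void(std::uintptr_t user_data)>;

TimedCallback MakeWakeupCallback() {
    return [](std::uintptr_t user_data) {
        auto* waiter = reinterpret_cast<Waiter*>(user_data); // pointer smuggled as an integer
        waiter->Wakeup();
    };
}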