diff options
| author | 2020-12-28 23:45:28 -0800 | |
|---|---|---|
| committer | 2021-01-11 14:23:16 -0800 | |
| commit | b4e6d6c38586eaa5aab7f7df3f9d958755a517c2 (patch) | |
| tree | 8298e102b21223af6b174525844917acff2afabb | |
| parent | core: hle: kernel: Begin moving common SVC defintions to its own header. (diff) | |
| download | yuzu-b4e6d6c38586eaa5aab7f7df3f9d958755a517c2.tar.gz yuzu-b4e6d6c38586eaa5aab7f7df3f9d958755a517c2.tar.xz yuzu-b4e6d6c38586eaa5aab7f7df3f9d958755a517c2.zip | |
core: hle: kernel: Update KConditionVariable.
| -rw-r--r-- | src/core/CMakeLists.txt | 2 | ||||
| -rw-r--r-- | src/core/hle/kernel/k_condition_variable.cpp | 347 | ||||
| -rw-r--r-- | src/core/hle/kernel/k_condition_variable.h | 59 | ||||
| -rw-r--r-- | src/core/hle/kernel/object.h | 5 |
4 files changed, 413 insertions, 0 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index cae80bfbf..a870cd8fe 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt | |||
| @@ -158,6 +158,8 @@ add_library(core STATIC | |||
| 158 | hle/kernel/hle_ipc.cpp | 158 | hle/kernel/hle_ipc.cpp |
| 159 | hle/kernel/hle_ipc.h | 159 | hle/kernel/hle_ipc.h |
| 160 | hle/kernel/k_affinity_mask.h | 160 | hle/kernel/k_affinity_mask.h |
| 161 | hle/kernel/k_condition_variable.cpp | ||
| 162 | hle/kernel/k_condition_variable.h | ||
| 161 | hle/kernel/k_priority_queue.h | 163 | hle/kernel/k_priority_queue.h |
| 162 | hle/kernel/k_scheduler.cpp | 164 | hle/kernel/k_scheduler.cpp |
| 163 | hle/kernel/k_scheduler.h | 165 | hle/kernel/k_scheduler.h |
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp new file mode 100644 index 000000000..ef5c17409 --- /dev/null +++ b/src/core/hle/kernel/k_condition_variable.cpp | |||
| @@ -0,0 +1,347 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <vector> | ||
| 6 | |||
| 7 | #include "core/arm/exclusive_monitor.h" | ||
| 8 | #include "core/core.h" | ||
| 9 | #include "core/hle/kernel/k_condition_variable.h" | ||
| 10 | #include "core/hle/kernel/k_scheduler.h" | ||
| 11 | #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" | ||
| 12 | #include "core/hle/kernel/k_synchronization_object.h" | ||
| 13 | #include "core/hle/kernel/kernel.h" | ||
| 14 | #include "core/hle/kernel/process.h" | ||
| 15 | #include "core/hle/kernel/svc_common.h" | ||
| 16 | #include "core/hle/kernel/svc_results.h" | ||
| 17 | #include "core/hle/kernel/thread.h" | ||
| 18 | #include "core/memory.h" | ||
| 19 | |||
| 20 | namespace Kernel { | ||
| 21 | |||
| 22 | namespace { | ||
| 23 | |||
// Reads a 32-bit value from emulated guest memory at `address` into `*out`.
// NOTE(review): always returns true -- Memory().Read32 does not report access
// failures here, so an invalid address cannot currently be observed by callers.
bool ReadFromUser(Core::System& system, u32* out, VAddr address) {
    *out = system.Memory().Read32(address);
    return true;
}
| 28 | |||
// Writes the 32-bit value `*p` to emulated guest memory at `address`.
// NOTE(review): always returns true -- Memory().Write32 does not report access
// failures here, so an invalid address cannot currently be observed by callers.
bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
    system.Memory().Write32(address, *p);
    return true;
}
| 33 | |||
| 34 | bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero, | ||
| 35 | u32 new_orr_mask) { | ||
| 36 | auto& monitor = system.Monitor(); | ||
| 37 | const auto current_core = system.CurrentCoreIndex(); | ||
| 38 | |||
| 39 | // Load the value from the address. | ||
| 40 | const auto expected = monitor.ExclusiveRead32(current_core, address); | ||
| 41 | |||
| 42 | // Orr in the new mask. | ||
| 43 | u32 value = expected | new_orr_mask; | ||
| 44 | |||
| 45 | // If the value is zero, use the if_zero value, otherwise use the newly orr'd value. | ||
| 46 | if (!expected) { | ||
| 47 | value = if_zero; | ||
| 48 | } | ||
| 49 | |||
| 50 | // Try to store. | ||
| 51 | if (!monitor.ExclusiveWrite32(current_core, address, value)) { | ||
| 52 | // If we failed to store, try again. | ||
| 53 | return UpdateLockAtomic(system, out, address, if_zero, new_orr_mask); | ||
| 54 | } | ||
| 55 | |||
| 56 | // We're done. | ||
| 57 | *out = expected; | ||
| 58 | return true; | ||
| 59 | } | ||
| 60 | |||
| 61 | } // namespace | ||
| 62 | |||
// Binds the condition variable to the emulated system and caches its kernel.
KConditionVariable::KConditionVariable(Core::System& system_)
    : system{system_}, kernel{system.Kernel()} {}

KConditionVariable::~KConditionVariable() = default;
| 67 | |||
// Releases the mutex word at `addr` held by the current thread: picks the next
// waiter (if any), wakes it with success, and writes the new tag to userspace.
// Returns Svc::ResultInvalidCurrentMemory if the userspace write fails.
ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
    Thread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread();

    // Signal the address.
    {
        KScopedSchedulerLock sl(kernel);

        // Remove waiter thread.
        s32 num_waiters{};
        Thread* next_owner_thread =
            owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);

        // Determine the next tag.
        u32 next_value{};
        if (next_owner_thread) {
            next_value = next_owner_thread->GetAddressKeyValue();
            if (num_waiters > 1) {
                // Other threads still wait on this address; flag it in the tag.
                next_value |= Svc::HandleWaitMask;
            }

            // Wake the new owner with a success result.
            next_owner_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
            next_owner_thread->Wakeup();
        }

        // Write the value to userspace.
        if (!WriteToUser(system, addr, std::addressof(next_value))) {
            // Propagate the failure to the thread we just woke, if any.
            if (next_owner_thread) {
                next_owner_thread->SetSyncedObject(nullptr, Svc::ResultInvalidCurrentMemory);
            }

            return Svc::ResultInvalidCurrentMemory;
        }
    }

    return RESULT_SUCCESS;
}
| 104 | |||
// Blocks the current thread on the mutex word at `addr`. A wait only occurs
// when the tag read from userspace equals `handle | Svc::HandleWaitMask`
// (i.e. the handle's owner holds the lock with waiters flagged); otherwise the
// call returns immediately. On wakeup, returns the result set by the signaler.
ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
    Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();

    // Wait for the address.
    {
        std::shared_ptr<Thread> owner_thread;
        ASSERT(!owner_thread);
        {
            KScopedSchedulerLock sl(kernel);
            // Default the wait result to success; the signaler overwrites it on error.
            cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);

            // Check if the thread should terminate.
            R_UNLESS(!cur_thread->IsTerminationRequested(), Svc::ResultTerminationRequested);

            {
                // Read the tag from userspace.
                u32 test_tag{};
                R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr),
                         Svc::ResultInvalidCurrentMemory);

                // If the tag isn't the handle (with wait mask), we're done.
                R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS);

                // Get the lock owner thread.
                owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<Thread>(handle);
                R_UNLESS(owner_thread, Svc::ResultInvalidHandle);

                // Update the lock: record our key, queue behind the owner, and sleep.
                cur_thread->SetAddressKey(addr, value);
                owner_thread->AddWaiter(cur_thread);
                cur_thread->SetState(ThreadState::Waiting);
                cur_thread->SetMutexWaitAddressForDebugging(addr);
            }
        }
        // Reaching here means we actually waited (early returns above otherwise).
        ASSERT(owner_thread);
    }

    // Remove the thread as a waiter from the lock owner.
    {
        KScopedSchedulerLock sl(kernel);
        Thread* owner_thread = cur_thread->GetLockOwner();
        if (owner_thread != nullptr) {
            owner_thread->RemoveWaiter(cur_thread);
        }
    }

    // Get the wait result.
    KSynchronizationObject* dummy{};
    return cur_thread->GetWaitResult(std::addressof(dummy));
}
| 155 | |||
// Hands the mutex tagged by `thread`'s address key over after a cv signal.
// If the lock word was free, `thread` is woken directly; if another thread
// holds it, `thread` is re-queued as that owner's waiter and the owner is
// returned so the caller can Close() the acquired reference. Returns nullptr
// when no such reference was taken. Must be called under the scheduler lock.
Thread* KConditionVariable::SignalImpl(Thread* thread) {
    // Check pre-conditions.
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Update the tag.
    VAddr address = thread->GetAddressKey();
    u32 own_tag = thread->GetAddressKeyValue();

    u32 prev_tag{};
    bool can_access{};
    {
        // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
        // TODO(bunnei): We should call CanAccessAtomic(..) here.
        can_access = true;
        if (can_access) {
            // Set the tag to our own key if free, else OR in the wait mask.
            UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
                             Svc::HandleWaitMask);
        }
    }

    Thread* thread_to_close = nullptr;
    if (can_access) {
        if (prev_tag == InvalidHandle) {
            // If nobody held the lock previously, we're all good.
            thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
            thread->Wakeup();
        } else {
            // Get the previous owner.
            auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<Thread>(
                prev_tag & ~Svc::HandleWaitMask);

            if (owner_thread) {
                // Add the thread as a waiter on the owner.
                owner_thread->AddWaiter(thread);
                thread_to_close = owner_thread.get();
            } else {
                // The lock was tagged with a thread that doesn't exist.
                thread->SetSyncedObject(nullptr, Svc::ResultInvalidState);
                thread->Wakeup();
            }
        }
    } else {
        // If the address wasn't accessible, note so.
        thread->SetSyncedObject(nullptr, Svc::ResultInvalidCurrentMemory);
        thread->Wakeup();
    }

    return thread_to_close;
}
| 205 | |||
| 206 | void KConditionVariable::Signal(u64 cv_key, s32 count) { | ||
| 207 | // Prepare for signaling. | ||
| 208 | constexpr int MaxThreads = 16; | ||
| 209 | |||
| 210 | // TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using | ||
| 211 | // std::shared_ptr. | ||
| 212 | std::vector<std::shared_ptr<Thread>> thread_list; | ||
| 213 | std::array<Thread*, MaxThreads> thread_array; | ||
| 214 | s32 num_to_close{}; | ||
| 215 | |||
| 216 | // Perform signaling. | ||
| 217 | s32 num_waiters{}; | ||
| 218 | { | ||
| 219 | KScopedSchedulerLock sl(kernel); | ||
| 220 | |||
| 221 | auto it = thread_tree.nfind_light({cv_key, -1}); | ||
| 222 | while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && | ||
| 223 | (it->GetConditionVariableKey() == cv_key)) { | ||
| 224 | Thread* target_thread = std::addressof(*it); | ||
| 225 | |||
| 226 | if (Thread* thread = SignalImpl(target_thread); thread != nullptr) { | ||
| 227 | if (num_to_close < MaxThreads) { | ||
| 228 | thread_array[num_to_close++] = thread; | ||
| 229 | } else { | ||
| 230 | thread_list.push_back(SharedFrom(thread)); | ||
| 231 | } | ||
| 232 | } | ||
| 233 | |||
| 234 | it = thread_tree.erase(it); | ||
| 235 | target_thread->ClearConditionVariable(); | ||
| 236 | ++num_waiters; | ||
| 237 | } | ||
| 238 | |||
| 239 | // If we have no waiters, clear the has waiter flag. | ||
| 240 | if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) { | ||
| 241 | const u32 has_waiter_flag{}; | ||
| 242 | WriteToUser(system, cv_key, std::addressof(has_waiter_flag)); | ||
| 243 | } | ||
| 244 | } | ||
| 245 | |||
| 246 | // Close threads in the array. | ||
| 247 | for (auto i = 0; i < num_to_close; ++i) { | ||
| 248 | thread_array[i]->Close(); | ||
| 249 | } | ||
| 250 | |||
| 251 | // Close threads in the list. | ||
| 252 | for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) { | ||
| 253 | (*it)->Close(); | ||
| 254 | } | ||
| 255 | } | ||
| 256 | |||
// Atomically releases the mutex word at `addr` and sleeps on condition-variable
// key `key` with the given tag `value` for up to `timeout` (units follow the
// SVC contract -- presumably nanoseconds; TODO confirm). timeout == 0 performs
// the release and bookkeeping without actually sleeping. Returns the result
// recorded by the signaler, or Svc::ResultTimedOut if the sleep expired.
ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
    // Prepare to wait.
    Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
    Handle timer = InvalidHandle;

    {
        KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);

        // Set the synced object. (Timed-out is the default; a signaler overwrites it.)
        cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);

        // Check that the thread isn't terminating.
        if (cur_thread->IsTerminationRequested()) {
            slp.CancelSleep();
            return Svc::ResultTerminationRequested;
        }

        // Update the value and process for the next owner.
        {
            // Remove waiter thread.
            s32 num_waiters{};
            Thread* next_owner_thread =
                cur_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);

            // Update for the next owner thread.
            u32 next_value{};
            if (next_owner_thread != nullptr) {
                // Get the next tag value.
                next_value = next_owner_thread->GetAddressKeyValue();
                if (num_waiters > 1) {
                    next_value |= Svc::HandleWaitMask;
                }

                // Wake up the next owner.
                next_owner_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
                next_owner_thread->Wakeup();
            }

            // Write to the cv key. (Marks that at least one thread waits on `key`.)
            {
                const u32 has_waiter_flag = 1;
                WriteToUser(system, key, std::addressof(has_waiter_flag));
                // TODO(bunnei): We should call DataMemoryBarrier(..) here.
            }

            // Write the value to userspace.
            if (!WriteToUser(system, addr, std::addressof(next_value))) {
                slp.CancelSleep();
                return Svc::ResultInvalidCurrentMemory;
            }
        }

        // Update condition variable tracking.
        {
            cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
            thread_tree.insert(*cur_thread);
        }

        // If the timeout is non-zero, set the thread as waiting.
        if (timeout != 0) {
            cur_thread->SetState(ThreadState::Waiting);
            cur_thread->SetMutexWaitAddressForDebugging(addr);
        }
    }

    // Cancel the timer wait. (Only scheduled when the sleep actually armed one.)
    if (timer != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(timer);
    }

    // Remove from the condition variable.
    {
        KScopedSchedulerLock sl(kernel);

        // Detach from any mutex owner we may have been re-queued behind.
        if (Thread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
            owner->RemoveWaiter(cur_thread);
        }

        // If we timed out without being signaled, we are still in the tree.
        if (cur_thread->IsWaitingForConditionVariable()) {
            thread_tree.erase(thread_tree.iterator_to(*cur_thread));
            cur_thread->ClearConditionVariable();
        }
    }

    // Get the result.
    KSynchronizationObject* dummy{};
    return cur_thread->GetWaitResult(std::addressof(dummy));
}
| 346 | |||
| 347 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h new file mode 100644 index 000000000..98ed5b323 --- /dev/null +++ b/src/core/hle/kernel/k_condition_variable.h | |||
| @@ -0,0 +1,59 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include "common/assert.h" | ||
| 8 | #include "common/common_types.h" | ||
| 9 | |||
| 10 | #include "core/hle/kernel/k_scheduler.h" | ||
| 11 | #include "core/hle/kernel/kernel.h" | ||
| 12 | #include "core/hle/kernel/thread.h" | ||
| 13 | #include "core/hle/result.h" | ||
| 14 | |||
| 15 | namespace Core { | ||
| 16 | class System; | ||
| 17 | } | ||
| 18 | |||
| 19 | namespace Kernel { | ||
| 20 | |||
// Kernel condition-variable / address-arbitration primitive. Pairs a userspace
// mutex word (tag at a VAddr) with a tree of threads keyed by a 64-bit cv key.
class KConditionVariable {
public:
    using ThreadTree = typename Thread::ConditionVariableThreadTreeType;

    explicit KConditionVariable(Core::System& system_);
    ~KConditionVariable();

    // Arbitration: release / acquire the mutex word at `addr`.
    [[nodiscard]] ResultCode SignalToAddress(VAddr addr);
    [[nodiscard]] ResultCode WaitForAddress(Handle handle, VAddr addr, u32 value);

    // Condition variable: wake waiters on / wait with a cv key.
    void Signal(u64 cv_key, s32 count);
    [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);

private:
    // Wakes or re-queues one waiter; returns a previous owner to Close(), if any.
    [[nodiscard]] Thread* SignalImpl(Thread* thread);

    // Intrusive tree of threads currently waiting on this condition variable.
    ThreadTree thread_tree;

    Core::System& system;
    KernelCore& kernel;
};
| 44 | |||
// Removes `thread` from its cv tree before its priority changes -- presumably
// the tree ordering depends on priority, so the node must come out while its
// key is still consistent (TODO confirm against Thread's tree comparator).
// Caller must hold the global scheduler lock.
inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
                                 Thread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    tree->erase(tree->iterator_to(*thread));
}
| 51 | |||
// Re-inserts `thread` into its cv tree after a priority change, placing it at
// the position matching its updated key. Caller must hold the global scheduler
// lock.
inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
                                Thread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    tree->insert(*thread);
}
| 58 | |||
| 59 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h index e3391e2af..27124ef67 100644 --- a/src/core/hle/kernel/object.h +++ b/src/core/hle/kernel/object.h | |||
| @@ -50,6 +50,11 @@ public: | |||
| 50 | } | 50 | } |
| 51 | virtual HandleType GetHandleType() const = 0; | 51 | virtual HandleType GetHandleType() const = 0; |
| 52 | 52 | ||
    // Intentionally a no-op for now.
    void Close() {
        // TODO(bunnei): This is a placeholder to decrement the reference count, which we will use
        // when we implement KAutoObject instead of using shared_ptr.
    }
| 57 | |||
| 53 | /** | 58 | /** |
| 54 | * Check if a thread can wait on the object | 59 | * Check if a thread can wait on the object |
| 55 | * @return True if a thread can wait on the object, otherwise false | 60 | * @return True if a thread can wait on the object, otherwise false |