Diffstat (limited to 'src')
 -rw-r--r--  src/core/CMakeLists.txt                           |    4
 -rw-r--r--  src/core/hle/kernel/address_arbiter.cpp           |  317
 -rw-r--r--  src/core/hle/kernel/address_arbiter.h             |   91
 -rw-r--r--  src/core/hle/kernel/k_synchronization_object.cpp  |    4
 -rw-r--r--  src/core/hle/kernel/mutex.cpp                     |  170
 -rw-r--r--  src/core/hle/kernel/mutex.h                       |   42
 -rw-r--r--  src/core/hle/kernel/process.cpp                   |   48
 -rw-r--r--  src/core/hle/kernel/process.h                     |   50
 -rw-r--r--  src/core/hle/kernel/svc.cpp                       |  364
 -rw-r--r--  src/core/hle/kernel/svc_common.h                  |    1
 -rw-r--r--  src/core/hle/kernel/svc_wrap.h                    |   38
 -rw-r--r--  src/core/hle/kernel/thread.cpp                    |  227
 -rw-r--r--  src/core/hle/kernel/thread.h                      |  318
 -rw-r--r--  src/core/hle/kernel/time_manager.cpp              |    6
 -rw-r--r--  src/yuzu/debugger/wait_tree.cpp                   |   10
15 files changed, 508 insertions, 1182 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index d29d4573e..1b8ad476e 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -142,8 +142,6 @@ add_library(core STATIC
     hardware_interrupt_manager.h
     hle/ipc.h
     hle/ipc_helpers.h
-    hle/kernel/address_arbiter.cpp
-    hle/kernel/address_arbiter.h
     hle/kernel/client_port.cpp
     hle/kernel/client_port.h
     hle/kernel/client_session.cpp
@@ -189,8 +187,6 @@ add_library(core STATIC
     hle/kernel/memory/slab_heap.h
     hle/kernel/memory/system_control.cpp
     hle/kernel/memory/system_control.h
-    hle/kernel/mutex.cpp
-    hle/kernel/mutex.h
     hle/kernel/object.cpp
     hle/kernel/object.h
     hle/kernel/physical_core.cpp
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
deleted file mode 100644
index fe8675186..000000000
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <algorithm>
-#include <vector>
-
-#include "common/assert.h"
-#include "common/common_types.h"
-#include "core/arm/exclusive_monitor.h"
-#include "core/core.h"
-#include "core/hle/kernel/address_arbiter.h"
-#include "core/hle/kernel/errors.h"
-#include "core/hle/kernel/handle_table.h"
-#include "core/hle/kernel/k_scheduler.h"
-#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/thread.h"
-#include "core/hle/kernel/time_manager.h"
-#include "core/hle/result.h"
-#include "core/memory.h"
-
-namespace Kernel {
-
-// Wake up num_to_wake (or all) threads in a vector.
-void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads,
-                                 s32 num_to_wake) {
-    // Only process up to 'target' threads, unless 'target' is <= 0, in which case process
-    // them all.
-    std::size_t last = waiting_threads.size();
-    if (num_to_wake > 0) {
-        last = std::min(last, static_cast<std::size_t>(num_to_wake));
-    }
-
-    // Signal the waiting threads.
-    for (std::size_t i = 0; i < last; i++) {
-        waiting_threads[i]->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
-        RemoveThread(waiting_threads[i]);
-        waiting_threads[i]->WaitForArbitration(false);
-        waiting_threads[i]->Wakeup();
-    }
-}
-
-AddressArbiter::AddressArbiter(Core::System& system) : system{system} {}
-AddressArbiter::~AddressArbiter() = default;
-
-ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 value,
-                                           s32 num_to_wake) {
-    switch (type) {
-    case SignalType::Signal:
-        return SignalToAddressOnly(address, num_to_wake);
-    case SignalType::IncrementAndSignalIfEqual:
-        return IncrementAndSignalToAddressIfEqual(address, value, num_to_wake);
-    case SignalType::ModifyByWaitingCountAndSignalIfEqual:
-        return ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, num_to_wake);
-    default:
-        return ERR_INVALID_ENUM_VALUE;
-    }
-}
-
-ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
-    KScopedSchedulerLock lock(system.Kernel());
-    const std::vector<std::shared_ptr<Thread>> waiting_threads =
-        GetThreadsWaitingOnAddress(address);
-    WakeThreads(waiting_threads, num_to_wake);
-    return RESULT_SUCCESS;
-}
-
-ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
-                                                              s32 num_to_wake) {
-    KScopedSchedulerLock lock(system.Kernel());
-    auto& memory = system.Memory();
-
-    // Ensure that we can write to the address.
-    if (!memory.IsValidVirtualAddress(address)) {
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    const std::size_t current_core = system.CurrentCoreIndex();
-    auto& monitor = system.Monitor();
-    u32 current_value;
-    do {
-        current_value = monitor.ExclusiveRead32(current_core, address);
-
-        if (current_value != static_cast<u32>(value)) {
-            return ERR_INVALID_STATE;
-        }
-        current_value++;
-    } while (!monitor.ExclusiveWrite32(current_core, address, current_value));
-
-    return SignalToAddressOnly(address, num_to_wake);
-}
-
-ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
-                                                                         s32 num_to_wake) {
-    KScopedSchedulerLock lock(system.Kernel());
-    auto& memory = system.Memory();
-
-    // Ensure that we can write to the address.
-    if (!memory.IsValidVirtualAddress(address)) {
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    // Get threads waiting on the address.
-    const std::vector<std::shared_ptr<Thread>> waiting_threads =
-        GetThreadsWaitingOnAddress(address);
-
-    const std::size_t current_core = system.CurrentCoreIndex();
-    auto& monitor = system.Monitor();
-    s32 updated_value;
-    do {
-        updated_value = monitor.ExclusiveRead32(current_core, address);
-
-        if (updated_value != value) {
-            return ERR_INVALID_STATE;
-        }
-        // Determine the modified value depending on the waiting count.
-        if (num_to_wake <= 0) {
-            if (waiting_threads.empty()) {
-                updated_value = value + 1;
-            } else {
-                updated_value = value - 1;
-            }
-        } else {
-            if (waiting_threads.empty()) {
-                updated_value = value + 1;
-            } else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
-                updated_value = value - 1;
-            } else {
-                updated_value = value;
-            }
-        }
-    } while (!monitor.ExclusiveWrite32(current_core, address, updated_value));
-
-    WakeThreads(waiting_threads, num_to_wake);
-    return RESULT_SUCCESS;
-}
-
-ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s32 value,
-                                          s64 timeout_ns) {
-    switch (type) {
-    case ArbitrationType::WaitIfLessThan:
-        return WaitForAddressIfLessThan(address, value, timeout_ns, false);
-    case ArbitrationType::DecrementAndWaitIfLessThan:
-        return WaitForAddressIfLessThan(address, value, timeout_ns, true);
-    case ArbitrationType::WaitIfEqual:
-        return WaitForAddressIfEqual(address, value, timeout_ns);
-    default:
-        return ERR_INVALID_ENUM_VALUE;
-    }
-}
-
-ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
-                                                    bool should_decrement) {
-    auto& memory = system.Memory();
-    auto& kernel = system.Kernel();
-    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
-
-    Handle event_handle = InvalidHandle;
-    {
-        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
-
-        if (current_thread->IsTerminationRequested()) {
-            lock.CancelSleep();
-            return ERR_THREAD_TERMINATING;
-        }
-
-        // Ensure that we can read the address.
-        if (!memory.IsValidVirtualAddress(address)) {
-            lock.CancelSleep();
-            return ERR_INVALID_ADDRESS_STATE;
-        }
-
-        s32 current_value = static_cast<s32>(memory.Read32(address));
-        if (current_value >= value) {
-            lock.CancelSleep();
-            return ERR_INVALID_STATE;
-        }
-
-        current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
-
-        s32 decrement_value;
-
-        const std::size_t current_core = system.CurrentCoreIndex();
-        auto& monitor = system.Monitor();
-        do {
-            current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
-            if (should_decrement) {
-                decrement_value = current_value - 1;
-            } else {
-                decrement_value = current_value;
-            }
-        } while (
-            !monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value)));
-
-        // Short-circuit without rescheduling, if timeout is zero.
-        if (timeout == 0) {
-            lock.CancelSleep();
-            return RESULT_TIMEOUT;
-        }
-
-        current_thread->SetArbiterWaitAddress(address);
-        InsertThread(SharedFrom(current_thread));
-        current_thread->SetState(ThreadState::Waiting);
-        current_thread->WaitForArbitration(true);
-    }
-
-    if (event_handle != InvalidHandle) {
-        auto& time_manager = kernel.TimeManager();
-        time_manager.UnscheduleTimeEvent(event_handle);
-    }
-
-    {
-        KScopedSchedulerLock lock(kernel);
-        if (current_thread->IsWaitingForArbitration()) {
-            RemoveThread(SharedFrom(current_thread));
-            current_thread->WaitForArbitration(false);
-        }
-    }
-
-    return current_thread->GetSignalingResult();
-}
-
-ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
-    auto& memory = system.Memory();
-    auto& kernel = system.Kernel();
-    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
-
-    Handle event_handle = InvalidHandle;
-    {
-        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
-
-        if (current_thread->IsTerminationRequested()) {
-            lock.CancelSleep();
-            return ERR_THREAD_TERMINATING;
-        }
-
-        // Ensure that we can read the address.
-        if (!memory.IsValidVirtualAddress(address)) {
-            lock.CancelSleep();
-            return ERR_INVALID_ADDRESS_STATE;
-        }
-
-        s32 current_value = static_cast<s32>(memory.Read32(address));
-        if (current_value != value) {
-            lock.CancelSleep();
-            return ERR_INVALID_STATE;
-        }
-
-        // Short-circuit without rescheduling, if timeout is zero.
-        if (timeout == 0) {
-            lock.CancelSleep();
-            return RESULT_TIMEOUT;
-        }
-
-        current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
-        current_thread->SetArbiterWaitAddress(address);
-        InsertThread(SharedFrom(current_thread));
-        current_thread->SetState(ThreadState::Waiting);
-        current_thread->WaitForArbitration(true);
-    }
-
-    if (event_handle != InvalidHandle) {
-        auto& time_manager = kernel.TimeManager();
-        time_manager.UnscheduleTimeEvent(event_handle);
-    }
-
-    {
-        KScopedSchedulerLock lock(kernel);
-        if (current_thread->IsWaitingForArbitration()) {
-            RemoveThread(SharedFrom(current_thread));
-            current_thread->WaitForArbitration(false);
-        }
-    }
-
-    return current_thread->GetSignalingResult();
-}
-
-void AddressArbiter::InsertThread(std::shared_ptr<Thread> thread) {
-    const VAddr arb_addr = thread->GetArbiterWaitAddress();
-    std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
-
-    const auto iter =
-        std::find_if(thread_list.cbegin(), thread_list.cend(), [&thread](const auto& entry) {
-            return entry->GetPriority() >= thread->GetPriority();
-        });
-
-    if (iter == thread_list.cend()) {
-        thread_list.push_back(std::move(thread));
-    } else {
-        thread_list.insert(iter, std::move(thread));
-    }
-}
-
-void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) {
-    const VAddr arb_addr = thread->GetArbiterWaitAddress();
-    std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
-
-    const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(),
-                                   [&thread](const auto& entry) { return thread == entry; });
-
-    if (iter != thread_list.cend()) {
-        thread_list.erase(iter);
-    }
-}
-
-std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(
-    VAddr address) const {
-    const auto iter = arb_threads.find(address);
-    if (iter == arb_threads.cend()) {
-        return {};
-    }
-
-    const std::list<std::shared_ptr<Thread>>& thread_list = iter->second;
-    return {thread_list.cbegin(), thread_list.cend()};
-}
-} // namespace Kernel
diff --git a/src/core/hle/kernel/address_arbiter.h b/src/core/hle/kernel/address_arbiter.h
deleted file mode 100644
index b91edc67d..000000000
--- a/src/core/hle/kernel/address_arbiter.h
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <list>
-#include <memory>
-#include <unordered_map>
-#include <vector>
-
-#include "common/common_types.h"
-
-union ResultCode;
-
-namespace Core {
-class System;
-}
-
-namespace Kernel {
-
-class Thread;
-
-class AddressArbiter {
-public:
-    enum class ArbitrationType {
-        WaitIfLessThan = 0,
-        DecrementAndWaitIfLessThan = 1,
-        WaitIfEqual = 2,
-    };
-
-    enum class SignalType {
-        Signal = 0,
-        IncrementAndSignalIfEqual = 1,
-        ModifyByWaitingCountAndSignalIfEqual = 2,
-    };
-
-    explicit AddressArbiter(Core::System& system);
-    ~AddressArbiter();
-
-    AddressArbiter(const AddressArbiter&) = delete;
-    AddressArbiter& operator=(const AddressArbiter&) = delete;
-
-    AddressArbiter(AddressArbiter&&) = default;
-    AddressArbiter& operator=(AddressArbiter&&) = delete;
-
-    /// Signals an address being waited on with a particular signaling type.
-    ResultCode SignalToAddress(VAddr address, SignalType type, s32 value, s32 num_to_wake);
-
-    /// Waits on an address with a particular arbitration type.
-    ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns);
-
-private:
-    /// Signals an address being waited on.
-    ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake);
-
-    /// Signals an address being waited on and increments its value if equal to the value argument.
-    ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake);
-
-    /// Signals an address being waited on and modifies its value based on waiting thread count if
-    /// equal to the value argument.
-    ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
-                                                             s32 num_to_wake);
-
-    /// Waits on an address if the value passed is less than the argument value,
-    /// optionally decrementing.
-    ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
-                                        bool should_decrement);
-
-    /// Waits on an address if the value passed is equal to the argument value.
-    ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);
-
-    /// Wake up num_to_wake (or all) threads in a vector.
-    void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake);
-
-    /// Insert a thread into the address arbiter container
-    void InsertThread(std::shared_ptr<Thread> thread);
-
-    /// Removes a thread from the address arbiter container
-    void RemoveThread(std::shared_ptr<Thread> thread);
-
-    // Gets the threads waiting on an address.
-    std::vector<std::shared_ptr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const;
-
-    /// List of threads waiting for a address arbiter
-    std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> arb_threads;
-
-    Core::System& system;
-};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
index 64c566caa..11b989ecd 100644
--- a/src/core/hle/kernel/k_synchronization_object.cpp
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -72,7 +72,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
     }
 
     // For debugging only
-    thread->SetWaitObjectsForDebugging(objects, num_objects);
+    thread->SetWaitObjectsForDebugging({objects, static_cast<std::size_t>(num_objects)});
 
     // Mark the thread as waiting.
     thread->SetCancellable();
@@ -86,7 +86,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
     thread->ClearCancellable();
 
     // For debugging only
-    thread->SetWaitObjectsForDebugging(nullptr, 0);
+    thread->SetWaitObjectsForDebugging({});
 
     // Cancel the timer as needed.
     if (timer != InvalidHandle) {
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
deleted file mode 100644
index 8a0faacf8..000000000
--- a/src/core/hle/kernel/mutex.cpp
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <memory>
-#include <utility>
-#include <vector>
-
-#include "common/assert.h"
-#include "common/logging/log.h"
-#include "core/core.h"
-#include "core/hle/kernel/errors.h"
-#include "core/hle/kernel/handle_table.h"
-#include "core/hle/kernel/k_scheduler.h"
-#include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/mutex.h"
-#include "core/hle/kernel/object.h"
-#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/thread.h"
-#include "core/hle/result.h"
-#include "core/memory.h"
-
-namespace Kernel {
-
-/// Returns the number of threads that are waiting for a mutex, and the highest priority one among
-/// those.
-static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
-    const std::shared_ptr<Thread>& current_thread, VAddr mutex_addr) {
-
-    std::shared_ptr<Thread> highest_priority_thread;
-    u32 num_waiters = 0;
-
-    for (const auto& thread : current_thread->GetMutexWaitingThreads()) {
-        if (thread->GetMutexWaitAddress() != mutex_addr)
-            continue;
-
-        ++num_waiters;
-        if (highest_priority_thread == nullptr ||
-            thread->GetPriority() < highest_priority_thread->GetPriority()) {
-            highest_priority_thread = thread;
-        }
-    }
-
-    return {highest_priority_thread, num_waiters};
-}
-
-/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner.
-static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread,
-                                   std::shared_ptr<Thread> new_owner) {
-    current_thread->RemoveMutexWaiter(new_owner);
-    const auto threads = current_thread->GetMutexWaitingThreads();
-    for (const auto& thread : threads) {
-        if (thread->GetMutexWaitAddress() != mutex_addr)
-            continue;
-
-        ASSERT(thread->GetLockOwner() == current_thread.get());
-        current_thread->RemoveMutexWaiter(thread);
-        if (new_owner != thread)
-            new_owner->AddMutexWaiter(thread);
-    }
-}
-
-Mutex::Mutex(Core::System& system) : system{system} {}
-Mutex::~Mutex() = default;
-
-ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
-                             Handle requesting_thread_handle) {
-    // The mutex address must be 4-byte aligned
-    if ((address % sizeof(u32)) != 0) {
-        LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
-        return ERR_INVALID_ADDRESS;
-    }
-
-    auto& kernel = system.Kernel();
-    std::shared_ptr<Thread> current_thread =
-        SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
-    {
-        KScopedSchedulerLock lock(kernel);
-        // The mutex address must be 4-byte aligned
-        if ((address % sizeof(u32)) != 0) {
-            return ERR_INVALID_ADDRESS;
-        }
-
-        const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
-        std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
-        std::shared_ptr<Thread> requesting_thread =
-            handle_table.Get<Thread>(requesting_thread_handle);
-
-        // TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of
-        // another thread.
-        ASSERT(requesting_thread == current_thread);
-
-        current_thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
-
-        const u32 addr_value = system.Memory().Read32(address);
-
-        // If the mutex isn't being held, just return success.
-        if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
-            return RESULT_SUCCESS;
-        }
-
-        if (holding_thread == nullptr) {
-            return ERR_INVALID_HANDLE;
-        }
-
-        // Wait until the mutex is released
-        current_thread->SetMutexWaitAddress(address);
-        current_thread->SetWaitHandle(requesting_thread_handle);
-
-        current_thread->SetState(ThreadState::Waiting);
-
-        // Update the lock holder thread's priority to prevent priority inversion.
-        holding_thread->AddMutexWaiter(current_thread);
-    }
-
-    {
-        KScopedSchedulerLock lock(kernel);
-        auto* owner = current_thread->GetLockOwner();
-        if (owner != nullptr) {
-            owner->RemoveMutexWaiter(current_thread);
-        }
-    }
-    return current_thread->GetSignalingResult();
-}
-
-std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thread> owner,
-                                                             VAddr address) {
-    // The mutex address must be 4-byte aligned
-    if ((address % sizeof(u32)) != 0) {
-        LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
-        return {ERR_INVALID_ADDRESS, nullptr};
-    }
-
-    auto [new_owner, num_waiters] = GetHighestPriorityMutexWaitingThread(owner, address);
-    if (new_owner == nullptr) {
-        system.Memory().Write32(address, 0);
-        return {RESULT_SUCCESS, nullptr};
-    }
-    // Transfer the ownership of the mutex from the previous owner to the new one.
-    TransferMutexOwnership(address, owner, new_owner);
-    u32 mutex_value = new_owner->GetWaitHandle();
-    if (num_waiters >= 2) {
-        // Notify the guest that there are still some threads waiting for the mutex
-        mutex_value |= Mutex::MutexHasWaitersFlag;
-    }
-    new_owner->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
-    new_owner->SetLockOwner(nullptr);
-    new_owner->Wakeup();
-
-    system.Memory().Write32(address, mutex_value);
-    return {RESULT_SUCCESS, new_owner};
-}
-
-ResultCode Mutex::Release(VAddr address) {
-    auto& kernel = system.Kernel();
-    KScopedSchedulerLock lock(kernel);
-
-    std::shared_ptr<Thread> current_thread =
-        SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
-
-    auto [result, new_owner] = Unlock(current_thread, address);
-
-    if (result != RESULT_SUCCESS && new_owner != nullptr) {
-        new_owner->SetSynchronizationResults(nullptr, result);
-    }
-
-    return result;
-}
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/mutex.h b/src/core/hle/kernel/mutex.h
deleted file mode 100644
index 3b81dc3df..000000000
--- a/src/core/hle/kernel/mutex.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include "common/common_types.h"
-
-union ResultCode;
-
-namespace Core {
-class System;
-}
-
-namespace Kernel {
-
-class Mutex final {
-public:
-    explicit Mutex(Core::System& system);
-    ~Mutex();
-
-    /// Flag that indicates that a mutex still has threads waiting for it.
-    static constexpr u32 MutexHasWaitersFlag = 0x40000000;
-    /// Mask of the bits in a mutex address value that contain the mutex owner.
-    static constexpr u32 MutexOwnerMask = 0xBFFFFFFF;
-
-    /// Attempts to acquire a mutex at the specified address.
-    ResultCode TryAcquire(VAddr address, Handle holding_thread_handle,
-                          Handle requesting_thread_handle);
-
-    /// Unlocks a mutex for owner at address
-    std::pair<ResultCode, std::shared_ptr<Thread>> Unlock(std::shared_ptr<Thread> owner,
-                                                          VAddr address);
-
-    /// Releases the mutex at the specified address.
-    ResultCode Release(VAddr address);
-
-private:
-    Core::System& system;
-};
-
-} // namespace Kernel
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index a306c7c73..37b77fa6e 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -162,48 +162,6 @@ u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
     return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
 }
 
-void Process::InsertConditionVariableThread(std::shared_ptr<Thread> thread) {
-    VAddr cond_var_addr = thread->GetCondVarWaitAddress();
-    std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
-    auto it = thread_list.begin();
-    while (it != thread_list.end()) {
-        const std::shared_ptr<Thread> current_thread = *it;
-        if (current_thread->GetPriority() > thread->GetPriority()) {
-            thread_list.insert(it, thread);
-            return;
-        }
-        ++it;
-    }
-    thread_list.push_back(thread);
-}
-
-void Process::RemoveConditionVariableThread(std::shared_ptr<Thread> thread) {
-    VAddr cond_var_addr = thread->GetCondVarWaitAddress();
-    std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
-    auto it = thread_list.begin();
-    while (it != thread_list.end()) {
-        const std::shared_ptr<Thread> current_thread = *it;
-        if (current_thread.get() == thread.get()) {
-            thread_list.erase(it);
-            return;
-        }
-        ++it;
-    }
-}
-
-std::vector<std::shared_ptr<Thread>> Process::GetConditionVariableThreads(
-    const VAddr cond_var_addr) {
-    std::vector<std::shared_ptr<Thread>> result{};
-    std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
-    auto it = thread_list.begin();
-    while (it != thread_list.end()) {
-        std::shared_ptr<Thread> current_thread = *it;
-        result.push_back(current_thread);
-        ++it;
-    }
-    return result;
-}
-
 void Process::RegisterThread(const Thread* thread) {
     thread_list.push_back(thread);
 }
@@ -412,9 +370,9 @@ bool Process::IsSignaled() const {
 }
 
 Process::Process(Core::System& system)
-    : KSynchronizationObject{system.Kernel()}, page_table{std::make_unique<Memory::PageTable>(
-                                                   system)},
-      handle_table{system.Kernel()}, address_arbiter{system}, mutex{system}, system{system} {}
+    : KSynchronizationObject{system.Kernel()},
+      page_table{std::make_unique<Memory::PageTable>(system)}, handle_table{system.Kernel()},
+      address_arbiter{system}, condition_var{system}, system{system} {}
 
 Process::~Process() = default;
 
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 901f1ff27..564e1f27d 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -11,10 +11,10 @@
 #include <unordered_map>
 #include <vector>
 #include "common/common_types.h"
-#include "core/hle/kernel/address_arbiter.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_address_arbiter.h"
+#include "core/hle/kernel/k_condition_variable.h"
 #include "core/hle/kernel/k_synchronization_object.h"
-#include "core/hle/kernel/mutex.h"
 #include "core/hle/kernel/process_capability.h"
 #include "core/hle/result.h"
 
@@ -123,24 +123,30 @@
         return handle_table;
     }
 
-    /// Gets a reference to the process' address arbiter.
-    AddressArbiter& GetAddressArbiter() {
-        return address_arbiter;
-    }
-
-    /// Gets a const reference to the process' address arbiter.
-    const AddressArbiter& GetAddressArbiter() const {
-        return address_arbiter;
-    }
-
-    /// Gets a reference to the process' mutex lock.
-    Mutex& GetMutex() {
-        return mutex;
-    }
-
-    /// Gets a const reference to the process' mutex lock
-    const Mutex& GetMutex() const {
-        return mutex;
+    ResultCode SignalToAddress(VAddr address) {
+        return condition_var.SignalToAddress(address);
+    }
+
+    ResultCode WaitForAddress(Handle handle, VAddr address, u32 tag) {
+        return condition_var.WaitForAddress(handle, address, tag);
+    }
+
+    void SignalConditionVariable(u64 cv_key, int32_t count) {
+        return condition_var.Signal(cv_key, count);
+    }
+
+    ResultCode WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
+        return condition_var.Wait(address, cv_key, tag, ns);
+    }
+
+    ResultCode SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value,
+                                    s32 count) {
+        return address_arbiter.SignalToAddress(address, signal_type, value, count);
+    }
+
+    ResultCode WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
+                                  s64 timeout) {
+        return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
     }
 
     /// Gets the address to the process' dedicated TLS region.
@@ -250,15 +256,6 @@
         return thread_list;
     }
 
-    /// Insert a thread into the condition variable wait container
-    void InsertConditionVariableThread(std::shared_ptr<Thread> thread);
-
-    /// Remove a thread from the condition variable wait container
-    void RemoveConditionVariableThread(std::shared_ptr<Thread> thread);
-
-    /// Obtain all condition variable threads waiting for some address
-    std::vector<std::shared_ptr<Thread>> GetConditionVariableThreads(VAddr cond_var_addr);
-
     /// Registers a thread as being created under this process,
     /// adding it to this process' thread list.
     void RegisterThread(const Thread* thread);
@@ -369,12 +366,12 @@
     HandleTable handle_table;
 
     /// Per-process address arbiter.
-    AddressArbiter address_arbiter;
+    KAddressArbiter address_arbiter;
 
     /// The per-process mutex lock instance used for handling various
    /// forms of services, such as lock arbitration, and condition
     /// variable related facilities.
-    Mutex mutex;
+    KConditionVariable condition_var;
 
     /// Address indicating the location of the process' dedicated TLS region.
     VAddr tls_region_address = 0;
@@ -385,9 +382,6 @@
     /// List of threads that are running with this process as their owner.
     std::list<const Thread*> thread_list;
 
-    /// List of threads waiting for a condition variable
-    std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> cond_var_threads;
-
     /// Address of the top of the main thread's stack
     VAddr main_thread_stack_top{};
 
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 304b8727d..99bb4ea20 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | 10 | ||
| 11 | #include "common/alignment.h" | 11 | #include "common/alignment.h" |
| 12 | #include "common/assert.h" | 12 | #include "common/assert.h" |
| 13 | #include "common/common_funcs.h" | ||
| 13 | #include "common/fiber.h" | 14 | #include "common/fiber.h" |
| 14 | #include "common/logging/log.h" | 15 | #include "common/logging/log.h" |
| 15 | #include "common/microprofile.h" | 16 | #include "common/microprofile.h" |
| @@ -19,24 +20,26 @@ | |||
| 19 | #include "core/core_timing.h" | 20 | #include "core/core_timing.h" |
| 20 | #include "core/core_timing_util.h" | 21 | #include "core/core_timing_util.h" |
| 21 | #include "core/cpu_manager.h" | 22 | #include "core/cpu_manager.h" |
| 22 | #include "core/hle/kernel/address_arbiter.h" | ||
| 23 | #include "core/hle/kernel/client_port.h" | 23 | #include "core/hle/kernel/client_port.h" |
| 24 | #include "core/hle/kernel/client_session.h" | 24 | #include "core/hle/kernel/client_session.h" |
| 25 | #include "core/hle/kernel/errors.h" | 25 | #include "core/hle/kernel/errors.h" |
| 26 | #include "core/hle/kernel/handle_table.h" | 26 | #include "core/hle/kernel/handle_table.h" |
| 27 | #include "core/hle/kernel/k_address_arbiter.h" | ||
| 28 | #include "core/hle/kernel/k_condition_variable.h" | ||
| 27 | #include "core/hle/kernel/k_scheduler.h" | 29 | #include "core/hle/kernel/k_scheduler.h" |
| 28 | #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" | 30 | #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" |
| 29 | #include "core/hle/kernel/k_synchronization_object.h" | 31 | #include "core/hle/kernel/k_synchronization_object.h" |
| 30 | #include "core/hle/kernel/kernel.h" | 32 | #include "core/hle/kernel/kernel.h" |
| 31 | #include "core/hle/kernel/memory/memory_block.h" | 33 | #include "core/hle/kernel/memory/memory_block.h" |
| 34 | #include "core/hle/kernel/memory/memory_layout.h" | ||
| 32 | #include "core/hle/kernel/memory/page_table.h" | 35 | #include "core/hle/kernel/memory/page_table.h" |
| 33 | #include "core/hle/kernel/mutex.h" | ||
| 34 | #include "core/hle/kernel/physical_core.h" | 36 | #include "core/hle/kernel/physical_core.h" |
| 35 | #include "core/hle/kernel/process.h" | 37 | #include "core/hle/kernel/process.h" |
| 36 | #include "core/hle/kernel/readable_event.h" | 38 | #include "core/hle/kernel/readable_event.h" |
| 37 | #include "core/hle/kernel/resource_limit.h" | 39 | #include "core/hle/kernel/resource_limit.h" |
| 38 | #include "core/hle/kernel/shared_memory.h" | 40 | #include "core/hle/kernel/shared_memory.h" |
| 39 | #include "core/hle/kernel/svc.h" | 41 | #include "core/hle/kernel/svc.h" |
| 42 | #include "core/hle/kernel/svc_results.h" | ||
| 40 | #include "core/hle/kernel/svc_types.h" | 43 | #include "core/hle/kernel/svc_types.h" |
| 41 | #include "core/hle/kernel/svc_wrap.h" | 44 | #include "core/hle/kernel/svc_wrap.h" |
| 42 | #include "core/hle/kernel/thread.h" | 45 | #include "core/hle/kernel/thread.h" |
| @@ -347,12 +350,6 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) { | |||
| 347 | session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming()); | 350 | session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming()); |
| 348 | } | 351 | } |
| 349 | 352 | ||
| 350 | Handle event_handle = thread->GetHLETimeEvent(); | ||
| 351 | if (event_handle != InvalidHandle) { | ||
| 352 | auto& time_manager = kernel.TimeManager(); | ||
| 353 | time_manager.UnscheduleTimeEvent(event_handle); | ||
| 354 | } | ||
| 355 | |||
| 356 | return thread->GetSignalingResult(); | 353 | return thread->GetSignalingResult(); |
| 357 | } | 354 | } |
| 358 | 355 | ||
| @@ -491,56 +488,37 @@ static ResultCode CancelSynchronization32(Core::System& system, Handle thread_ha | |||
| 491 | return CancelSynchronization(system, thread_handle); | 488 | return CancelSynchronization(system, thread_handle); |
| 492 | } | 489 | } |
| 493 | 490 | ||
| 494 | /// Attempts to locks a mutex, creating it if it does not already exist | 491 | /// Attempts to locks a mutex |
| 495 | static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_handle, | 492 | static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address, |
| 496 | VAddr mutex_addr, Handle requesting_thread_handle) { | 493 | u32 tag) { |
| 497 | LOG_TRACE(Kernel_SVC, | 494 | LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}", |
| 498 | "called holding_thread_handle=0x{:08X}, mutex_addr=0x{:X}, " | 495 | thread_handle, address, tag); |
| 499 | "requesting_current_thread_handle=0x{:08X}", | ||
| 500 | holding_thread_handle, mutex_addr, requesting_thread_handle); | ||
| 501 | |||
| 502 | if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) { | ||
| 503 | LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}", | ||
| 504 | mutex_addr); | ||
| 505 | return ERR_INVALID_ADDRESS_STATE; | ||
| 506 | } | ||
| 507 | 496 | ||
| 508 | if (!Common::IsWordAligned(mutex_addr)) { | 497 | // Validate the input address. |
| 509 | LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr); | 498 | R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory); |
| 510 | return ERR_INVALID_ADDRESS; | 499 | R_UNLESS(Common::IsAligned(address, sizeof(u32)), Svc::ResultInvalidAddress); |
| 511 | } | ||
| 512 | 500 | ||
| 513 | auto* const current_process = system.Kernel().CurrentProcess(); | 501 | return system.Kernel().CurrentProcess()->WaitForAddress(thread_handle, address, tag); |
| 514 | return current_process->GetMutex().TryAcquire(mutex_addr, holding_thread_handle, | ||
| 515 | requesting_thread_handle); | ||
| 516 | } | 502 | } |
| 517 | 503 | ||
| 518 | static ResultCode ArbitrateLock32(Core::System& system, Handle holding_thread_handle, | 504 | static ResultCode ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address, |
| 519 | u32 mutex_addr, Handle requesting_thread_handle) { | 505 | u32 tag) { |
| 520 | return ArbitrateLock(system, holding_thread_handle, mutex_addr, requesting_thread_handle); | 506 | return ArbitrateLock(system, thread_handle, address, tag); |
| 521 | } | 507 | } |
| 522 | 508 | ||
| 523 | /// Unlock a mutex | 509 | /// Unlock a mutex |
| 524 | static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) { | 510 | static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) { |
| 525 | LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr); | 511 | LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address); |
| 526 | 512 | ||
| 527 | if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) { | 513 | // Validate the input address. |
| 528 | LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}", | 514 | R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory); |
| 529 | mutex_addr); | 515 | R_UNLESS(Common::IsAligned(address, sizeof(u32)), Svc::ResultInvalidAddress); |
| 530 | return ERR_INVALID_ADDRESS_STATE; | ||
| 531 | } | ||
| 532 | 516 | ||
| 533 | if (!Common::IsWordAligned(mutex_addr)) { | 517 | return system.Kernel().CurrentProcess()->SignalToAddress(address); |
| 534 | LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr); | ||
| 535 | return ERR_INVALID_ADDRESS; | ||
| 536 | } | ||
| 537 | |||
| 538 | auto* const current_process = system.Kernel().CurrentProcess(); | ||
| 539 | return current_process->GetMutex().Release(mutex_addr); | ||
| 540 | } | 518 | } |
| 541 | 519 | ||
| 542 | static ResultCode ArbitrateUnlock32(Core::System& system, u32 mutex_addr) { | 520 | static ResultCode ArbitrateUnlock32(Core::System& system, u32 address) { |
| 543 | return ArbitrateUnlock(system, mutex_addr); | 521 | return ArbitrateUnlock(system, address); |
| 544 | } | 522 | } |
| 545 | 523 | ||
| 546 | enum class BreakType : u32 { | 524 | enum class BreakType : u32 { |
| @@ -1167,7 +1145,7 @@ static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 pri | |||
| 1167 | return ERR_INVALID_HANDLE; | 1145 | return ERR_INVALID_HANDLE; |
| 1168 | } | 1146 | } |
| 1169 | 1147 | ||
| 1170 | thread->SetPriority(priority); | 1148 | thread->SetBasePriority(priority); |
| 1171 | 1149 | ||
| 1172 | return RESULT_SUCCESS; | 1150 | return RESULT_SUCCESS; |
| 1173 | } | 1151 | } |
| @@ -1607,223 +1585,135 @@ static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanosec | |||
| 1607 | } | 1585 | } |
| 1608 | 1586 | ||
| 1609 | /// Wait process wide key atomic | 1587 | /// Wait process wide key atomic |
| 1610 | static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_addr, | 1588 | static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key, |
| 1611 | VAddr condition_variable_addr, Handle thread_handle, | 1589 | u32 tag, s64 timeout_ns) { |
| 1612 | s64 nano_seconds) { | 1590 | LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address, |
| 1613 | LOG_TRACE( | 1591 | cv_key, tag, timeout_ns); |
| 1614 | Kernel_SVC, | 1592 | |
| 1615 | "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}", | 1593 | // Validate input. |
| 1616 | mutex_addr, condition_variable_addr, thread_handle, nano_seconds); | 1594 | R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory); |
| 1617 | 1595 | R_UNLESS(Common::IsAligned(address, sizeof(int32_t)), Svc::ResultInvalidAddress); | |
| 1618 | if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) { | 1596 | |
| 1619 | LOG_ERROR( | 1597 | // Convert timeout from nanoseconds to ticks. |
| 1620 | Kernel_SVC, | 1598 | s64 timeout{}; |
| 1621 | "Given mutex address must not be within the kernel address space. address=0x{:016X}", | 1599 | if (timeout_ns > 0) { |
| 1622 | mutex_addr); | 1600 | const s64 offset_tick(timeout_ns); |
| 1623 | return ERR_INVALID_ADDRESS_STATE; | 1601 | if (offset_tick > 0) { |
| 1624 | } | 1602 | timeout = offset_tick + 2; |
| 1625 | 1603 | if (timeout <= 0) { | |
| 1626 | if (!Common::IsWordAligned(mutex_addr)) { | 1604 | timeout = std::numeric_limits<s64>::max(); |
| 1627 | LOG_ERROR(Kernel_SVC, "Given mutex address must be word-aligned. address=0x{:016X}", | 1605 | } |
| 1628 | mutex_addr); | 1606 | } else { |
| 1629 | return ERR_INVALID_ADDRESS; | 1607 | timeout = std::numeric_limits<s64>::max(); |
| 1630 | } | ||
| 1631 | |||
| 1632 | ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); | ||
| 1633 | auto& kernel = system.Kernel(); | ||
| 1634 | Handle event_handle; | ||
| 1635 | Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread(); | ||
| 1636 | auto* const current_process = kernel.CurrentProcess(); | ||
| 1637 | { | ||
| 1638 | KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds); | ||
| 1639 | const auto& handle_table = current_process->GetHandleTable(); | ||
| 1640 | std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle); | ||
| 1641 | ASSERT(thread); | ||
| 1642 | |||
| 1643 | current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT); | ||
| 1644 | |||
| 1645 | if (thread->IsTerminationRequested()) { | ||
| 1646 | lock.CancelSleep(); | ||
| 1647 | return ERR_THREAD_TERMINATING; | ||
| 1648 | } | ||
| 1649 | |||
| 1650 | const auto release_result = current_process->GetMutex().Release(mutex_addr); | ||
| 1651 | if (release_result.IsError()) { | ||
| 1652 | lock.CancelSleep(); | ||
| 1653 | return release_result; | ||
| 1654 | } | ||
| 1655 | |||
| 1656 | if (nano_seconds == 0) { | ||
| 1657 | lock.CancelSleep(); | ||
| 1658 | return RESULT_TIMEOUT; | ||
| 1659 | } | ||
| 1660 | |||
| 1661 | current_thread->SetCondVarWaitAddress(condition_variable_addr); | ||
| 1662 | current_thread->SetMutexWaitAddress(mutex_addr); | ||
| 1663 | current_thread->SetWaitHandle(thread_handle); | ||
| 1664 | current_thread->SetState(ThreadState::Waiting); | ||
| 1665 | current_thread->SetWaitingCondVar(true); | ||
| 1666 | current_process->InsertConditionVariableThread(SharedFrom(current_thread)); | ||
| 1667 | } | ||
| 1668 | |||
| 1669 | if (event_handle != InvalidHandle) { | ||
| 1670 | auto& time_manager = kernel.TimeManager(); | ||
| 1671 | time_manager.UnscheduleTimeEvent(event_handle); | ||
| 1672 | } | ||
| 1673 | |||
| 1674 | { | ||
| 1675 | KScopedSchedulerLock lock(kernel); | ||
| 1676 | |||
| 1677 | auto* owner = current_thread->GetLockOwner(); | ||
| 1678 | if (owner != nullptr) { | ||
| 1679 | owner->RemoveMutexWaiter(SharedFrom(current_thread)); | ||
| 1680 | } | 1608 | } |
| 1681 | 1609 | } else { | |
| 1682 | current_process->RemoveConditionVariableThread(SharedFrom(current_thread)); | 1610 | timeout = timeout_ns; |
| 1683 | } | 1611 | } |
| 1684 | // Note: Deliberately don't attempt to inherit the lock owner's priority. | ||
| 1685 | 1612 | ||
| 1686 | return current_thread->GetSignalingResult(); | 1613 | // Wait on the condition variable. |
| 1614 | return system.Kernel().CurrentProcess()->WaitConditionVariable( | ||
| 1615 | address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout); | ||
| 1687 | } | 1616 | } |
| 1688 | 1617 | ||
| 1689 | static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 mutex_addr, | 1618 | static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag, |
| 1690 | u32 condition_variable_addr, Handle thread_handle, | 1619 | u32 timeout_ns_low, u32 timeout_ns_high) { |
| 1691 | u32 nanoseconds_low, u32 nanoseconds_high) { | 1620 | const auto timeout_ns = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32)); |
| 1692 | const auto nanoseconds = static_cast<s64>(nanoseconds_low | (u64{nanoseconds_high} << 32)); | 1621 | return WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns); |
| 1693 | return WaitProcessWideKeyAtomic(system, mutex_addr, condition_variable_addr, thread_handle, | ||
| 1694 | nanoseconds); | ||
| 1695 | } | 1622 | } |
| 1696 | 1623 | ||
| 1697 | /// Signal process wide key | 1624 | /// Signal process wide key |
| 1698 | static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_addr, s32 target) { | 1625 | static void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count) { |
| 1699 | LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}", | 1626 | LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count); |
| 1700 | condition_variable_addr, target); | 1627 | |
| 1628 | // Signal the condition variable. | ||
| 1629 | return system.Kernel().CurrentProcess()->SignalConditionVariable( | ||
| 1630 | Common::AlignDown(cv_key, sizeof(u32)), count); | ||
| 1631 | } | ||
| 1701 | 1632 | ||
| 1702 | ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); | 1633 | static void SignalProcessWideKey32(Core::System& system, u32 cv_key, s32 count) { |
| 1634 | SignalProcessWideKey(system, cv_key, count); | ||
| 1635 | } | ||
| 1703 | 1636 | ||
| 1704 | // Retrieve a list of all threads that are waiting for this condition variable. | 1637 | namespace { |
| 1705 | auto& kernel = system.Kernel(); | ||
| 1706 | KScopedSchedulerLock lock(kernel); | ||
| 1707 | auto* const current_process = kernel.CurrentProcess(); | ||
| 1708 | std::vector<std::shared_ptr<Thread>> waiting_threads = | ||
| 1709 | current_process->GetConditionVariableThreads(condition_variable_addr); | ||
| 1710 | |||
| 1711 | // Only process up to 'target' threads, unless 'target' is less equal 0, in which case process | ||
| 1712 | // them all. | ||
| 1713 | std::size_t last = waiting_threads.size(); | ||
| 1714 | if (target > 0) { | ||
| 1715 | last = std::min(waiting_threads.size(), static_cast<std::size_t>(target)); | ||
| 1716 | } | ||
| 1717 | for (std::size_t index = 0; index < last; ++index) { | ||
| 1718 | auto& thread = waiting_threads[index]; | ||
| 1719 | |||
| 1720 | ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr); | ||
| 1721 | |||
| 1722 | // liberate Cond Var Thread. | ||
| 1723 | current_process->RemoveConditionVariableThread(thread); | ||
| 1724 | |||
| 1725 | const std::size_t current_core = system.CurrentCoreIndex(); | ||
| 1726 | auto& monitor = system.Monitor(); | ||
| 1727 | |||
| 1728 | // Atomically read the value of the mutex. | ||
| 1729 | u32 mutex_val = 0; | ||
| 1730 | u32 update_val = 0; | ||
| 1731 | const VAddr mutex_address = thread->GetMutexWaitAddress(); | ||
| 1732 | do { | ||
| 1733 | // If the mutex is not yet acquired, acquire it. | ||
| 1734 | mutex_val = monitor.ExclusiveRead32(current_core, mutex_address); | ||
| 1735 | |||
| 1736 | if (mutex_val != 0) { | ||
| 1737 | update_val = mutex_val | Mutex::MutexHasWaitersFlag; | ||
| 1738 | } else { | ||
| 1739 | update_val = thread->GetWaitHandle(); | ||
| 1740 | } | ||
| 1741 | } while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val)); | ||
| 1742 | monitor.ClearExclusive(); | ||
| 1743 | if (mutex_val == 0) { | ||
| 1744 | // We were able to acquire the mutex, resume this thread. | ||
| 1745 | auto* const lock_owner = thread->GetLockOwner(); | ||
| 1746 | if (lock_owner != nullptr) { | ||
| 1747 | lock_owner->RemoveMutexWaiter(thread); | ||
| 1748 | } | ||
| 1749 | 1638 | ||
| 1750 | thread->SetLockOwner(nullptr); | 1639 | constexpr bool IsValidSignalType(Svc::SignalType type) { |
| 1751 | thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS); | 1640 | switch (type) { |
| 1752 | thread->Wakeup(); | 1641 | case Svc::SignalType::Signal: |
| 1753 | } else { | 1642 | case Svc::SignalType::SignalAndIncrementIfEqual: |
| 1754 | // The mutex is already owned by some other thread, make this thread wait on it. | 1643 | case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual: |
| 1755 | const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask); | 1644 | return true; |
| 1756 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | 1645 | default: |
| 1757 | auto owner = handle_table.Get<Thread>(owner_handle); | 1646 | return false; |
| 1758 | ASSERT(owner); | ||
| 1759 | thread->SetWaitingCondVar(false); | ||
| 1760 | |||
| 1761 | owner->AddMutexWaiter(thread); | ||
| 1762 | } | ||
| 1763 | } | 1647 | } |
| 1764 | } | 1648 | } |
| 1765 | 1649 | ||
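Editorial note on the hunk above: the removed loop reads the 32-bit mutex word through the exclusive monitor and either installs the woken thread's wait handle (word was zero, i.e. unowned) or sets the has-waiters flag so the current owner signals on unlock. The sketch below is a rough standalone analogue of that ownership transfer using std::atomic; HAS_WAITERS_BIT is a stand-in for Mutex::MutexHasWaitersFlag and the whole thing is illustrative, not the emulator's code.

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    constexpr std::uint32_t HAS_WAITERS_BIT = 1u << 30; // stand-in for the kernel's waiters flag

    // Try to hand the mutex word to `new_owner_handle`. Returns true if the woken
    // thread now owns the mutex, false if another owner was present (in which case
    // the has-waiters bit has been set instead so the owner will signal on unlock).
    bool TransferOrMarkContended(std::atomic<std::uint32_t>& mutex_word,
                                 std::uint32_t new_owner_handle) {
        std::uint32_t observed = mutex_word.load(std::memory_order_relaxed);
        for (;;) {
            const std::uint32_t desired =
                (observed == 0) ? new_owner_handle : (observed | HAS_WAITERS_BIT);
            if (mutex_word.compare_exchange_weak(observed, desired, std::memory_order_acq_rel,
                                                 std::memory_order_relaxed)) {
                return observed == 0;
            }
            // `observed` was refreshed by the failed CAS; retry with the new value.
        }
    }

    int main() {
        std::atomic<std::uint32_t> mutex_word{0};
        std::printf("acquired: %d\n", TransferOrMarkContended(mutex_word, 0x1234)); // 1
        std::printf("acquired: %d\n", TransferOrMarkContended(mutex_word, 0x5678)); // 0
    }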
| 1766 | static void SignalProcessWideKey32(Core::System& system, u32 condition_variable_addr, s32 target) { | 1650 | constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) { |
| 1767 | SignalProcessWideKey(system, condition_variable_addr, target); | 1651 | switch (type) { |
| 1652 | case Svc::ArbitrationType::WaitIfLessThan: | ||
| 1653 | case Svc::ArbitrationType::DecrementAndWaitIfLessThan: | ||
| 1654 | case Svc::ArbitrationType::WaitIfEqual: | ||
| 1655 | return true; | ||
| 1656 | default: | ||
| 1657 | return false; | ||
| 1658 | } | ||
| 1768 | } | 1659 | } |
| 1769 | 1660 | ||
| 1770 | // Wait for an address (via Address Arbiter) | 1661 | } // namespace |
| 1771 | static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type, s32 value, | ||
| 1772 | s64 timeout) { | ||
| 1773 | LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, timeout={}", address, | ||
| 1774 | type, value, timeout); | ||
| 1775 | |||
| 1776 | // If the passed address is a kernel virtual address, return invalid memory state. | ||
| 1777 | if (Core::Memory::IsKernelVirtualAddress(address)) { | ||
| 1778 | LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address); | ||
| 1779 | return ERR_INVALID_ADDRESS_STATE; | ||
| 1780 | } | ||
| 1781 | 1662 | ||
| 1782 | // If the address is not properly aligned to 4 bytes, return invalid address. | 1663 | // Wait for an address (via Address Arbiter) |
| 1783 | if (!Common::IsWordAligned(address)) { | 1664 | static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type, |
| 1784 | LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address); | 1665 | s32 value, s64 timeout_ns) { |
| 1785 | return ERR_INVALID_ADDRESS; | 1666 | LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}", |
| 1667 | address, arb_type, value, timeout_ns); | ||
| 1668 | |||
| 1669 | // Validate input. | ||
| 1670 | R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory); | ||
| 1671 | R_UNLESS(Common::IsAligned(address, sizeof(int32_t)), Svc::ResultInvalidAddress); | ||
| 1672 | R_UNLESS(IsValidArbitrationType(arb_type), Svc::ResultInvalidEnumValue); | ||
| 1673 | |||
| 1674 | // Convert timeout from nanoseconds to ticks. | ||
| 1675 | s64 timeout{}; | ||
| 1676 | if (timeout_ns > 0) { | ||
| 1677 | const s64 offset_tick(timeout_ns); | ||
| 1678 | if (offset_tick > 0) { | ||
| 1679 | timeout = offset_tick + 2; | ||
| 1680 | if (timeout <= 0) { | ||
| 1681 | timeout = std::numeric_limits<s64>::max(); | ||
| 1682 | } | ||
| 1683 | } else { | ||
| 1684 | timeout = std::numeric_limits<s64>::max(); | ||
| 1685 | } | ||
| 1686 | } else { | ||
| 1687 | timeout = timeout_ns; | ||
| 1786 | } | 1688 | } |
| 1787 | 1689 | ||
| 1788 | const auto arbitration_type = static_cast<AddressArbiter::ArbitrationType>(type); | 1690 | return system.Kernel().CurrentProcess()->WaitAddressArbiter(address, arb_type, value, timeout); |
| 1789 | auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter(); | ||
| 1790 | const ResultCode result = | ||
| 1791 | address_arbiter.WaitForAddress(address, arbitration_type, value, timeout); | ||
| 1792 | return result; | ||
| 1793 | } | 1691 | } |
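Editorial note: the replacement WaitForAddress saturates the timeout instead of letting it wrap. A positive nanosecond count gets a small fixed pad added (the +2 comes straight from the diff), and anything that would overflow collapses to "wait forever"; zero and negative values pass through unchanged. A minimal standalone version of that conversion is sketched below; it checks for overflow before the addition rather than after, to stay within defined signed arithmetic, but the resulting values match.

    #include <cstdint>
    #include <cstdio>
    #include <limits>

    // Convert a user-supplied nanosecond timeout into an internal deadline.
    // Zero and negative values (poll / infinite) are passed through; positive
    // values are padded and clamped so overflow can never shrink the timeout.
    std::int64_t ToInternalTimeout(std::int64_t timeout_ns) {
        if (timeout_ns <= 0) {
            return timeout_ns;
        }
        if (timeout_ns > std::numeric_limits<std::int64_t>::max() - 2) {
            return std::numeric_limits<std::int64_t>::max();
        }
        return timeout_ns + 2;
    }

    int main() {
        std::printf("%lld\n", static_cast<long long>(ToInternalTimeout(1000)));
        std::printf("%lld\n", static_cast<long long>(
                                  ToInternalTimeout(std::numeric_limits<std::int64_t>::max())));
    }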
| 1794 | 1692 | ||
| 1795 | static ResultCode WaitForAddress32(Core::System& system, u32 address, u32 type, s32 value, | 1693 | static ResultCode WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type, |
| 1796 | u32 timeout_low, u32 timeout_high) { | 1694 | s32 value, u32 timeout_ns_low, u32 timeout_ns_high) { |
| 1797 | const auto timeout = static_cast<s64>(timeout_low | (u64{timeout_high} << 32)); | 1695 | const auto timeout = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32)); |
| 1798 | return WaitForAddress(system, address, type, value, timeout); | 1696 | return WaitForAddress(system, address, arb_type, value, timeout); |
| 1799 | } | 1697 | } |
| 1800 | 1698 | ||
| 1801 | // Signals to an address (via Address Arbiter) | 1699 | // Signals to an address (via Address Arbiter) |
| 1802 | static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value, | 1700 | static ResultCode SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type, |
| 1803 | s32 num_to_wake) { | 1701 | s32 value, s32 count) { |
| 1804 | LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, num_to_wake=0x{:X}", | 1702 | LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}", |
| 1805 | address, type, value, num_to_wake); | 1703 | address, signal_type, value, count); |
| 1806 | |||
| 1807 | // If the passed address is a kernel virtual address, return invalid memory state. | ||
| 1808 | if (Core::Memory::IsKernelVirtualAddress(address)) { | ||
| 1809 | LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address); | ||
| 1810 | return ERR_INVALID_ADDRESS_STATE; | ||
| 1811 | } | ||
| 1812 | 1704 | ||
| 1813 | // If the address is not properly aligned to 4 bytes, return invalid address. | 1705 | // Validate input. |
| 1814 | if (!Common::IsWordAligned(address)) { | 1706 | R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory); |
| 1815 | LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address); | 1707 | R_UNLESS(Common::IsAligned(address, sizeof(s32)), Svc::ResultInvalidAddress); |
| 1816 | return ERR_INVALID_ADDRESS; | 1708 | R_UNLESS(IsValidSignalType(signal_type), Svc::ResultInvalidEnumValue); |
| 1817 | } | ||
| 1818 | 1709 | ||
| 1819 | const auto signal_type = static_cast<AddressArbiter::SignalType>(type); | 1710 | return system.Kernel().CurrentProcess()->SignalAddressArbiter(address, signal_type, value, |
| 1820 | auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter(); | 1711 | count); |
| 1821 | return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake); | ||
| 1822 | } | 1712 | } |
| 1823 | 1713 | ||
| 1824 | static ResultCode SignalToAddress32(Core::System& system, u32 address, u32 type, s32 value, | 1714 | static ResultCode SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type, |
| 1825 | s32 num_to_wake) { | 1715 | s32 value, s32 count) { |
| 1826 | return SignalToAddress(system, address, type, value, num_to_wake); | 1716 | return SignalToAddress(system, address, signal_type, value, count); |
| 1827 | } | 1717 | } |
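Editorial note: both SVCs now validate their inputs the same way before touching kernel state: reject kernel-space addresses, require 4-byte alignment, and reject unknown enum values via the IsValid* helpers. R_UNLESS is part of the emulator's result framework; the block below is only an approximation of that guard style, with a hypothetical Result enum standing in for ResultCode and an illustrative (not real) kernel-address boundary.

    #include <cstdint>

    enum class Result { Success, InvalidCurrentMemory, InvalidAddress, InvalidEnumValue };

    // Hypothetical stand-in for the kernel's R_UNLESS(condition, result) early-return macro.
    #define R_UNLESS(cond, res)                                                                    \
        do {                                                                                       \
            if (!(cond)) {                                                                         \
                return (res);                                                                      \
            }                                                                                      \
        } while (0)

    constexpr bool IsKernelAddress(std::uint64_t address) {
        return address >= 0xFFFF000000000000ULL; // illustrative split, not the emulator's layout
    }

    constexpr bool IsAligned(std::uint64_t value, std::uint64_t alignment) {
        return (value & (alignment - 1)) == 0;
    }

    Result ValidateArbiterAddress(std::uint64_t address) {
        R_UNLESS(!IsKernelAddress(address), Result::InvalidCurrentMemory);
        R_UNLESS(IsAligned(address, sizeof(std::int32_t)), Result::InvalidAddress);
        return Result::Success;
    }

    int main() {
        return ValidateArbiterAddress(0x1000) == Result::Success ? 0 : 1;
    }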
| 1828 | 1718 | ||
| 1829 | static void KernelDebug([[maybe_unused]] Core::System& system, | 1719 | static void KernelDebug([[maybe_unused]] Core::System& system, |
diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h index 7734bb236..4af049551 100644 --- a/src/core/hle/kernel/svc_common.h +++ b/src/core/hle/kernel/svc_common.h | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | 8 | ||
| 9 | namespace Kernel::Svc { | 9 | namespace Kernel::Svc { |
| 10 | 10 | ||
| 11 | constexpr s32 ArgumentHandleCountMax = 0x40; | ||
| 11 | constexpr u32 HandleWaitMask{1u << 30}; | 12 | constexpr u32 HandleWaitMask{1u << 30}; |
| 12 | 13 | ||
| 13 | } // namespace Kernel::Svc | 14 | } // namespace Kernel::Svc |
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h index f94c487ba..a32750ed7 100644 --- a/src/core/hle/kernel/svc_wrap.h +++ b/src/core/hle/kernel/svc_wrap.h | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #include "common/common_types.h" | 7 | #include "common/common_types.h" |
| 8 | #include "core/arm/arm_interface.h" | 8 | #include "core/arm/arm_interface.h" |
| 9 | #include "core/core.h" | 9 | #include "core/core.h" |
| 10 | #include "core/hle/kernel/svc_types.h" | ||
| 10 | #include "core/hle/result.h" | 11 | #include "core/hle/result.h" |
| 11 | 12 | ||
| 12 | namespace Kernel { | 13 | namespace Kernel { |
| @@ -277,18 +278,22 @@ void SvcWrap64(Core::System& system) { | |||
| 277 | FuncReturn(system, retval); | 278 | FuncReturn(system, retval); |
| 278 | } | 279 | } |
| 279 | 280 | ||
| 280 | template <ResultCode func(Core::System&, u64, u32, s32, s64)> | 281 | // Used by WaitForAddress |
| 282 | template <ResultCode func(Core::System&, u64, Svc::ArbitrationType, s32, s64)> | ||
| 281 | void SvcWrap64(Core::System& system) { | 283 | void SvcWrap64(Core::System& system) { |
| 282 | FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)), | 284 | FuncReturn(system, |
| 283 | static_cast<s32>(Param(system, 2)), static_cast<s64>(Param(system, 3))) | 285 | func(system, Param(system, 0), static_cast<Svc::ArbitrationType>(Param(system, 1)), |
| 284 | .raw); | 286 | static_cast<s32>(Param(system, 2)), static_cast<s64>(Param(system, 3))) |
| 287 | .raw); | ||
| 285 | } | 288 | } |
| 286 | 289 | ||
| 287 | template <ResultCode func(Core::System&, u64, u32, s32, s32)> | 290 | // Used by SignalToAddress |
| 291 | template <ResultCode func(Core::System&, u64, Svc::SignalType, s32, s32)> | ||
| 288 | void SvcWrap64(Core::System& system) { | 292 | void SvcWrap64(Core::System& system) { |
| 289 | FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)), | 293 | FuncReturn(system, |
| 290 | static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3))) | 294 | func(system, Param(system, 0), static_cast<Svc::SignalType>(Param(system, 1)), |
| 291 | .raw); | 295 | static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3))) |
| 296 | .raw); | ||
| 292 | } | 297 | } |
| 293 | 298 | ||
| 294 | //////////////////////////////////////////////////////////////////////////////////////////////////// | 299 | //////////////////////////////////////////////////////////////////////////////////////////////////// |
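Editorial note: the wrapper changes above are purely about types. Guest registers are still read as raw 64-bit values, but they are now cast to the strongly typed Svc enums before the handler runs, so out-of-range values are caught by the handler's IsValid* checks instead of being silently truncated to u32. The compressed illustration below uses a fake register file in place of Core::System; FakeRegisters and WaitForAddressStub are invented for the example.

    #include <array>
    #include <cstdint>
    #include <cstdio>

    enum class ArbitrationType : std::uint32_t {
        WaitIfLessThan,
        DecrementAndWaitIfLessThan,
        WaitIfEqual,
    };

    struct FakeRegisters {
        std::array<std::uint64_t, 8> x{};
        std::uint64_t Param(std::size_t i) const { return x[i]; }
    };

    // Handler signature mirrors the diff: (address, type, value, timeout) -> result code.
    using Handler = std::uint32_t (*)(std::uint64_t, ArbitrationType, std::int32_t, std::int64_t);

    // The wrapper's only job: pull raw registers and cast them to the typed parameters.
    template <Handler func>
    std::uint32_t SvcWrap(const FakeRegisters& regs) {
        return func(regs.Param(0), static_cast<ArbitrationType>(regs.Param(1)),
                    static_cast<std::int32_t>(regs.Param(2)),
                    static_cast<std::int64_t>(regs.Param(3)));
    }

    std::uint32_t WaitForAddressStub(std::uint64_t addr, ArbitrationType type, std::int32_t value,
                                     std::int64_t timeout_ns) {
        std::printf("addr=0x%llx type=%u value=%d timeout=%lld\n",
                    static_cast<unsigned long long>(addr), static_cast<unsigned>(type),
                    static_cast<int>(value), static_cast<long long>(timeout_ns));
        return 0;
    }

    int main() {
        FakeRegisters regs;
        regs.x = {0x1000, 2, 5, static_cast<std::uint64_t>(-1), 0, 0, 0, 0};
        return static_cast<int>(SvcWrap<&WaitForAddressStub>(regs));
    }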
| @@ -504,22 +509,23 @@ void SvcWrap32(Core::System& system) { | |||
| 504 | } | 509 | } |
| 505 | 510 | ||
| 506 | // Used by WaitForAddress32 | 511 | // Used by WaitForAddress32 |
| 507 | template <ResultCode func(Core::System&, u32, u32, s32, u32, u32)> | 512 | template <ResultCode func(Core::System&, u32, Svc::ArbitrationType, s32, u32, u32)> |
| 508 | void SvcWrap32(Core::System& system) { | 513 | void SvcWrap32(Core::System& system) { |
| 509 | const u32 retval = func(system, static_cast<u32>(Param(system, 0)), | 514 | const u32 retval = func(system, static_cast<u32>(Param(system, 0)), |
| 510 | static_cast<u32>(Param(system, 1)), static_cast<s32>(Param(system, 2)), | 515 | static_cast<Svc::ArbitrationType>(Param(system, 1)), |
| 511 | static_cast<u32>(Param(system, 3)), static_cast<u32>(Param(system, 4))) | 516 | static_cast<s32>(Param(system, 2)), static_cast<u32>(Param(system, 3)), |
| 517 | static_cast<u32>(Param(system, 4))) | ||
| 512 | .raw; | 518 | .raw; |
| 513 | FuncReturn(system, retval); | 519 | FuncReturn(system, retval); |
| 514 | } | 520 | } |
| 515 | 521 | ||
| 516 | // Used by SignalToAddress32 | 522 | // Used by SignalToAddress32 |
| 517 | template <ResultCode func(Core::System&, u32, u32, s32, s32)> | 523 | template <ResultCode func(Core::System&, u32, Svc::SignalType, s32, s32)> |
| 518 | void SvcWrap32(Core::System& system) { | 524 | void SvcWrap32(Core::System& system) { |
| 519 | const u32 retval = | 525 | const u32 retval = func(system, static_cast<u32>(Param(system, 0)), |
| 520 | func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)), | 526 | static_cast<Svc::SignalType>(Param(system, 1)), |
| 521 | static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3))) | 527 | static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3))) |
| 522 | .raw; | 528 | .raw; |
| 523 | FuncReturn(system, retval); | 529 | FuncReturn(system, retval); |
| 524 | } | 530 | } |
| 525 | 531 | ||
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 33a4e1fa3..eda56c31c 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp | |||
| @@ -17,9 +17,11 @@ | |||
| 17 | #include "core/hardware_properties.h" | 17 | #include "core/hardware_properties.h" |
| 18 | #include "core/hle/kernel/errors.h" | 18 | #include "core/hle/kernel/errors.h" |
| 19 | #include "core/hle/kernel/handle_table.h" | 19 | #include "core/hle/kernel/handle_table.h" |
| 20 | #include "core/hle/kernel/k_condition_variable.h" | ||
| 20 | #include "core/hle/kernel/k_scheduler.h" | 21 | #include "core/hle/kernel/k_scheduler.h" |
| 21 | #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" | 22 | #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" |
| 22 | #include "core/hle/kernel/kernel.h" | 23 | #include "core/hle/kernel/kernel.h" |
| 24 | #include "core/hle/kernel/memory/memory_layout.h" | ||
| 23 | #include "core/hle/kernel/object.h" | 25 | #include "core/hle/kernel/object.h" |
| 24 | #include "core/hle/kernel/process.h" | 26 | #include "core/hle/kernel/process.h" |
| 25 | #include "core/hle/kernel/thread.h" | 27 | #include "core/hle/kernel/thread.h" |
| @@ -62,24 +64,6 @@ void Thread::Stop() { | |||
| 62 | 64 | ||
| 63 | void Thread::Wakeup() { | 65 | void Thread::Wakeup() { |
| 64 | KScopedSchedulerLock lock(kernel); | 66 | KScopedSchedulerLock lock(kernel); |
| 65 | switch (thread_state) { | ||
| 66 | case ThreadState::Runnable: | ||
| 67 | // If the thread is waiting on multiple wait objects, it might be awoken more than once | ||
| 68 | // before actually resuming. We can ignore subsequent wakeups if the thread status has | ||
| 69 | // already been set to ThreadStatus::Ready. | ||
| 70 | return; | ||
| 71 | case ThreadState::Terminated: | ||
| 72 | // This should never happen, as threads must complete before being stopped. | ||
| 73 | DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.", | ||
| 74 | GetObjectId()); | ||
| 75 | return; | ||
| 76 | } | ||
| 77 | |||
| 78 | SetState(ThreadState::Runnable); | ||
| 79 | } | ||
| 80 | |||
| 81 | void Thread::OnWakeUp() { | ||
| 82 | KScopedSchedulerLock lock(kernel); | ||
| 83 | SetState(ThreadState::Runnable); | 67 | SetState(ThreadState::Runnable); |
| 84 | } | 68 | } |
| 85 | 69 | ||
| @@ -167,15 +151,14 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy | |||
| 167 | thread->stack_top = stack_top; | 151 | thread->stack_top = stack_top; |
| 168 | thread->disable_count = 1; | 152 | thread->disable_count = 1; |
| 169 | thread->tpidr_el0 = 0; | 153 | thread->tpidr_el0 = 0; |
| 170 | thread->nominal_priority = thread->current_priority = priority; | 154 | thread->current_priority = priority; |
| 155 | thread->base_priority = priority; | ||
| 156 | thread->lock_owner = nullptr; | ||
| 171 | thread->schedule_count = -1; | 157 | thread->schedule_count = -1; |
| 172 | thread->last_scheduled_tick = 0; | 158 | thread->last_scheduled_tick = 0; |
| 173 | thread->processor_id = processor_id; | 159 | thread->processor_id = processor_id; |
| 174 | thread->ideal_core = processor_id; | 160 | thread->ideal_core = processor_id; |
| 175 | thread->affinity_mask.SetAffinity(processor_id, true); | 161 | thread->affinity_mask.SetAffinity(processor_id, true); |
| 176 | thread->mutex_wait_address = 0; | ||
| 177 | thread->condvar_wait_address = 0; | ||
| 178 | thread->wait_handle = 0; | ||
| 179 | thread->name = std::move(name); | 162 | thread->name = std::move(name); |
| 180 | thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap(); | 163 | thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap(); |
| 181 | thread->owner_process = owner_process; | 164 | thread->owner_process = owner_process; |
| @@ -205,12 +188,17 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy | |||
| 205 | return MakeResult<std::shared_ptr<Thread>>(std::move(thread)); | 188 | return MakeResult<std::shared_ptr<Thread>>(std::move(thread)); |
| 206 | } | 189 | } |
| 207 | 190 | ||
| 208 | void Thread::SetPriority(u32 priority) { | 191 | void Thread::SetBasePriority(u32 priority) { |
| 209 | KScopedSchedulerLock lock(kernel); | ||
| 210 | ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST, | 192 | ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST, |
| 211 | "Invalid priority value."); | 193 | "Invalid priority value."); |
| 212 | nominal_priority = priority; | 194 | |
| 213 | UpdatePriority(); | 195 | KScopedSchedulerLock lock(kernel); |
| 196 | |||
| 197 | // Change our base priority. | ||
| 198 | base_priority = priority; | ||
| 199 | |||
| 200 | // Perform a priority restoration. | ||
| 201 | RestorePriority(kernel, this); | ||
| 214 | } | 202 | } |
| 215 | 203 | ||
| 216 | void Thread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) { | 204 | void Thread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) { |
| @@ -224,95 +212,146 @@ VAddr Thread::GetCommandBufferAddress() const { | |||
| 224 | return GetTLSAddress() + command_header_offset; | 212 | return GetTLSAddress() + command_header_offset; |
| 225 | } | 213 | } |
| 226 | 214 | ||
| 227 | void Thread::SetState(ThreadState new_status) { | 215 | void Thread::SetState(ThreadState state) { |
| 228 | if (new_status == thread_state) { | 216 | KScopedSchedulerLock sl(kernel); |
| 229 | return; | 217 | |
| 218 | SetMutexWaitAddressForDebugging(0); | ||
| 219 | const ThreadState old_state = thread_state; | ||
| 220 | thread_state = | ||
| 221 | static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)); | ||
| 222 | if (thread_state != old_state) { | ||
| 223 | KScheduler::OnThreadStateChanged(kernel, this, old_state); | ||
| 230 | } | 224 | } |
| 225 | } | ||
| 226 | |||
| 227 | void Thread::AddWaiterImpl(Thread* thread) { | ||
| 228 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); | ||
| 231 | 229 | ||
| 232 | if (new_status != ThreadState::Waiting) { | 230 | // Find the right spot to insert the waiter. |
| 233 | SetWaitingCondVar(false); | 231 | auto it = waiter_list.begin(); |
| 232 | while (it != waiter_list.end()) { | ||
| 233 | if (it->GetPriority() > thread->GetPriority()) { | ||
| 234 | break; | ||
| 235 | } | ||
| 236 | it++; | ||
| 234 | } | 237 | } |
| 235 | 238 | ||
| 236 | SetSchedulingStatus(new_status); | 239 | // Keep track of how many kernel waiters we have. |
| 240 | if (Memory::IsKernelAddressKey(thread->GetAddressKey())) { | ||
| 241 | ASSERT((num_kernel_waiters++) >= 0); | ||
| 242 | } | ||
| 237 | 243 | ||
| 238 | thread_state = new_status; | 244 | // Insert the waiter. |
| 245 | waiter_list.insert(it, *thread); | ||
| 246 | thread->SetLockOwner(this); | ||
| 239 | } | 247 | } |
| 240 | 248 | ||
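Editorial note: the rewritten SetState only replaces the low state bits and leaves the suspend flags in the upper bits untouched, which is why the explicit HighMask constant could be dropped from the ThreadState enum. The masked update is modelled below with stand-in bit values (ThreadState::Mask itself is defined elsewhere in thread.h); only the pattern is taken from the diff.

    #include <cstdint>
    #include <cstdio>

    // Simplified model: low bits hold the scheduling state, high bits hold suspend flags.
    enum ThreadStateBits : std::uint16_t {
        StateMask = 0x000F,    // stand-in for ThreadState::Mask
        SuspendFlags = 0xFFF0, // everything above the state bits
    };

    // Replace only the state portion of the packed field, preserving the suspend flags.
    std::uint16_t WithNewState(std::uint16_t packed, std::uint16_t new_state) {
        return static_cast<std::uint16_t>((packed & ~StateMask) | (new_state & StateMask));
    }

    int main() {
        const std::uint16_t suspended_waiting = 0x0103; // some suspend flag + a waiting-like state
        std::printf("0x%04X\n", WithNewState(suspended_waiting, 0x0002 /* runnable-like */));
    }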
| 241 | void Thread::AddMutexWaiter(std::shared_ptr<Thread> thread) { | 249 | void Thread::RemoveWaiterImpl(Thread* thread) { |
| 242 | if (thread->lock_owner.get() == this) { | 250 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); |
| 243 | // If the thread is already waiting for this thread to release the mutex, ensure that the | 251 | |
| 244 | // waiters list is consistent and return without doing anything. | 252 | // Keep track of how many kernel waiters we have. |
| 245 | const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); | 253 | if (Memory::IsKernelAddressKey(thread->GetAddressKey())) { |
| 246 | ASSERT(iter != wait_mutex_threads.end()); | 254 | ASSERT((num_kernel_waiters--) > 0); |
| 247 | return; | ||
| 248 | } | 255 | } |
| 249 | 256 | ||
| 250 | // A thread can't wait on two different mutexes at the same time. | 257 | // Remove the waiter. |
| 251 | ASSERT(thread->lock_owner == nullptr); | 258 | waiter_list.erase(waiter_list.iterator_to(*thread)); |
| 259 | thread->SetLockOwner(nullptr); | ||
| 260 | } | ||
| 252 | 261 | ||
| 253 | // Ensure that the thread is not already in the list of mutex waiters | 262 | void Thread::RestorePriority(KernelCore& kernel, Thread* thread) { |
| 254 | const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); | 263 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); |
| 255 | ASSERT(iter == wait_mutex_threads.end()); | ||
| 256 | 264 | ||
| 257 | // Keep the list in an ordered fashion | 265 | while (true) { |
| 258 | const auto insertion_point = std::find_if( | 266 | // We want to inherit priority where possible. |
| 259 | wait_mutex_threads.begin(), wait_mutex_threads.end(), | 267 | s32 new_priority = thread->GetBasePriority(); |
| 260 | [&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); }); | 268 | if (thread->HasWaiters()) { |
| 261 | wait_mutex_threads.insert(insertion_point, thread); | 269 | new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority()); |
| 262 | thread->lock_owner = SharedFrom(this); | 270 | } |
| 263 | 271 | ||
| 264 | UpdatePriority(); | 272 | // If the priority we would inherit is not different from ours, don't do anything. |
| 265 | } | 273 | if (new_priority == thread->GetPriority()) { |
| 274 | return; | ||
| 275 | } | ||
| 266 | 276 | ||
| 267 | void Thread::RemoveMutexWaiter(std::shared_ptr<Thread> thread) { | 277 | // Ensure we don't violate condition variable red black tree invariants. |
| 268 | ASSERT(thread->lock_owner.get() == this); | 278 | if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) { |
| 279 | BeforeUpdatePriority(kernel, cv_tree, thread); | ||
| 280 | } | ||
| 269 | 281 | ||
| 270 | // Ensure that the thread is in the list of mutex waiters | 282 | // Change the priority. |
| 271 | const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); | 283 | const s32 old_priority = thread->GetPriority(); |
| 272 | ASSERT(iter != wait_mutex_threads.end()); | 284 | thread->SetPriority(new_priority); |
| 273 | 285 | ||
| 274 | wait_mutex_threads.erase(iter); | 286 | // Restore the condition variable, if relevant. |
| 287 | if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) { | ||
| 288 | AfterUpdatePriority(kernel, cv_tree, thread); | ||
| 289 | } | ||
| 275 | 290 | ||
| 276 | thread->lock_owner = nullptr; | 291 | // Update the scheduler. |
| 277 | UpdatePriority(); | 292 | KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority); |
| 278 | } | ||
| 279 | 293 | ||
| 280 | void Thread::UpdatePriority() { | 294 | // Keep the lock owner up to date. |
| 281 | // If any of the threads waiting on the mutex have a higher priority | 295 | Thread* lock_owner = thread->GetLockOwner(); |
| 282 | // (taking into account priority inheritance), then this thread inherits | 296 | if (lock_owner == nullptr) { |
| 283 | // that thread's priority. | 297 | return; |
| 284 | u32 new_priority = nominal_priority; | ||
| 285 | if (!wait_mutex_threads.empty()) { | ||
| 286 | if (wait_mutex_threads.front()->current_priority < new_priority) { | ||
| 287 | new_priority = wait_mutex_threads.front()->current_priority; | ||
| 288 | } | 298 | } |
| 289 | } | ||
| 290 | 299 | ||
| 291 | if (new_priority == current_priority) { | 300 | // Update the thread in the lock owner's sorted list, and continue inheriting. |
| 292 | return; | 301 | lock_owner->RemoveWaiterImpl(thread); |
| 302 | lock_owner->AddWaiterImpl(thread); | ||
| 303 | thread = lock_owner; | ||
| 293 | } | 304 | } |
| 305 | } | ||
| 294 | 306 | ||
| 295 | if (GetState() == ThreadState::Waiting && is_waiting_on_condvar) { | 307 | void Thread::AddWaiter(Thread* thread) { |
| 296 | owner_process->RemoveConditionVariableThread(SharedFrom(this)); | 308 | AddWaiterImpl(thread); |
| 297 | } | 309 | RestorePriority(kernel, this); |
| 310 | } | ||
| 298 | 311 | ||
| 299 | SetCurrentPriority(new_priority); | 312 | void Thread::RemoveWaiter(Thread* thread) { |
| 313 | RemoveWaiterImpl(thread); | ||
| 314 | RestorePriority(kernel, this); | ||
| 315 | } | ||
| 300 | 316 | ||
| 301 | if (GetState() == ThreadState::Waiting && is_waiting_on_condvar) { | 317 | Thread* Thread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) { |
| 302 | owner_process->InsertConditionVariableThread(SharedFrom(this)); | 318 | ASSERT(kernel.GlobalSchedulerContext().IsLocked()); |
| 303 | } | ||
| 304 | 319 | ||
| 305 | if (!lock_owner) { | 320 | s32 num_waiters{}; |
| 306 | return; | 321 | Thread* next_lock_owner{}; |
| 322 | auto it = waiter_list.begin(); | ||
| 323 | while (it != waiter_list.end()) { | ||
| 324 | if (it->GetAddressKey() == key) { | ||
| 325 | Thread* thread = std::addressof(*it); | ||
| 326 | |||
| 327 | // Keep track of how many kernel waiters we have. | ||
| 328 | if (Memory::IsKernelAddressKey(thread->GetAddressKey())) { | ||
| 329 | ASSERT((num_kernel_waiters--) > 0); | ||
| 330 | } | ||
| 331 | it = waiter_list.erase(it); | ||
| 332 | |||
| 333 | // Update the next lock owner. | ||
| 334 | if (next_lock_owner == nullptr) { | ||
| 335 | next_lock_owner = thread; | ||
| 336 | next_lock_owner->SetLockOwner(nullptr); | ||
| 337 | } else { | ||
| 338 | next_lock_owner->AddWaiterImpl(thread); | ||
| 339 | } | ||
| 340 | num_waiters++; | ||
| 341 | } else { | ||
| 342 | it++; | ||
| 343 | } | ||
| 307 | } | 344 | } |
| 308 | 345 | ||
| 309 | // Ensure that the thread is within the correct location in the waiting list. | 346 | // Do priority updates, if we have a next owner. |
| 310 | auto old_owner = lock_owner; | 347 | if (next_lock_owner) { |
| 311 | lock_owner->RemoveMutexWaiter(SharedFrom(this)); | 348 | RestorePriority(kernel, this); |
| 312 | old_owner->AddMutexWaiter(SharedFrom(this)); | 349 | RestorePriority(kernel, next_lock_owner); |
| 350 | } | ||
| 313 | 351 | ||
| 314 | // Recursively update the priority of the thread that depends on the priority of this one. | 352 | // Return output. |
| 315 | lock_owner->UpdatePriority(); | 353 | *out_num_waiters = num_waiters; |
| 354 | return next_lock_owner; | ||
| 316 | } | 355 | } |
| 317 | 356 | ||
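Editorial note: the recursion in the old UpdatePriority has become an explicit loop in RestorePriority. A thread's effective priority is the minimum of its base priority and the priority of its best waiter, and whenever that value changes the thread is re-sorted in its own lock owner's waiter list and the walk continues up the ownership chain. The compact model below uses plain std containers instead of the intrusive list and red-black tree, and scans all waiters rather than keeping them sorted; it only illustrates the inheritance walk, not the real bookkeeping.

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Thread {
        int base_priority;
        int current_priority;
        Thread* lock_owner = nullptr; // thread holding the lock this thread waits on
        std::vector<Thread*> waiters; // threads waiting on locks this thread holds
    };

    // Walk the ownership chain, propagating the best (numerically lowest) priority.
    void RestorePriority(Thread* thread) {
        while (thread != nullptr) {
            int new_priority = thread->base_priority;
            for (const Thread* waiter : thread->waiters) {
                new_priority = std::min(new_priority, waiter->current_priority);
            }
            if (new_priority == thread->current_priority) {
                return; // nothing changed; inheritance stops here
            }
            thread->current_priority = new_priority;
            thread = thread->lock_owner; // continue inheriting up the chain
        }
    }

    int main() {
        Thread owner{10, 10};
        Thread waiter{3, 3};
        waiter.lock_owner = &owner;
        owner.waiters.push_back(&waiter);
        RestorePriority(&owner);
        std::printf("owner priority: %d\n", owner.current_priority); // 3
    }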
| 318 | ResultCode Thread::SetActivity(ThreadActivity value) { | 357 | ResultCode Thread::SetActivity(ThreadActivity value) { |
| @@ -372,18 +411,6 @@ void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) { | |||
| 372 | KScheduler::OnThreadStateChanged(kernel, this, old_state); | 411 | KScheduler::OnThreadStateChanged(kernel, this, old_state); |
| 373 | } | 412 | } |
| 374 | 413 | ||
| 375 | void Thread::SetSchedulingStatus(ThreadState new_status) { | ||
| 376 | const auto old_state = GetRawState(); | ||
| 377 | thread_state = (thread_state & ThreadState::HighMask) | new_status; | ||
| 378 | KScheduler::OnThreadStateChanged(kernel, this, old_state); | ||
| 379 | } | ||
| 380 | |||
| 381 | void Thread::SetCurrentPriority(u32 new_priority) { | ||
| 382 | const u32 old_priority = std::exchange(current_priority, new_priority); | ||
| 383 | KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(), | ||
| 384 | old_priority); | ||
| 385 | } | ||
| 386 | |||
| 387 | ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { | 414 | ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { |
| 388 | KScopedSchedulerLock lock(kernel); | 415 | KScopedSchedulerLock lock(kernel); |
| 389 | const auto HighestSetCore = [](u64 mask, u32 max_cores) { | 416 | const auto HighestSetCore = [](u64 mask, u32 max_cores) { |
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index 06dd2ef2d..820ea524f 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h | |||
| @@ -6,16 +6,21 @@ | |||
| 6 | 6 | ||
| 7 | #include <array> | 7 | #include <array> |
| 8 | #include <functional> | 8 | #include <functional> |
| 9 | #include <span> | ||
| 9 | #include <string> | 10 | #include <string> |
| 10 | #include <utility> | 11 | #include <utility> |
| 11 | #include <vector> | 12 | #include <vector> |
| 12 | 13 | ||
| 14 | #include <boost/intrusive/list.hpp> | ||
| 15 | |||
| 13 | #include "common/common_types.h" | 16 | #include "common/common_types.h" |
| 17 | #include "common/intrusive_red_black_tree.h" | ||
| 14 | #include "common/spin_lock.h" | 18 | #include "common/spin_lock.h" |
| 15 | #include "core/arm/arm_interface.h" | 19 | #include "core/arm/arm_interface.h" |
| 16 | #include "core/hle/kernel/k_affinity_mask.h" | 20 | #include "core/hle/kernel/k_affinity_mask.h" |
| 17 | #include "core/hle/kernel/k_synchronization_object.h" | 21 | #include "core/hle/kernel/k_synchronization_object.h" |
| 18 | #include "core/hle/kernel/object.h" | 22 | #include "core/hle/kernel/object.h" |
| 23 | #include "core/hle/kernel/svc_common.h" | ||
| 19 | #include "core/hle/result.h" | 24 | #include "core/hle/result.h" |
| 20 | 25 | ||
| 21 | namespace Common { | 26 | namespace Common { |
| @@ -89,8 +94,6 @@ enum class ThreadState : u16 { | |||
| 89 | InitSuspended = (1 << (4 + SuspendShift)), | 94 | InitSuspended = (1 << (4 + SuspendShift)), |
| 90 | 95 | ||
| 91 | SuspendFlagMask = ((1 << 5) - 1) << SuspendShift, | 96 | SuspendFlagMask = ((1 << 5) - 1) << SuspendShift, |
| 92 | |||
| 93 | HighMask = 0xfff0, | ||
| 94 | }; | 97 | }; |
| 95 | DECLARE_ENUM_FLAG_OPERATORS(ThreadState); | 98 | DECLARE_ENUM_FLAG_OPERATORS(ThreadState); |
| 96 | 99 | ||
| @@ -111,7 +114,10 @@ enum class ThreadSchedFlags : u32 { | |||
| 111 | KernelInitPauseFlag = 1 << 8, | 114 | KernelInitPauseFlag = 1 << 8, |
| 112 | }; | 115 | }; |
| 113 | 116 | ||
| 114 | class Thread final : public KSynchronizationObject { | 117 | class Thread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> { |
| 118 | friend class KScheduler; | ||
| 119 | friend class Process; | ||
| 120 | |||
| 115 | public: | 121 | public: |
| 116 | explicit Thread(KernelCore& kernel); | 122 | explicit Thread(KernelCore& kernel); |
| 117 | ~Thread() override; | 123 | ~Thread() override; |
| @@ -180,49 +186,46 @@ public: | |||
| 180 | * Gets the thread's current priority | 186 | * Gets the thread's current priority |
| 181 | * @return The current thread's priority | 187 | * @return The current thread's priority |
| 182 | */ | 188 | */ |
| 183 | u32 GetPriority() const { | 189 | [[nodiscard]] s32 GetPriority() const { |
| 184 | return current_priority; | 190 | return current_priority; |
| 185 | } | 191 | } |
| 186 | 192 | ||
| 187 | /** | 193 | /** |
| 194 | * Sets the thread's current priority. | ||
| 195 | * @param priority The new priority. | ||
| 196 | */ | ||
| 197 | void SetPriority(s32 priority) { | ||
| 198 | current_priority = priority; | ||
| 199 | } | ||
| 200 | |||
| 201 | /** | ||
| 188 | * Gets the thread's nominal priority. | 202 | * Gets the thread's nominal priority. |
| 189 | * @return The current thread's nominal priority. | 203 | * @return The current thread's nominal priority. |
| 190 | */ | 204 | */ |
| 191 | u32 GetNominalPriority() const { | 205 | [[nodiscard]] s32 GetBasePriority() const { |
| 192 | return nominal_priority; | 206 | return base_priority; |
| 193 | } | 207 | } |
| 194 | 208 | ||
| 195 | /** | 209 | /** |
| 196 | * Sets the thread's current priority | 210 | * Sets the thread's nominal priority. |
| 197 | * @param priority The new priority | 211 | * @param priority The new priority. |
| 198 | */ | 212 | */ |
| 199 | void SetPriority(u32 priority); | 213 | void SetBasePriority(u32 priority); |
| 200 | |||
| 201 | /// Adds a thread to the list of threads that are waiting for a lock held by this thread. | ||
| 202 | void AddMutexWaiter(std::shared_ptr<Thread> thread); | ||
| 203 | |||
| 204 | /// Removes a thread from the list of threads that are waiting for a lock held by this thread. | ||
| 205 | void RemoveMutexWaiter(std::shared_ptr<Thread> thread); | ||
| 206 | |||
| 207 | /// Recalculates the current priority taking into account priority inheritance. | ||
| 208 | void UpdatePriority(); | ||
| 209 | 214 | ||
| 210 | /// Changes the core that the thread is running or scheduled to run on. | 215 | /// Changes the core that the thread is running or scheduled to run on. |
| 211 | ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask); | 216 | [[nodiscard]] ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask); |
| 212 | 217 | ||
| 213 | /** | 218 | /** |
| 214 | * Gets the thread's thread ID | 219 | * Gets the thread's thread ID |
| 215 | * @return The thread's ID | 220 | * @return The thread's ID |
| 216 | */ | 221 | */ |
| 217 | u64 GetThreadID() const { | 222 | [[nodiscard]] u64 GetThreadID() const { |
| 218 | return thread_id; | 223 | return thread_id; |
| 219 | } | 224 | } |
| 220 | 225 | ||
| 221 | /// Resumes a thread from waiting | 226 | /// Resumes a thread from waiting |
| 222 | void Wakeup(); | 227 | void Wakeup(); |
| 223 | 228 | ||
| 224 | void OnWakeUp(); | ||
| 225 | |||
| 226 | ResultCode Start(); | 229 | ResultCode Start(); |
| 227 | 230 | ||
| 228 | virtual bool IsSignaled() const override; | 231 | virtual bool IsSignaled() const override; |
| @@ -242,7 +245,7 @@ public: | |||
| 242 | } | 245 | } |
| 243 | 246 | ||
| 244 | ResultCode GetWaitResult(KSynchronizationObject** out) const { | 247 | ResultCode GetWaitResult(KSynchronizationObject** out) const { |
| 245 | *out = this->signaling_object; | 248 | *out = signaling_object; |
| 246 | return signaling_result; | 249 | return signaling_result; |
| 247 | } | 250 | } |
| 248 | 251 | ||
| @@ -328,18 +331,14 @@ public: | |||
| 328 | return thread_state; | 331 | return thread_state; |
| 329 | } | 332 | } |
| 330 | 333 | ||
| 331 | void SetState(ThreadState new_state); | 334 | void SetState(ThreadState state); |
| 332 | |||
| 333 | void SetWaitingCondVar(bool value) { | ||
| 334 | is_waiting_on_condvar = value; | ||
| 335 | } | ||
| 336 | 335 | ||
| 337 | s64 GetLastScheduledTick() const { | 336 | s64 GetLastScheduledTick() const { |
| 338 | return this->last_scheduled_tick; | 337 | return last_scheduled_tick; |
| 339 | } | 338 | } |
| 340 | 339 | ||
| 341 | void SetLastScheduledTick(s64 tick) { | 340 | void SetLastScheduledTick(s64 tick) { |
| 342 | this->last_scheduled_tick = tick; | 341 | last_scheduled_tick = tick; |
| 343 | } | 342 | } |
| 344 | 343 | ||
| 345 | u64 GetTotalCPUTimeTicks() const { | 344 | u64 GetTotalCPUTimeTicks() const { |
| @@ -379,55 +378,13 @@ public: | |||
| 379 | } | 378 | } |
| 380 | 379 | ||
| 381 | Thread* GetLockOwner() const { | 380 | Thread* GetLockOwner() const { |
| 382 | return lock_owner.get(); | 381 | return lock_owner; |
| 383 | } | ||
| 384 | |||
| 385 | void SetLockOwner(std::shared_ptr<Thread> owner) { | ||
| 386 | lock_owner = std::move(owner); | ||
| 387 | } | ||
| 388 | |||
| 389 | VAddr GetCondVarWaitAddress() const { | ||
| 390 | return condvar_wait_address; | ||
| 391 | } | ||
| 392 | |||
| 393 | void SetCondVarWaitAddress(VAddr address) { | ||
| 394 | condvar_wait_address = address; | ||
| 395 | } | ||
| 396 | |||
| 397 | VAddr GetMutexWaitAddress() const { | ||
| 398 | return mutex_wait_address; | ||
| 399 | } | 382 | } |
| 400 | 383 | ||
| 401 | void SetMutexWaitAddress(VAddr address) { | 384 | void SetLockOwner(Thread* owner) { |
| 402 | mutex_wait_address = address; | 385 | lock_owner = owner; |
| 403 | } | 386 | } |
| 404 | 387 | ||
| 405 | Handle GetWaitHandle() const { | ||
| 406 | return wait_handle; | ||
| 407 | } | ||
| 408 | |||
| 409 | void SetWaitHandle(Handle handle) { | ||
| 410 | wait_handle = handle; | ||
| 411 | } | ||
| 412 | |||
| 413 | VAddr GetArbiterWaitAddress() const { | ||
| 414 | return arb_wait_address; | ||
| 415 | } | ||
| 416 | |||
| 417 | void SetArbiterWaitAddress(VAddr address) { | ||
| 418 | arb_wait_address = address; | ||
| 419 | } | ||
| 420 | |||
| 421 | void SetHLETimeEvent(Handle time_event) { | ||
| 422 | hle_time_event = time_event; | ||
| 423 | } | ||
| 424 | |||
| 425 | Handle GetHLETimeEvent() const { | ||
| 426 | return hle_time_event; | ||
| 427 | } | ||
| 428 | |||
| 429 | bool InvokeHLECallback(std::shared_ptr<Thread> thread); | ||
| 430 | |||
| 431 | u32 GetIdealCore() const { | 388 | u32 GetIdealCore() const { |
| 432 | return ideal_core; | 389 | return ideal_core; |
| 433 | } | 390 | } |
| @@ -442,11 +399,11 @@ public: | |||
| 442 | ResultCode Sleep(s64 nanoseconds); | 399 | ResultCode Sleep(s64 nanoseconds); |
| 443 | 400 | ||
| 444 | s64 GetYieldScheduleCount() const { | 401 | s64 GetYieldScheduleCount() const { |
| 445 | return this->schedule_count; | 402 | return schedule_count; |
| 446 | } | 403 | } |
| 447 | 404 | ||
| 448 | void SetYieldScheduleCount(s64 count) { | 405 | void SetYieldScheduleCount(s64 count) { |
| 449 | this->schedule_count = count; | 406 | schedule_count = count; |
| 450 | } | 407 | } |
| 451 | 408 | ||
| 452 | bool IsRunning() const { | 409 | bool IsRunning() const { |
| @@ -469,14 +426,6 @@ public: | |||
| 469 | return global_handle; | 426 | return global_handle; |
| 470 | } | 427 | } |
| 471 | 428 | ||
| 472 | bool IsWaitingForArbitration() const { | ||
| 473 | return waiting_for_arbitration; | ||
| 474 | } | ||
| 475 | |||
| 476 | void WaitForArbitration(bool set) { | ||
| 477 | waiting_for_arbitration = set; | ||
| 478 | } | ||
| 479 | |||
| 480 | bool IsCancellable() const { | 429 | bool IsCancellable() const { |
| 481 | return is_cancellable; | 430 | return is_cancellable; |
| 482 | } | 431 | } |
| @@ -490,7 +439,7 @@ public: | |||
| 490 | } | 439 | } |
| 491 | 440 | ||
| 492 | bool IsTerminationRequested() const { | 441 | bool IsTerminationRequested() const { |
| 493 | return will_be_terminated || GetState() == ThreadState::Terminated; | 442 | return will_be_terminated || GetRawState() == ThreadState::Terminated; |
| 494 | } | 443 | } |
| 495 | 444 | ||
| 496 | bool IsPaused() const { | 445 | bool IsPaused() const { |
| @@ -522,21 +471,21 @@ public: | |||
| 522 | constexpr QueueEntry() = default; | 471 | constexpr QueueEntry() = default; |
| 523 | 472 | ||
| 524 | constexpr void Initialize() { | 473 | constexpr void Initialize() { |
| 525 | this->prev = nullptr; | 474 | prev = nullptr; |
| 526 | this->next = nullptr; | 475 | next = nullptr; |
| 527 | } | 476 | } |
| 528 | 477 | ||
| 529 | constexpr Thread* GetPrev() const { | 478 | constexpr Thread* GetPrev() const { |
| 530 | return this->prev; | 479 | return prev; |
| 531 | } | 480 | } |
| 532 | constexpr Thread* GetNext() const { | 481 | constexpr Thread* GetNext() const { |
| 533 | return this->next; | 482 | return next; |
| 534 | } | 483 | } |
| 535 | constexpr void SetPrev(Thread* thread) { | 484 | constexpr void SetPrev(Thread* thread) { |
| 536 | this->prev = thread; | 485 | prev = thread; |
| 537 | } | 486 | } |
| 538 | constexpr void SetNext(Thread* thread) { | 487 | constexpr void SetNext(Thread* thread) { |
| 539 | this->next = thread; | 488 | next = thread; |
| 540 | } | 489 | } |
| 541 | 490 | ||
| 542 | private: | 491 | private: |
| @@ -545,11 +494,11 @@ public: | |||
| 545 | }; | 494 | }; |
| 546 | 495 | ||
| 547 | QueueEntry& GetPriorityQueueEntry(s32 core) { | 496 | QueueEntry& GetPriorityQueueEntry(s32 core) { |
| 548 | return this->per_core_priority_queue_entry[core]; | 497 | return per_core_priority_queue_entry[core]; |
| 549 | } | 498 | } |
| 550 | 499 | ||
| 551 | const QueueEntry& GetPriorityQueueEntry(s32 core) const { | 500 | const QueueEntry& GetPriorityQueueEntry(s32 core) const { |
| 552 | return this->per_core_priority_queue_entry[core]; | 501 | return per_core_priority_queue_entry[core]; |
| 553 | } | 502 | } |
| 554 | 503 | ||
| 555 | s32 GetDisableDispatchCount() const { | 504 | s32 GetDisableDispatchCount() const { |
| @@ -566,27 +515,155 @@ public: | |||
| 566 | disable_count--; | 515 | disable_count--; |
| 567 | } | 516 | } |
| 568 | 517 | ||
| 569 | void SetWaitObjectsForDebugging(KSynchronizationObject** objects, s32 num_objects) { | 518 | void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) { |
| 570 | wait_objects_for_debugging.clear(); | 519 | wait_objects_for_debugging.clear(); |
| 571 | wait_objects_for_debugging.reserve(num_objects); | 520 | wait_objects_for_debugging.reserve(objects.size()); |
| 572 | for (auto i = 0; i < num_objects; ++i) { | 521 | for (const auto& object : objects) { |
| 573 | wait_objects_for_debugging.emplace_back(objects[i]); | 522 | wait_objects_for_debugging.emplace_back(object); |
| 574 | } | 523 | } |
| 575 | } | 524 | } |
| 576 | 525 | ||
| 577 | const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const { | 526 | [[nodiscard]] const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const { |
| 578 | return wait_objects_for_debugging; | 527 | return wait_objects_for_debugging; |
| 579 | } | 528 | } |
| 580 | 529 | ||
| 530 | void SetMutexWaitAddressForDebugging(VAddr address) { | ||
| 531 | mutex_wait_address_for_debugging = address; | ||
| 532 | } | ||
| 533 | |||
| 534 | [[nodiscard]] VAddr GetMutexWaitAddressForDebugging() const { | ||
| 535 | return mutex_wait_address_for_debugging; | ||
| 536 | } | ||
| 537 | |||
| 538 | void AddWaiter(Thread* thread); | ||
| 539 | |||
| 540 | void RemoveWaiter(Thread* thread); | ||
| 541 | |||
| 542 | [[nodiscard]] Thread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key); | ||
| 543 | |||
| 544 | [[nodiscard]] VAddr GetAddressKey() const { | ||
| 545 | return address_key; | ||
| 546 | } | ||
| 547 | |||
| 548 | [[nodiscard]] u32 GetAddressKeyValue() const { | ||
| 549 | return address_key_value; | ||
| 550 | } | ||
| 551 | |||
| 552 | void SetAddressKey(VAddr key) { | ||
| 553 | address_key = key; | ||
| 554 | } | ||
| 555 | |||
| 556 | void SetAddressKey(VAddr key, u32 val) { | ||
| 557 | address_key = key; | ||
| 558 | address_key_value = val; | ||
| 559 | } | ||
| 560 | |||
| 581 | private: | 561 | private: |
| 582 | friend class GlobalSchedulerContext; | 562 | static constexpr size_t PriorityInheritanceCountMax = 10; |
| 583 | friend class KScheduler; | 563 | union SyncObjectBuffer { |
| 584 | friend class Process; | 564 | std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> sync_objects{}; |
| 565 | std::array<Handle, | ||
| 566 | Svc::ArgumentHandleCountMax*(sizeof(KSynchronizationObject*) / sizeof(Handle))> | ||
| 567 | handles; | ||
| 568 | constexpr SyncObjectBuffer() {} | ||
| 569 | }; | ||
| 570 | static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles)); | ||
| 571 | |||
| 572 | struct ConditionVariableComparator { | ||
| 573 | struct LightCompareType { | ||
| 574 | u64 cv_key{}; | ||
| 575 | s32 priority{}; | ||
| 576 | |||
| 577 | [[nodiscard]] constexpr u64 GetConditionVariableKey() const { | ||
| 578 | return cv_key; | ||
| 579 | } | ||
| 580 | |||
| 581 | [[nodiscard]] constexpr s32 GetPriority() const { | ||
| 582 | return priority; | ||
| 583 | } | ||
| 584 | }; | ||
| 585 | |||
| 586 | template <typename T> | ||
| 587 | requires( | ||
| 588 | std::same_as<T, Thread> || | ||
| 589 | std::same_as<T, LightCompareType>) static constexpr int Compare(const T& lhs, | ||
| 590 | const Thread& rhs) { | ||
| 591 | const uintptr_t l_key = lhs.GetConditionVariableKey(); | ||
| 592 | const uintptr_t r_key = rhs.GetConditionVariableKey(); | ||
| 593 | |||
| 594 | if (l_key < r_key) { | ||
| 595 | // Sort first by key | ||
| 596 | return -1; | ||
| 597 | } else if (l_key == r_key && lhs.GetPriority() < rhs.GetPriority()) { | ||
| 598 | // And then by priority. | ||
| 599 | return -1; | ||
| 600 | } else { | ||
| 601 | return 1; | ||
| 602 | } | ||
| 603 | } | ||
| 604 | }; | ||
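Editorial note: the comparator above orders the shared red-black tree first by condition-variable (or arbiter) key and then by thread priority, so the front of each key's run is always the highest-priority waiter. The same ordering can be modelled with an ordinary std::multiset, which is roughly what the intrusive tree provides without per-node allocations; Waiter and WaiterOrder below are invented for the example.

    #include <cstdint>
    #include <cstdio>
    #include <set>

    struct Waiter {
        std::uint64_t key; // condition-variable / arbiter address key
        int priority;      // numerically lower = more urgent
        int thread_id;
    };

    struct WaiterOrder {
        bool operator()(const Waiter& lhs, const Waiter& rhs) const {
            if (lhs.key != rhs.key) {
                return lhs.key < rhs.key; // sort first by key
            }
            return lhs.priority < rhs.priority; // then by priority within a key
        }
    };

    int main() {
        std::multiset<Waiter, WaiterOrder> tree;
        tree.insert({0x1000, 44, 1});
        tree.insert({0x1000, 12, 2});
        tree.insert({0x2000, 30, 3});

        // The first waiter for key 0x1000 is the highest-priority one (thread 2).
        const auto it = tree.lower_bound({0x1000, -1, 0});
        std::printf("wake thread %d\n", it->thread_id);
    }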
| 605 | |||
| 606 | Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{}; | ||
| 607 | |||
| 608 | using ConditionVariableThreadTreeTraits = | ||
| 609 | Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&Thread::condvar_arbiter_tree_node>; | ||
| 610 | using ConditionVariableThreadTree = | ||
| 611 | ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>; | ||
| 612 | |||
| 613 | public: | ||
| 614 | using ConditionVariableThreadTreeType = ConditionVariableThreadTree; | ||
| 585 | 615 | ||
| 586 | void SetSchedulingStatus(ThreadState new_status); | 616 | [[nodiscard]] uintptr_t GetConditionVariableKey() const { |
| 617 | return condvar_key; | ||
| 618 | } | ||
| 619 | |||
| 620 | [[nodiscard]] uintptr_t GetAddressArbiterKey() const { | ||
| 621 | return condvar_key; | ||
| 622 | } | ||
| 623 | |||
| 624 | void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, uintptr_t cv_key, | ||
| 625 | u32 value) { | ||
| 626 | condvar_tree = tree; | ||
| 627 | condvar_key = cv_key; | ||
| 628 | address_key = address; | ||
| 629 | address_key_value = value; | ||
| 630 | } | ||
| 631 | |||
| 632 | void ClearConditionVariable() { | ||
| 633 | condvar_tree = nullptr; | ||
| 634 | } | ||
| 635 | |||
| 636 | [[nodiscard]] bool IsWaitingForConditionVariable() const { | ||
| 637 | return condvar_tree != nullptr; | ||
| 638 | } | ||
| 639 | |||
| 640 | void SetAddressArbiter(ConditionVariableThreadTree* tree, uintptr_t address) { | ||
| 641 | condvar_tree = tree; | ||
| 642 | condvar_key = address; | ||
| 643 | } | ||
| 644 | |||
| 645 | void ClearAddressArbiter() { | ||
| 646 | condvar_tree = nullptr; | ||
| 647 | } | ||
| 648 | |||
| 649 | [[nodiscard]] bool IsWaitingForAddressArbiter() const { | ||
| 650 | return condvar_tree != nullptr; | ||
| 651 | } | ||
| 652 | |||
| 653 | [[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const { | ||
| 654 | return condvar_tree; | ||
| 655 | } | ||
| 656 | |||
| 657 | [[nodiscard]] bool HasWaiters() const { | ||
| 658 | return !waiter_list.empty(); | ||
| 659 | } | ||
| 660 | |||
| 661 | private: | ||
| 587 | void AddSchedulingFlag(ThreadSchedFlags flag); | 662 | void AddSchedulingFlag(ThreadSchedFlags flag); |
| 588 | void RemoveSchedulingFlag(ThreadSchedFlags flag); | 663 | void RemoveSchedulingFlag(ThreadSchedFlags flag); |
| 589 | void SetCurrentPriority(u32 new_priority); | 664 | void AddWaiterImpl(Thread* thread); |
| 665 | void RemoveWaiterImpl(Thread* thread); | ||
| 666 | static void RestorePriority(KernelCore& kernel, Thread* thread); | ||
| 590 | 667 | ||
| 591 | Common::SpinLock context_guard{}; | 668 | Common::SpinLock context_guard{}; |
| 592 | ThreadContext32 context_32{}; | 669 | ThreadContext32 context_32{}; |
| @@ -606,11 +683,11 @@ private: | |||
| 606 | /// Nominal thread priority, as set by the emulated application. | 683 | /// Nominal thread priority, as set by the emulated application. |
| 607 | /// The nominal priority is the thread priority without priority | 684 | /// The nominal priority is the thread priority without priority |
| 608 | /// inheritance taken into account. | 685 | /// inheritance taken into account. |
| 609 | u32 nominal_priority = 0; | 686 | s32 base_priority{}; |
| 610 | 687 | ||
| 611 | /// Current thread priority. This may change over the course of the | 688 | /// Current thread priority. This may change over the course of the |
| 612 | /// thread's lifetime in order to facilitate priority inheritance. | 689 | /// thread's lifetime in order to facilitate priority inheritance. |
| 613 | u32 current_priority = 0; | 690 | s32 current_priority{}; |
| 614 | 691 | ||
| 615 | u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks. | 692 | u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks. |
| 616 | s64 schedule_count{}; | 693 | s64 schedule_count{}; |
| @@ -628,6 +705,9 @@ private: | |||
| 628 | /// passed to WaitSynchronization. This is used for debugging only. | 705 | /// passed to WaitSynchronization. This is used for debugging only. |
| 629 | std::vector<KSynchronizationObject*> wait_objects_for_debugging; | 706 | std::vector<KSynchronizationObject*> wait_objects_for_debugging; |
| 630 | 707 | ||
| 708 | /// The current mutex wait address. This is used for debugging only. | ||
| 709 | VAddr mutex_wait_address_for_debugging{}; | ||
| 710 | |||
| 631 | KSynchronizationObject* signaling_object; | 711 | KSynchronizationObject* signaling_object; |
| 632 | ResultCode signaling_result{RESULT_SUCCESS}; | 712 | ResultCode signaling_result{RESULT_SUCCESS}; |
| 633 | 713 | ||
| @@ -635,25 +715,11 @@ private: | |||
| 635 | MutexWaitingThreads wait_mutex_threads; | 715 | MutexWaitingThreads wait_mutex_threads; |
| 636 | 716 | ||
| 637 | /// Thread that owns the lock that this thread is waiting for. | 717 | /// Thread that owns the lock that this thread is waiting for. |
| 638 | std::shared_ptr<Thread> lock_owner; | 718 | Thread* lock_owner{}; |
| 639 | |||
| 640 | /// If waiting on a ConditionVariable, this is the ConditionVariable address | ||
| 641 | VAddr condvar_wait_address = 0; | ||
| 642 | bool is_waiting_on_condvar{}; | ||
| 643 | /// If waiting on a Mutex, this is the mutex address | ||
| 644 | VAddr mutex_wait_address = 0; | ||
| 645 | /// The handle used to wait for the mutex. | ||
| 646 | Handle wait_handle = 0; | ||
| 647 | |||
| 648 | /// If waiting for an AddressArbiter, this is the address being waited on. | ||
| 649 | VAddr arb_wait_address{0}; | ||
| 650 | bool waiting_for_arbitration{}; | ||
| 651 | 719 | ||
| 652 | /// Handle used as userdata to reference this object when inserting into the CoreTiming queue. | 720 | /// Handle used as userdata to reference this object when inserting into the CoreTiming queue. |
| 653 | Handle global_handle = 0; | 721 | Handle global_handle = 0; |
| 654 | 722 | ||
| 655 | Handle hle_time_event; | ||
| 656 | |||
| 657 | KScheduler* scheduler = nullptr; | 723 | KScheduler* scheduler = nullptr; |
| 658 | 724 | ||
| 659 | std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{}; | 725 | std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{}; |
| @@ -679,6 +745,16 @@ private: | |||
| 679 | 745 | ||
| 680 | bool signaled{}; | 746 | bool signaled{}; |
| 681 | 747 | ||
| 748 | ConditionVariableThreadTree* condvar_tree{}; | ||
| 749 | uintptr_t condvar_key{}; | ||
| 750 | VAddr address_key{}; | ||
| 751 | u32 address_key_value{}; | ||
| 752 | s32 num_kernel_waiters{}; | ||
| 753 | |||
| 754 | using WaiterList = boost::intrusive::list<Thread>; | ||
| 755 | WaiterList waiter_list{}; | ||
| 756 | WaiterList pinned_waiter_list{}; | ||
| 757 | |||
| 682 | std::string name; | 758 | std::string name; |
| 683 | }; | 759 | }; |
| 684 | 760 | ||
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp index b58a76dba..832edd629 100644 --- a/src/core/hle/kernel/time_manager.cpp +++ b/src/core/hle/kernel/time_manager.cpp | |||
| @@ -18,12 +18,10 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} { | |||
| 18 | time_manager_event_type = Core::Timing::CreateEvent( | 18 | time_manager_event_type = Core::Timing::CreateEvent( |
| 19 | "Kernel::TimeManagerCallback", | 19 | "Kernel::TimeManagerCallback", |
| 20 | [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { | 20 | [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { |
| 21 | const KScopedSchedulerLock lock(system.Kernel()); | ||
| 22 | const auto proper_handle = static_cast<Handle>(thread_handle); | ||
| 23 | |||
| 24 | std::shared_ptr<Thread> thread; | 21 | std::shared_ptr<Thread> thread; |
| 25 | { | 22 | { |
| 26 | std::lock_guard lock{mutex}; | 23 | std::lock_guard lock{mutex}; |
| 24 | const auto proper_handle = static_cast<Handle>(thread_handle); | ||
| 27 | if (cancelled_events[proper_handle]) { | 25 | if (cancelled_events[proper_handle]) { |
| 28 | return; | 26 | return; |
| 29 | } | 27 | } |
| @@ -32,7 +30,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} { | |||
| 32 | 30 | ||
| 33 | if (thread) { | 31 | if (thread) { |
| 34 | // Thread can be null if process has exited | 32 | // Thread can be null if process has exited |
| 35 | thread->OnWakeUp(); | 33 | thread->Wakeup(); |
| 36 | } | 34 | } |
| 37 | }); | 35 | }); |
| 38 | } | 36 | } |
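Editorial note: the change above narrows the callback's locking. The scheduler lock is no longer taken inside the timer callback (Wakeup now acquires it itself), and the handle-to-thread lookup plus the cancellation check happen entirely under the TimeManager's own mutex. The stripped-down model below uses plain maps in place of the real event bookkeeping and is only a sketch of that pattern.

    #include <cstdint>
    #include <memory>
    #include <mutex>
    #include <unordered_map>

    struct Thread {
        void Wakeup() { /* acquires its own scheduler lock internally */ }
    };

    class TimeManager {
    public:
        void OnTimerFired(std::uintptr_t thread_handle) {
            std::shared_ptr<Thread> thread;
            {
                std::lock_guard lock{mutex};
                const auto handle = static_cast<std::uint32_t>(thread_handle);
                if (cancelled_events[handle]) {
                    return; // the wait was cancelled before the timer fired
                }
                thread = pending_threads[handle].lock();
            }
            if (thread) { // can be null if the owning process already exited
                thread->Wakeup();
            }
        }

    private:
        std::mutex mutex;
        std::unordered_map<std::uint32_t, bool> cancelled_events;
        std::unordered_map<std::uint32_t, std::weak_ptr<Thread>> pending_threads;
    };

    int main() {
        TimeManager manager;
        manager.OnTimerFired(42); // no thread registered: lookup yields null, nothing to wake
    }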
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp index acf6b7ab5..3ded85720 100644 --- a/src/yuzu/debugger/wait_tree.cpp +++ b/src/yuzu/debugger/wait_tree.cpp | |||
| @@ -15,9 +15,9 @@ | |||
| 15 | #include "core/hle/kernel/handle_table.h" | 15 | #include "core/hle/kernel/handle_table.h" |
| 16 | #include "core/hle/kernel/k_scheduler.h" | 16 | #include "core/hle/kernel/k_scheduler.h" |
| 17 | #include "core/hle/kernel/k_synchronization_object.h" | 17 | #include "core/hle/kernel/k_synchronization_object.h" |
| 18 | #include "core/hle/kernel/mutex.h" | ||
| 19 | #include "core/hle/kernel/process.h" | 18 | #include "core/hle/kernel/process.h" |
| 20 | #include "core/hle/kernel/readable_event.h" | 19 | #include "core/hle/kernel/readable_event.h" |
| 20 | #include "core/hle/kernel/svc_common.h" | ||
| 21 | #include "core/hle/kernel/thread.h" | 21 | #include "core/hle/kernel/thread.h" |
| 22 | #include "core/memory.h" | 22 | #include "core/memory.h" |
| 23 | 23 | ||
| @@ -116,7 +116,7 @@ QString WaitTreeText::GetText() const { | |||
| 116 | WaitTreeMutexInfo::WaitTreeMutexInfo(VAddr mutex_address, const Kernel::HandleTable& handle_table) | 116 | WaitTreeMutexInfo::WaitTreeMutexInfo(VAddr mutex_address, const Kernel::HandleTable& handle_table) |
| 117 | : mutex_address(mutex_address) { | 117 | : mutex_address(mutex_address) { |
| 118 | mutex_value = Core::System::GetInstance().Memory().Read32(mutex_address); | 118 | mutex_value = Core::System::GetInstance().Memory().Read32(mutex_address); |
| 119 | owner_handle = static_cast<Kernel::Handle>(mutex_value & Kernel::Mutex::MutexOwnerMask); | 119 | owner_handle = static_cast<Kernel::Handle>(mutex_value & Kernel::Svc::HandleWaitMask); |
| 120 | owner = handle_table.Get<Kernel::Thread>(owner_handle); | 120 | owner = handle_table.Get<Kernel::Thread>(owner_handle); |
| 121 | } | 121 | } |
| 122 | 122 | ||
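Editorial note: the debugger now decodes the raw 32-bit mutex word against Svc::HandleWaitMask (1 << 30, per svc_common.h in this diff) instead of the removed Mutex constants: that bit marks contention, and the remaining bits carry the owner's handle. The decoder below follows that conventional packing, masking the owner with the complement of the wait bit; it is a hypothetical standalone helper, not a copy of the widget code above.

    #include <cstdint>
    #include <cstdio>

    constexpr std::uint32_t HandleWaitMask = 1u << 30; // matches svc_common.h in this diff

    struct MutexWord {
        std::uint32_t owner_handle;
        bool has_waiters;
    };

    constexpr MutexWord DecodeMutexWord(std::uint32_t value) {
        return MutexWord{value & ~HandleWaitMask, (value & HandleWaitMask) != 0};
    }

    int main() {
        const auto decoded = DecodeMutexWord(0x4000ABCD);
        std::printf("owner=0x%08X waiters=%d\n", decoded.owner_handle,
                    static_cast<int>(decoded.has_waiters));
    }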
| @@ -127,7 +127,7 @@ QString WaitTreeMutexInfo::GetText() const { | |||
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutexInfo::GetChildren() const { | 129 | std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutexInfo::GetChildren() const { |
| 130 | const bool has_waiters = (mutex_value & Kernel::Mutex::MutexHasWaitersFlag) != 0; | 130 | const bool has_waiters = (mutex_value & Kernel::Svc::HandleWaitMask) != 0; |
| 131 | 131 | ||
| 132 | std::vector<std::unique_ptr<WaitTreeItem>> list; | 132 | std::vector<std::unique_ptr<WaitTreeItem>> list; |
| 133 | list.push_back(std::make_unique<WaitTreeText>(tr("has waiters: %1").arg(has_waiters))); | 133 | list.push_back(std::make_unique<WaitTreeText>(tr("has waiters: %1").arg(has_waiters))); |
| @@ -324,11 +324,11 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const { | |||
| 324 | list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID()))); | 324 | list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID()))); |
| 325 | list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)") | 325 | list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)") |
| 326 | .arg(thread.GetPriority()) | 326 | .arg(thread.GetPriority()) |
| 327 | .arg(thread.GetNominalPriority()))); | 327 | .arg(thread.GetBasePriority()))); |
| 328 | list.push_back(std::make_unique<WaitTreeText>( | 328 | list.push_back(std::make_unique<WaitTreeText>( |
| 329 | tr("last running ticks = %1").arg(thread.GetLastScheduledTick()))); | 329 | tr("last running ticks = %1").arg(thread.GetLastScheduledTick()))); |
| 330 | 330 | ||
| 331 | const VAddr mutex_wait_address = thread.GetMutexWaitAddress(); | 331 | const VAddr mutex_wait_address = thread.GetMutexWaitAddressForDebugging(); |
| 332 | if (mutex_wait_address != 0) { | 332 | if (mutex_wait_address != 0) { |
| 333 | const auto& handle_table = thread.GetOwnerProcess()->GetHandleTable(); | 333 | const auto& handle_table = thread.GetOwnerProcess()->GetHandleTable(); |
| 334 | list.push_back(std::make_unique<WaitTreeMutexInfo>(mutex_wait_address, handle_table)); | 334 | list.push_back(std::make_unique<WaitTreeMutexInfo>(mutex_wait_address, handle_table)); |