Diffstat (limited to 'src/core/hle/kernel')
26 files changed, 2053 insertions, 1054 deletions
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 8475b698c..4d2a9b35d 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -7,11 +7,15 @@
 
 #include "common/assert.h"
 #include "common/common_types.h"
+#include "core/arm/exclusive_monitor.h"
 #include "core/core.h"
 #include "core/hle/kernel/address_arbiter.h"
 #include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/time_manager.h"
 #include "core/hle/result.h"
 #include "core/memory.h"
 
@@ -20,6 +24,7 @@ namespace Kernel {
 // Wake up num_to_wake (or all) threads in a vector.
 void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads,
                                  s32 num_to_wake) {
+    auto& time_manager = system.Kernel().TimeManager();
     // Only process up to 'target' threads, unless 'target' is <= 0, in which case process
     // them all.
     std::size_t last = waiting_threads.size();
@@ -29,12 +34,10 @@ void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& wai
 
     // Signal the waiting threads.
     for (std::size_t i = 0; i < last; i++) {
-        ASSERT(waiting_threads[i]->GetStatus() == ThreadStatus::WaitArb);
-        waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS);
+        waiting_threads[i]->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
         RemoveThread(waiting_threads[i]);
-        waiting_threads[i]->SetArbiterWaitAddress(0);
+        waiting_threads[i]->WaitForArbitration(false);
         waiting_threads[i]->ResumeFromWait();
-        system.PrepareReschedule(waiting_threads[i]->GetProcessorID());
     }
 }
 
@@ -56,6 +59,7 @@ ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 v
 }
 
 ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
+    SchedulerLock lock(system.Kernel());
     const std::vector<std::shared_ptr<Thread>> waiting_threads =
         GetThreadsWaitingOnAddress(address);
     WakeThreads(waiting_threads, num_to_wake);
@@ -64,6 +68,7 @@ ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
 
 ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                               s32 num_to_wake) {
+    SchedulerLock lock(system.Kernel());
     auto& memory = system.Memory();
 
     // Ensure that we can write to the address.
@@ -71,16 +76,24 @@ ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32
         return ERR_INVALID_ADDRESS_STATE;
     }
 
-    if (static_cast<s32>(memory.Read32(address)) != value) {
-        return ERR_INVALID_STATE;
-    }
+    const std::size_t current_core = system.CurrentCoreIndex();
+    auto& monitor = system.Monitor();
+    u32 current_value;
+    do {
+        current_value = monitor.ExclusiveRead32(current_core, address);
+
+        if (current_value != value) {
+            return ERR_INVALID_STATE;
+        }
+        current_value++;
+    } while (!monitor.ExclusiveWrite32(current_core, address, current_value));
 
-    memory.Write32(address, static_cast<u32>(value + 1));
     return SignalToAddressOnly(address, num_to_wake);
 }
 
 ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                                          s32 num_to_wake) {
+    SchedulerLock lock(system.Kernel());
     auto& memory = system.Memory();
 
     // Ensure that we can write to the address.
@@ -92,29 +105,33 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
     const std::vector<std::shared_ptr<Thread>> waiting_threads =
         GetThreadsWaitingOnAddress(address);
 
-    // Determine the modified value depending on the waiting count.
+    const std::size_t current_core = system.CurrentCoreIndex();
+    auto& monitor = system.Monitor();
     s32 updated_value;
-    if (num_to_wake <= 0) {
-        if (waiting_threads.empty()) {
-            updated_value = value + 1;
-        } else {
-            updated_value = value - 1;
-        }
-    } else {
-        if (waiting_threads.empty()) {
-            updated_value = value + 1;
-        } else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
-            updated_value = value - 1;
-        } else {
-            updated_value = value;
+    do {
+        updated_value = monitor.ExclusiveRead32(current_core, address);
+
+        if (updated_value != value) {
+            return ERR_INVALID_STATE;
+        }
+        // Determine the modified value depending on the waiting count.
+        if (num_to_wake <= 0) {
+            if (waiting_threads.empty()) {
+                updated_value = value + 1;
+            } else {
+                updated_value = value - 1;
+            }
+        } else {
+            if (waiting_threads.empty()) {
+                updated_value = value + 1;
+            } else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
+                updated_value = value - 1;
+            } else {
+                updated_value = value;
+            }
         }
-    }
+    } while (!monitor.ExclusiveWrite32(current_core, address, updated_value));
 
-    if (static_cast<s32>(memory.Read32(address)) != value) {
-        return ERR_INVALID_STATE;
-    }
-
-    memory.Write32(address, static_cast<u32>(updated_value));
     WakeThreads(waiting_threads, num_to_wake);
     return RESULT_SUCCESS;
 }
@@ -136,60 +153,127 @@ ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s
 ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
                                                     bool should_decrement) {
     auto& memory = system.Memory();
+    auto& kernel = system.Kernel();
+    Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
 
-    // Ensure that we can read the address.
-    if (!memory.IsValidVirtualAddress(address)) {
-        return ERR_INVALID_ADDRESS_STATE;
-    }
+    Handle event_handle = InvalidHandle;
+    {
+        SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
+
+        if (current_thread->IsPendingTermination()) {
+            lock.CancelSleep();
+            return ERR_THREAD_TERMINATING;
+        }
+
+        // Ensure that we can read the address.
+        if (!memory.IsValidVirtualAddress(address)) {
+            lock.CancelSleep();
+            return ERR_INVALID_ADDRESS_STATE;
+        }
+
+        s32 current_value = static_cast<s32>(memory.Read32(address));
+        if (current_value >= value) {
+            lock.CancelSleep();
+            return ERR_INVALID_STATE;
+        }
+
+        current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
+
+        s32 decrement_value;
+
+        const std::size_t current_core = system.CurrentCoreIndex();
+        auto& monitor = system.Monitor();
+        do {
+            current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
+            if (should_decrement) {
+                decrement_value = current_value - 1;
+            } else {
+                decrement_value = current_value;
+            }
+        } while (
+            !monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value)));
+
+        // Short-circuit without rescheduling, if timeout is zero.
+        if (timeout == 0) {
+            lock.CancelSleep();
+            return RESULT_TIMEOUT;
+        }
 
-    const s32 cur_value = static_cast<s32>(memory.Read32(address));
-    if (cur_value >= value) {
-        return ERR_INVALID_STATE;
+        current_thread->SetArbiterWaitAddress(address);
+        InsertThread(SharedFrom(current_thread));
+        current_thread->SetStatus(ThreadStatus::WaitArb);
+        current_thread->WaitForArbitration(true);
     }
 
-    if (should_decrement) {
-        memory.Write32(address, static_cast<u32>(cur_value - 1));
+    if (event_handle != InvalidHandle) {
+        auto& time_manager = kernel.TimeManager();
+        time_manager.UnscheduleTimeEvent(event_handle);
     }
 
-    // Short-circuit without rescheduling, if timeout is zero.
-    if (timeout == 0) {
-        return RESULT_TIMEOUT;
+    {
+        SchedulerLock lock(kernel);
+        if (current_thread->IsWaitingForArbitration()) {
+            RemoveThread(SharedFrom(current_thread));
+            current_thread->WaitForArbitration(false);
+        }
     }
 
-    return WaitForAddressImpl(address, timeout);
+    return current_thread->GetSignalingResult();
 }
 
 ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
     auto& memory = system.Memory();
+    auto& kernel = system.Kernel();
+    Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
 
-    // Ensure that we can read the address.
-    if (!memory.IsValidVirtualAddress(address)) {
-        return ERR_INVALID_ADDRESS_STATE;
-    }
+    Handle event_handle = InvalidHandle;
+    {
+        SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
+
+        if (current_thread->IsPendingTermination()) {
+            lock.CancelSleep();
+            return ERR_THREAD_TERMINATING;
+        }
+
+        // Ensure that we can read the address.
+        if (!memory.IsValidVirtualAddress(address)) {
+            lock.CancelSleep();
+            return ERR_INVALID_ADDRESS_STATE;
+        }
 
-    // Only wait for the address if equal.
-    if (static_cast<s32>(memory.Read32(address)) != value) {
-        return ERR_INVALID_STATE;
+        s32 current_value = static_cast<s32>(memory.Read32(address));
+        if (current_value != value) {
+            lock.CancelSleep();
+            return ERR_INVALID_STATE;
+        }
+
+        // Short-circuit without rescheduling, if timeout is zero.
+        if (timeout == 0) {
+            lock.CancelSleep();
+            return RESULT_TIMEOUT;
+        }
+
+        current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
+        current_thread->SetArbiterWaitAddress(address);
+        InsertThread(SharedFrom(current_thread));
+        current_thread->SetStatus(ThreadStatus::WaitArb);
+        current_thread->WaitForArbitration(true);
     }
 
-    // Short-circuit without rescheduling if timeout is zero.
-    if (timeout == 0) {
-        return RESULT_TIMEOUT;
+    if (event_handle != InvalidHandle) {
+        auto& time_manager = kernel.TimeManager();
+        time_manager.UnscheduleTimeEvent(event_handle);
     }
 
-    return WaitForAddressImpl(address, timeout);
-}
+    {
+        SchedulerLock lock(kernel);
+        if (current_thread->IsWaitingForArbitration()) {
+            RemoveThread(SharedFrom(current_thread));
+            current_thread->WaitForArbitration(false);
+        }
+    }
 
-ResultCode AddressArbiter::WaitForAddressImpl(VAddr address, s64 timeout) {
-    Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
-    current_thread->SetArbiterWaitAddress(address);
-    InsertThread(SharedFrom(current_thread));
-    current_thread->SetStatus(ThreadStatus::WaitArb);
-    current_thread->InvalidateWakeupCallback();
-    current_thread->WakeAfterDelay(timeout);
-
-    system.PrepareReschedule(current_thread->GetProcessorID());
-    return RESULT_TIMEOUT;
+    return current_thread->GetSignalingResult();
 }
 
 void AddressArbiter::HandleWakeupThread(std::shared_ptr<Thread> thread) {
@@ -221,9 +305,9 @@ void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) {
     const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(),
                                    [&thread](const auto& entry) { return thread == entry; });
 
-    ASSERT(iter != thread_list.cend());
-
-    thread_list.erase(iter);
+    if (iter != thread_list.cend()) {
+        thread_list.erase(iter);
+    }
 }
 
 std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(
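The two Signal* rewrites above share one pattern: the compare-and-update now runs through the exclusive monitor (ExclusiveRead32/ExclusiveWrite32) in a retry loop, which makes the read-modify-write atomic against stores from other emulated cores instead of a racy plain Read32/Write32 pair. A minimal self-contained sketch of the same load-linked/store-conditional style loop, with std::atomic standing in for the monitor (only the ExclusiveRead32/ExclusiveWrite32 names come from the diff; everything else below is illustrative):

    #include <atomic>
    #include <cstdint>
    #include <optional>

    // Atomically increment a 32-bit word only if it still equals 'expected',
    // retrying while another thread races the update. A failed
    // compare_exchange_weak reloads 'current', which mirrors a failed
    // ExclusiveWrite32 forcing another ExclusiveRead32 in the diff.
    std::optional<std::uint32_t> IncrementIfEqual(std::atomic<std::uint32_t>& word,
                                                  std::uint32_t expected) {
        std::uint32_t current = word.load();
        do {
            if (current != expected) {
                return std::nullopt; // corresponds to ERR_INVALID_STATE above
            }
        } while (!word.compare_exchange_weak(current, current + 1));
        return current + 1;
    }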
diff --git a/src/core/hle/kernel/address_arbiter.h b/src/core/hle/kernel/address_arbiter.h
index f958eee5a..0b05d533c 100644
--- a/src/core/hle/kernel/address_arbiter.h
+++ b/src/core/hle/kernel/address_arbiter.h
@@ -73,9 +73,6 @@ private:
     /// Waits on an address if the value passed is equal to the argument value.
     ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);
 
-    // Waits on the given address with a timeout in nanoseconds
-    ResultCode WaitForAddressImpl(VAddr address, s64 timeout);
-
     /// Wake up num_to_wake (or all) threads in a vector.
    void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake);
 
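WaitForAddressImpl disappears here because each wait path in the .cpp now inlines its sleep sequence through the SchedulerLockAndSleep helper, whose definition lies outside this diff. A self-contained sketch of the shape such an RAII guard plausibly has, with yuzu's types reduced to stand-ins (every name and body below, apart from the CancelSleep usage contract visible in the diff, is an assumption):

    #include <cstdint>

    // Illustrative stand-ins for kernel types that live outside this diff.
    using Handle = std::uint32_t;
    constexpr Handle InvalidHandle = 0;

    // Hypothetical shape of a "lock and sleep" RAII guard: it would take the
    // global scheduler lock on construction and, unless CancelSleep() ran,
    // arm a wake-up timer on destruction, publishing its handle through the
    // caller-provided reference (which the caller later unschedules).
    class ScopedLockAndSleep {
    public:
        ScopedLockAndSleep(Handle& event_handle, std::int64_t timeout_ns)
            : event_handle{event_handle}, timeout_ns{timeout_ns} {}

        void CancelSleep() {
            cancelled = true;
        }

        ~ScopedLockAndSleep() {
            // Early-outs cancel first, so no stray timer outlives the wait.
            event_handle = cancelled ? InvalidHandle : ArmWakeupTimer(timeout_ns);
        }

    private:
        static Handle ArmWakeupTimer(std::int64_t) {
            return 1; // placeholder for scheduling a core-timing event
        }

        Handle& event_handle;
        std::int64_t timeout_ns;
        bool cancelled{false};
    };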
diff --git a/src/core/hle/kernel/client_port.cpp b/src/core/hle/kernel/client_port.cpp
index 5498fd313..8aff2227a 100644
--- a/src/core/hle/kernel/client_port.cpp
+++ b/src/core/hle/kernel/client_port.cpp
@@ -34,7 +34,7 @@ ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
     }
 
     // Wake the threads waiting on the ServerPort
-    server_port->WakeupAllWaitingThreads();
+    server_port->Signal();
 
     return MakeResult(std::move(client));
 }
diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h
index 29bfa3621..d4e5d88cf 100644
--- a/src/core/hle/kernel/errors.h
+++ b/src/core/hle/kernel/errors.h
@@ -12,6 +12,7 @@ namespace Kernel {
 
 constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7};
 constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14};
+constexpr ResultCode ERR_THREAD_TERMINATING{ErrorModule::Kernel, 59};
 constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101};
 constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102};
 constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103};
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index ba0eac4c2..9277b5d08 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -14,14 +14,17 @@
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "core/hle/ipc_helpers.h"
+#include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/hle_ipc.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/readable_event.h"
+#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/server_session.h"
 #include "core/hle/kernel/thread.h"
+#include "core/hle/kernel/time_manager.h"
 #include "core/hle/kernel/writable_event.h"
 #include "core/memory.h"
 
@@ -46,15 +49,6 @@
     const std::string& reason, u64 timeout, WakeupCallback&& callback,
     std::shared_ptr<WritableEvent> writable_event) {
     // Put the client thread to sleep until the wait event is signaled or the timeout expires.
-    thread->SetWakeupCallback(
-        [context = *this, callback](ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
-                                    std::shared_ptr<SynchronizationObject> object,
-                                    std::size_t index) mutable -> bool {
-            ASSERT(thread->GetStatus() == ThreadStatus::WaitHLEEvent);
-            callback(thread, context, reason);
-            context.WriteToOutgoingCommandBuffer(*thread);
-            return true;
-        });
 
     if (!writable_event) {
         // Create event if not provided
@@ -62,14 +56,26 @@ std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
         writable_event = pair.writable;
     }
 
-    const auto readable_event{writable_event->GetReadableEvent()};
-    writable_event->Clear();
-    thread->SetStatus(ThreadStatus::WaitHLEEvent);
-    thread->SetSynchronizationObjects({readable_event});
-    readable_event->AddWaitingThread(thread);
-
-    if (timeout > 0) {
-        thread->WakeAfterDelay(timeout);
+    {
+        Handle event_handle = InvalidHandle;
+        SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout);
+        thread->SetHLECallback(
+            [context = *this, callback](std::shared_ptr<Thread> thread) mutable -> bool {
+                ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT
                                                ? ThreadWakeupReason::Timeout
+                                                : ThreadWakeupReason::Signal;
+                callback(thread, context, reason);
+                context.WriteToOutgoingCommandBuffer(*thread);
+                return true;
+            });
+        const auto readable_event{writable_event->GetReadableEvent()};
+        writable_event->Clear();
+        thread->SetHLESyncObject(readable_event.get());
+        thread->SetStatus(ThreadStatus::WaitHLEEvent);
+        thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
+        readable_event->AddWaitingThread(thread);
+        lock.Release();
+        thread->SetHLETimeEvent(event_handle);
     }
 
     is_thread_waiting = true;
@@ -282,18 +288,18 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) {
 }
 
 std::vector<u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
-    std::vector<u8> buffer;
+    std::vector<u8> buffer{};
     const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
                            BufferDescriptorA()[buffer_index].Size()};
 
     if (is_buffer_a) {
-        ASSERT_MSG(BufferDescriptorA().size() > buffer_index,
-                   "BufferDescriptorA invalid buffer_index {}", buffer_index);
+        ASSERT_OR_EXECUTE_MSG(BufferDescriptorA().size() > buffer_index, { return buffer; },
+                              "BufferDescriptorA invalid buffer_index {}", buffer_index);
         buffer.resize(BufferDescriptorA()[buffer_index].Size());
         memory.ReadBlock(BufferDescriptorA()[buffer_index].Address(), buffer.data(), buffer.size());
     } else {
-        ASSERT_MSG(BufferDescriptorX().size() > buffer_index,
-                   "BufferDescriptorX invalid buffer_index {}", buffer_index);
+        ASSERT_OR_EXECUTE_MSG(BufferDescriptorX().size() > buffer_index, { return buffer; },
+                              "BufferDescriptorX invalid buffer_index {}", buffer_index);
         buffer.resize(BufferDescriptorX()[buffer_index].Size());
         memory.ReadBlock(BufferDescriptorX()[buffer_index].Address(), buffer.data(), buffer.size());
     }
@@ -318,16 +324,16 @@ std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
     }
 
     if (is_buffer_b) {
-        ASSERT_MSG(BufferDescriptorB().size() > buffer_index,
-                   "BufferDescriptorB invalid buffer_index {}", buffer_index);
-        ASSERT_MSG(BufferDescriptorB()[buffer_index].Size() >= size,
-                   "BufferDescriptorB buffer_index {} is not large enough", buffer_index);
+        ASSERT_OR_EXECUTE_MSG(BufferDescriptorB().size() > buffer_index &&
+                                  BufferDescriptorB()[buffer_index].Size() >= size,
+                              { return 0; }, "BufferDescriptorB is invalid, index={}, size={}",
+                              buffer_index, size);
         memory.WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size);
     } else {
-        ASSERT_MSG(BufferDescriptorC().size() > buffer_index,
-                   "BufferDescriptorC invalid buffer_index {}", buffer_index);
-        ASSERT_MSG(BufferDescriptorC()[buffer_index].Size() >= size,
-                   "BufferDescriptorC buffer_index {} is not large enough", buffer_index);
+        ASSERT_OR_EXECUTE_MSG(BufferDescriptorC().size() > buffer_index &&
+                                  BufferDescriptorC()[buffer_index].Size() >= size,
+                              { return 0; }, "BufferDescriptorC is invalid, index={}, size={}",
+                              buffer_index, size);
         memory.WriteBlock(BufferDescriptorC()[buffer_index].Address(), buffer, size);
     }
 
@@ -338,16 +344,12 @@ std::size_t HLERequestContext::GetReadBufferSize(std::size_t buffer_index) const
     const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
                            BufferDescriptorA()[buffer_index].Size()};
     if (is_buffer_a) {
-        ASSERT_MSG(BufferDescriptorA().size() > buffer_index,
-                   "BufferDescriptorA invalid buffer_index {}", buffer_index);
-        ASSERT_MSG(BufferDescriptorA()[buffer_index].Size() > 0,
-                   "BufferDescriptorA buffer_index {} is empty", buffer_index);
+        ASSERT_OR_EXECUTE_MSG(BufferDescriptorA().size() > buffer_index, { return 0; },
+                              "BufferDescriptorA invalid buffer_index {}", buffer_index);
         return BufferDescriptorA()[buffer_index].Size();
     } else {
-        ASSERT_MSG(BufferDescriptorX().size() > buffer_index,
-                   "BufferDescriptorX invalid buffer_index {}", buffer_index);
-        ASSERT_MSG(BufferDescriptorX()[buffer_index].Size() > 0,
-                   "BufferDescriptorX buffer_index {} is empty", buffer_index);
+        ASSERT_OR_EXECUTE_MSG(BufferDescriptorX().size() > buffer_index, { return 0; },
+                              "BufferDescriptorX invalid buffer_index {}", buffer_index);
        return BufferDescriptorX()[buffer_index].Size();
     }
 }
@@ -356,14 +358,15 @@ std::size_t HLERequestContext::GetWriteBufferSize(std::size_t buffer_index) cons
     const bool is_buffer_b{BufferDescriptorB().size() > buffer_index &&
                            BufferDescriptorB()[buffer_index].Size()};
     if (is_buffer_b) {
-        ASSERT_MSG(BufferDescriptorB().size() > buffer_index,
-                   "BufferDescriptorB invalid buffer_index {}", buffer_index);
+        ASSERT_OR_EXECUTE_MSG(BufferDescriptorB().size() > buffer_index, { return 0; },
+                              "BufferDescriptorB invalid buffer_index {}", buffer_index);
         return BufferDescriptorB()[buffer_index].Size();
     } else {
-        ASSERT_MSG(BufferDescriptorC().size() > buffer_index,
-                   "BufferDescriptorC invalid buffer_index {}", buffer_index);
+        ASSERT_OR_EXECUTE_MSG(BufferDescriptorC().size() > buffer_index, { return 0; },
+                              "BufferDescriptorC invalid buffer_index {}", buffer_index);
         return BufferDescriptorC()[buffer_index].Size();
     }
+    return 0;
 }
 
 std::string HLERequestContext::Description() const {
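The recurring change in this file swaps hard ASSERT_MSG failures for ASSERT_OR_EXECUTE_MSG, which lets a malformed IPC request degrade to a safe default (empty buffer, zero size) instead of halting the emulator. The macro's definition is not part of this diff; a plausible reconstruction of its behavior, assuming a fmt-style logging macro is available (both the body and the log channel below are assumptions, not the repository's actual definition):

    // Hypothetical reconstruction -- the real macro lives in common/assert.h.
    // On a failed condition it logs the formatted message, then executes the
    // caller-supplied block, which at the call sites above returns a safe
    // default from the enclosing function.
    #define ASSERT_OR_EXECUTE_MSG(cond, fallback, ...)                         \
        do {                                                                   \
            if (!(cond)) {                                                     \
                LOG_CRITICAL(Debug, __VA_ARGS__);                              \
                fallback                                                       \
            }                                                                  \
        } while (0)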
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 7655382fa..1f2af7a1b 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -2,6 +2,7 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include <array>
 #include <atomic>
 #include <bitset>
 #include <functional>
@@ -13,11 +14,15 @@
 
 #include "common/assert.h"
 #include "common/logging/log.h"
+#include "common/microprofile.h"
+#include "common/thread.h"
 #include "core/arm/arm_interface.h"
+#include "core/arm/cpu_interrupt_handler.h"
 #include "core/arm/exclusive_monitor.h"
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/core_timing_util.h"
+#include "core/cpu_manager.h"
 #include "core/device_memory.h"
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/client_port.h"
@@ -39,85 +44,28 @@
 #include "core/hle/result.h"
 #include "core/memory.h"
 
-namespace Kernel {
-
-/**
- * Callback that will wake up the thread it was scheduled for
- * @param thread_handle The handle of the thread that's been awoken
- * @param cycles_late The number of CPU cycles that have passed since the desired wakeup time
- */
-static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
-    const auto proper_handle = static_cast<Handle>(thread_handle);
-    const auto& system = Core::System::GetInstance();
-
-    // Lock the global kernel mutex when we enter the kernel HLE.
-    std::lock_guard lock{HLE::g_hle_lock};
-
-    std::shared_ptr<Thread> thread =
-        system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
-    if (thread == nullptr) {
-        LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", proper_handle);
-        return;
-    }
-
-    bool resume = true;
-
-    if (thread->GetStatus() == ThreadStatus::WaitSynch ||
-        thread->GetStatus() == ThreadStatus::WaitHLEEvent) {
-        // Remove the thread from each of its waiting objects' waitlists
-        for (const auto& object : thread->GetSynchronizationObjects()) {
-            object->RemoveWaitingThread(thread);
-        }
-        thread->ClearSynchronizationObjects();
-
-        // Invoke the wakeup callback before clearing the wait objects
-        if (thread->HasWakeupCallback()) {
-            resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Timeout, thread, nullptr, 0);
-        }
-    } else if (thread->GetStatus() == ThreadStatus::WaitMutex ||
-               thread->GetStatus() == ThreadStatus::WaitCondVar) {
-        thread->SetMutexWaitAddress(0);
-        thread->SetWaitHandle(0);
-        if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
-            thread->GetOwnerProcess()->RemoveConditionVariableThread(thread);
-            thread->SetCondVarWaitAddress(0);
-        }
-
-        auto* const lock_owner = thread->GetLockOwner();
-        // Threads waking up by timeout from WaitProcessWideKey do not perform priority inheritance
-        // and don't have a lock owner unless SignalProcessWideKey was called first and the thread
-        // wasn't awakened due to the mutex already being acquired.
-        if (lock_owner != nullptr) {
-            lock_owner->RemoveMutexWaiter(thread);
-        }
-    }
+MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));
 
-    if (thread->GetStatus() == ThreadStatus::WaitArb) {
-        auto& address_arbiter = thread->GetOwnerProcess()->GetAddressArbiter();
-        address_arbiter.HandleWakeupThread(thread);
-    }
-
-    if (resume) {
-        if (thread->GetStatus() == ThreadStatus::WaitCondVar ||
-            thread->GetStatus() == ThreadStatus::WaitArb) {
-            thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
-        }
-        thread->ResumeFromWait();
-    }
-}
+namespace Kernel {
 
 struct KernelCore::Impl {
     explicit Impl(Core::System& system, KernelCore& kernel)
         : global_scheduler{kernel}, synchronization{system}, time_manager{system}, system{system} {}
 
+    void SetMulticore(bool is_multicore) {
+        this->is_multicore = is_multicore;
+    }
+
     void Initialize(KernelCore& kernel) {
         Shutdown();
+        RegisterHostThread();
 
         InitializePhysicalCores();
         InitializeSystemResourceLimit(kernel);
         InitializeMemoryLayout();
-        InitializeThreads();
-        InitializePreemption();
+        InitializePreemption(kernel);
+        InitializeSchedulers();
+        InitializeSuspendThreads();
     }
 
     void Shutdown() {
@@ -126,13 +74,26 @@ struct KernelCore::Impl {
         next_user_process_id = Process::ProcessIDMin;
         next_thread_id = 1;
 
+        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+            if (suspend_threads[i]) {
+                suspend_threads[i].reset();
+            }
+        }
+
+        for (std::size_t i = 0; i < cores.size(); i++) {
+            cores[i].Shutdown();
+            schedulers[i].reset();
+        }
+        cores.clear();
+
+        registered_core_threads.reset();
+
         process_list.clear();
         current_process = nullptr;
 
         system_resource_limit = nullptr;
 
         global_handle_table.Clear();
-        thread_wakeup_event_type = nullptr;
         preemption_event = nullptr;
 
         global_scheduler.Shutdown();
@@ -145,13 +106,21 @@ struct KernelCore::Impl {
         cores.clear();
 
         exclusive_monitor.reset();
+        host_thread_ids.clear();
     }
 
     void InitializePhysicalCores() {
         exclusive_monitor =
             Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
         for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
-            cores.emplace_back(system, i, *exclusive_monitor);
+            schedulers[i] = std::make_unique<Kernel::Scheduler>(system, i);
+            cores.emplace_back(system, i, *schedulers[i], interrupts[i]);
+        }
+    }
+
+    void InitializeSchedulers() {
+        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+            cores[i].Scheduler().Initialize();
         }
     }
 
@@ -173,15 +142,13 @@ struct KernelCore::Impl {
         }
     }
 
-    void InitializeThreads() {
-        thread_wakeup_event_type =
-            Core::Timing::CreateEvent("ThreadWakeupCallback", ThreadWakeupCallback);
-    }
-
-    void InitializePreemption() {
-        preemption_event =
-            Core::Timing::CreateEvent("PreemptionCallback", [this](u64 userdata, s64 cycles_late) {
-                global_scheduler.PreemptThreads();
+    void InitializePreemption(KernelCore& kernel) {
+        preemption_event = Core::Timing::CreateEvent(
+            "PreemptionCallback", [this, &kernel](u64 userdata, s64 cycles_late) {
+                {
+                    SchedulerLock lock(kernel);
+                    global_scheduler.PreemptThreads();
+                }
                 s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
                 system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
             });
@@ -190,6 +157,20 @@ struct KernelCore::Impl {
         system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
     }
 
+    void InitializeSuspendThreads() {
+        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+            std::string name = "Suspend Thread Id:" + std::to_string(i);
+            std::function<void(void*)> init_func =
+                system.GetCpuManager().GetSuspendThreadStartFunc();
+            void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
+            ThreadType type =
+                static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND);
+            auto thread_res = Thread::Create(system, type, name, 0, 0, 0, static_cast<u32>(i), 0,
+                                             nullptr, std::move(init_func), init_func_parameter);
+            suspend_threads[i] = std::move(thread_res).Unwrap();
+        }
+    }
+
     void MakeCurrentProcess(Process* process) {
         current_process = process;
 
@@ -197,15 +178,17 @@ struct KernelCore::Impl {
             return;
         }
 
-        for (auto& core : cores) {
-            core.SetIs64Bit(process->Is64BitProcess());
+        u32 core_id = GetCurrentHostThreadID();
+        if (core_id < Core::Hardware::NUM_CPU_CORES) {
+            system.Memory().SetCurrentPageTable(*process, core_id);
         }
-
-        system.Memory().SetCurrentPageTable(*process);
     }
 
     void RegisterCoreThread(std::size_t core_id) {
         std::unique_lock lock{register_thread_mutex};
+        if (!is_multicore) {
+            single_core_thread_id = std::this_thread::get_id();
+        }
         const std::thread::id this_id = std::this_thread::get_id();
         const auto it = host_thread_ids.find(this_id);
         ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
@@ -219,12 +202,19 @@ struct KernelCore::Impl {
         std::unique_lock lock{register_thread_mutex};
         const std::thread::id this_id = std::this_thread::get_id();
         const auto it = host_thread_ids.find(this_id);
-        ASSERT(it == host_thread_ids.end());
+        if (it != host_thread_ids.end()) {
+            return;
+        }
         host_thread_ids[this_id] = registered_thread_ids++;
     }
 
     u32 GetCurrentHostThreadID() const {
         const std::thread::id this_id = std::this_thread::get_id();
+        if (!is_multicore) {
+            if (single_core_thread_id == this_id) {
+                return static_cast<u32>(system.GetCpuManager().CurrentCore());
+            }
+        }
         const auto it = host_thread_ids.find(this_id);
         if (it == host_thread_ids.end()) {
             return Core::INVALID_HOST_THREAD_ID;
@@ -240,7 +230,7 @@ struct KernelCore::Impl {
         }
         const Kernel::Scheduler& sched = cores[result.host_handle].Scheduler();
         const Kernel::Thread* current = sched.GetCurrentThread();
-        if (current != nullptr) {
+        if (current != nullptr && !current->IsPhantomMode()) {
             result.guest_handle = current->GetGlobalHandle();
         } else {
             result.guest_handle = InvalidHandle;
@@ -313,7 +303,6 @@ struct KernelCore::Impl {
 
     std::shared_ptr<ResourceLimit> system_resource_limit;
 
-    std::shared_ptr<Core::Timing::EventType> thread_wakeup_event_type;
     std::shared_ptr<Core::Timing::EventType> preemption_event;
 
     // This is the kernel's handle table or supervisor handle table which
@@ -343,6 +332,15 @@ struct KernelCore::Impl {
     std::shared_ptr<Kernel::SharedMemory> irs_shared_mem;
     std::shared_ptr<Kernel::SharedMemory> time_shared_mem;
 
+    std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
+    std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
+    std::array<std::unique_ptr<Kernel::Scheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
+
+    bool is_multicore{};
+    std::thread::id single_core_thread_id{};
+
+    std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{};
+
     // System context
     Core::System& system;
 };
@@ -352,6 +350,10 @@ KernelCore::~KernelCore() {
     Shutdown();
 }
 
+void KernelCore::SetMulticore(bool is_multicore) {
+    impl->SetMulticore(is_multicore);
+}
+
 void KernelCore::Initialize() {
     impl->Initialize(*this);
 }
@@ -397,11 +399,11 @@ const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const {
 }
 
 Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) {
-    return impl->cores[id].Scheduler();
+    return *impl->schedulers[id];
 }
 
 const Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) const {
-    return impl->cores[id].Scheduler();
+    return *impl->schedulers[id];
 }
 
 Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) {
@@ -412,6 +414,39 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
     return impl->cores[id];
 }
 
+Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
+    u32 core_id = impl->GetCurrentHostThreadID();
+    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+    return impl->cores[core_id];
+}
+
+const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
+    u32 core_id = impl->GetCurrentHostThreadID();
+    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+    return impl->cores[core_id];
+}
+
+Kernel::Scheduler& KernelCore::CurrentScheduler() {
+    u32 core_id = impl->GetCurrentHostThreadID();
+    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+    return *impl->schedulers[core_id];
+}
+
+const Kernel::Scheduler& KernelCore::CurrentScheduler() const {
+    u32 core_id = impl->GetCurrentHostThreadID();
+    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+    return *impl->schedulers[core_id];
+}
+
+std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
+    return impl->interrupts;
+}
+
+const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts()
+    const {
+    return impl->interrupts;
+}
+
 Kernel::Synchronization& KernelCore::Synchronization() {
     return impl->synchronization;
 }
@@ -437,15 +472,17 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
 }
 
 void KernelCore::InvalidateAllInstructionCaches() {
-    for (std::size_t i = 0; i < impl->global_scheduler.CpuCoresCount(); i++) {
-        PhysicalCore(i).ArmInterface().ClearInstructionCache();
+    auto& threads = GlobalScheduler().GetThreadList();
+    for (auto& thread : threads) {
+        if (!thread->IsHLEThread()) {
+            auto& arm_interface = thread->ArmInterface();
+            arm_interface.ClearInstructionCache();
+        }
     }
 }
 
 void KernelCore::PrepareReschedule(std::size_t id) {
-    if (id < impl->global_scheduler.CpuCoresCount()) {
-        impl->cores[id].Stop();
-    }
+    // TODO: Reimplement, this
 }
 
 void KernelCore::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) {
@@ -481,10 +518,6 @@ u64 KernelCore::CreateNewUserProcessID() {
     return impl->next_user_process_id++;
 }
 
-const std::shared_ptr<Core::Timing::EventType>& KernelCore::ThreadWakeupCallbackEventType() const {
-    return impl->thread_wakeup_event_type;
-}
-
 Kernel::HandleTable& KernelCore::GlobalHandleTable() {
     return impl->global_handle_table;
 }
@@ -557,4 +590,34 @@ const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const {
     return *impl->time_shared_mem;
 }
 
+void KernelCore::Suspend(bool in_suspention) {
+    const bool should_suspend = exception_exited || in_suspention;
+    {
+        SchedulerLock lock(*this);
+        ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep;
+        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+            impl->suspend_threads[i]->SetStatus(status);
+        }
+    }
+}
+
+bool KernelCore::IsMulticore() const {
+    return impl->is_multicore;
+}
+
+void KernelCore::ExceptionalExit() {
+    exception_exited = true;
+    Suspend(true);
+}
+
+void KernelCore::EnterSVCProfile() {
+    std::size_t core = impl->GetCurrentHostThreadID();
+    impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
+}
+
+void KernelCore::ExitSVCProfile() {
+    std::size_t core = impl->GetCurrentHostThreadID();
+    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
+}
+
 } // namespace Kernel
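EnterSVCProfile/ExitSVCProfile pair the new MICROPROFILE_DEFINE(Kernel_SVC, ...) token with a per-core tick slot (svc_ticks), so concurrently running cores do not clobber each other's timings. The caller sits outside this diff; a hypothetical call site showing the intended bracketing (the function name and dispatch body below are placeholders, not code from the repository):

    // Sketch: each supervisor call is bracketed by the profile pair, and the
    // enter-tick is parked in svc_ticks[core] until the matching leave.
    void CallSVC(Core::System& system, u32 svc_number) {
        auto& kernel = system.Kernel();
        kernel.EnterSVCProfile();
        // ... dispatch to the handler for 'svc_number' ...
        kernel.ExitSVCProfile();
    }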
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 83de1f542..49bd47e89 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h | |||
| @@ -4,15 +4,17 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <array> | ||
| 7 | #include <memory> | 8 | #include <memory> |
| 8 | #include <string> | 9 | #include <string> |
| 9 | #include <unordered_map> | 10 | #include <unordered_map> |
| 10 | #include <vector> | 11 | #include <vector> |
| 12 | #include "core/hardware_properties.h" | ||
| 11 | #include "core/hle/kernel/memory/memory_types.h" | 13 | #include "core/hle/kernel/memory/memory_types.h" |
| 12 | #include "core/hle/kernel/object.h" | 14 | #include "core/hle/kernel/object.h" |
| 13 | 15 | ||
| 14 | namespace Core { | 16 | namespace Core { |
| 15 | struct EmuThreadHandle; | 17 | class CPUInterruptHandler; |
| 16 | class ExclusiveMonitor; | 18 | class ExclusiveMonitor; |
| 17 | class System; | 19 | class System; |
| 18 | } // namespace Core | 20 | } // namespace Core |
| @@ -65,6 +67,9 @@ public: | |||
| 65 | KernelCore(KernelCore&&) = delete; | 67 | KernelCore(KernelCore&&) = delete; |
| 66 | KernelCore& operator=(KernelCore&&) = delete; | 68 | KernelCore& operator=(KernelCore&&) = delete; |
| 67 | 69 | ||
| 70 | /// Sets if emulation is multicore or single core, must be set before Initialize | ||
| 71 | void SetMulticore(bool is_multicore); | ||
| 72 | |||
| 68 | /// Resets the kernel to a clean slate for use. | 73 | /// Resets the kernel to a clean slate for use. |
| 69 | void Initialize(); | 74 | void Initialize(); |
| 70 | 75 | ||
| @@ -110,6 +115,18 @@ public: | |||
| 110 | /// Gets the an instance of the respective physical CPU core. | 115 | /// Gets the an instance of the respective physical CPU core. |
| 111 | const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const; | 116 | const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const; |
| 112 | 117 | ||
| 118 | /// Gets the sole instance of the Scheduler at the current running core. | ||
| 119 | Kernel::Scheduler& CurrentScheduler(); | ||
| 120 | |||
| 121 | /// Gets the sole instance of the Scheduler at the current running core. | ||
| 122 | const Kernel::Scheduler& CurrentScheduler() const; | ||
| 123 | |||
| 124 | /// Gets the an instance of the current physical CPU core. | ||
| 125 | Kernel::PhysicalCore& CurrentPhysicalCore(); | ||
| 126 | |||
| 127 | /// Gets the an instance of the current physical CPU core. | ||
| 128 | const Kernel::PhysicalCore& CurrentPhysicalCore() const; | ||
| 129 | |||
| 113 | /// Gets the an instance of the Synchronization Interface. | 130 | /// Gets the an instance of the Synchronization Interface. |
| 114 | Kernel::Synchronization& Synchronization(); | 131 | Kernel::Synchronization& Synchronization(); |
| 115 | 132 | ||
| @@ -129,6 +146,10 @@ public: | |||
| 129 | 146 | ||
| 130 | const Core::ExclusiveMonitor& GetExclusiveMonitor() const; | 147 | const Core::ExclusiveMonitor& GetExclusiveMonitor() const; |
| 131 | 148 | ||
| 149 | std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts(); | ||
| 150 | |||
| 151 | const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Interrupts() const; | ||
| 152 | |||
| 132 | void InvalidateAllInstructionCaches(); | 153 | void InvalidateAllInstructionCaches(); |
| 133 | 154 | ||
| 134 | /// Adds a port to the named port table | 155 | /// Adds a port to the named port table |
| @@ -191,6 +212,18 @@ public: | |||
| 191 | /// Gets the shared memory object for Time services. | 212 | /// Gets the shared memory object for Time services. |
| 192 | const Kernel::SharedMemory& GetTimeSharedMem() const; | 213 | const Kernel::SharedMemory& GetTimeSharedMem() const; |
| 193 | 214 | ||
| 215 | /// Suspend/unsuspend the OS. | ||
| 216 | void Suspend(bool in_suspention); | ||
| 217 | |||
| 218 | /// Exceptional exit the OS. | ||
| 219 | void ExceptionalExit(); | ||
| 220 | |||
| 221 | bool IsMulticore() const; | ||
| 222 | |||
| 223 | void EnterSVCProfile(); | ||
| 224 | |||
| 225 | void ExitSVCProfile(); | ||
| 226 | |||
| 194 | private: | 227 | private: |
| 195 | friend class Object; | 228 | friend class Object; |
| 196 | friend class Process; | 229 | friend class Process; |
| @@ -208,9 +241,6 @@ private: | |||
| 208 | /// Creates a new thread ID, incrementing the internal thread ID counter. | 241 | /// Creates a new thread ID, incrementing the internal thread ID counter. |
| 209 | u64 CreateNewThreadID(); | 242 | u64 CreateNewThreadID(); |
| 210 | 243 | ||
| 211 | /// Retrieves the event type used for thread wakeup callbacks. | ||
| 212 | const std::shared_ptr<Core::Timing::EventType>& ThreadWakeupCallbackEventType() const; | ||
| 213 | |||
| 214 | /// Provides a reference to the global handle table. | 244 | /// Provides a reference to the global handle table. |
| 215 | Kernel::HandleTable& GlobalHandleTable(); | 245 | Kernel::HandleTable& GlobalHandleTable(); |
| 216 | 246 | ||
| @@ -219,6 +249,7 @@ private: | |||
| 219 | 249 | ||
| 220 | struct Impl; | 250 | struct Impl; |
| 221 | std::unique_ptr<Impl> impl; | 251 | std::unique_ptr<Impl> impl; |
| 252 | bool exception_exited{}; | ||
| 222 | }; | 253 | }; |
| 223 | 254 | ||
| 224 | } // namespace Kernel | 255 | } // namespace Kernel |
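
Taken together, the kernel.h additions move per-core state behind the KernelCore facade. A minimal sketch of the intended call order, assuming only what the declarations and comments above state (the SetMulticore-before-Initialize ordering is the one documented constraint; everything else here is illustrative):

    // Hypothetical boot sequence against the new KernelCore surface.
    Kernel::KernelCore& kernel = system.Kernel();
    kernel.SetMulticore(true);   // must be set before Initialize()
    kernel.Initialize();

    // Per-core accessors introduced by this change.
    Kernel::Scheduler& scheduler = kernel.CurrentScheduler();
    const Kernel::PhysicalCore& core = kernel.CurrentPhysicalCore();

    kernel.Suspend(true);        // suspend the guest OS
    kernel.Suspend(false);       // resume it
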
diff --git a/src/core/hle/kernel/memory/memory_manager.cpp b/src/core/hle/kernel/memory/memory_manager.cpp index 616148190..acf13585c 100644 --- a/src/core/hle/kernel/memory/memory_manager.cpp +++ b/src/core/hle/kernel/memory/memory_manager.cpp | |||
| @@ -139,7 +139,6 @@ ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pa | |||
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | // Only succeed if we allocated as many pages as we wanted | 141 | // Only succeed if we allocated as many pages as we wanted |
| 142 | ASSERT(num_pages >= 0); | ||
| 143 | if (num_pages) { | 142 | if (num_pages) { |
| 144 | return ERR_OUT_OF_MEMORY; | 143 | return ERR_OUT_OF_MEMORY; |
| 145 | } | 144 | } |
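
The dropped ASSERT was a tautology: num_pages is a std::size_t in the signature above, and an unsigned value can never be negative, so the check could not fire (compilers typically warn that such a comparison is always true). In sketch form:

    static_assert(std::is_unsigned_v<std::size_t>); // hence num_pages >= 0 always held
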
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp index 7869eb32b..8f6c944d1 100644 --- a/src/core/hle/kernel/mutex.cpp +++ b/src/core/hle/kernel/mutex.cpp | |||
| @@ -34,8 +34,6 @@ static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThr | |||
| 34 | if (thread->GetMutexWaitAddress() != mutex_addr) | 34 | if (thread->GetMutexWaitAddress() != mutex_addr) |
| 35 | continue; | 35 | continue; |
| 36 | 36 | ||
| 37 | ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); | ||
| 38 | |||
| 39 | ++num_waiters; | 37 | ++num_waiters; |
| 40 | if (highest_priority_thread == nullptr || | 38 | if (highest_priority_thread == nullptr || |
| 41 | thread->GetPriority() < highest_priority_thread->GetPriority()) { | 39 | thread->GetPriority() < highest_priority_thread->GetPriority()) { |
| @@ -49,6 +47,7 @@ static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThr | |||
| 49 | /// Update the mutex owner field of all threads waiting on the mutex to point to the new owner. | 47 | /// Update the mutex owner field of all threads waiting on the mutex to point to the new owner. |
| 50 | static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread, | 48 | static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread, |
| 51 | std::shared_ptr<Thread> new_owner) { | 49 | std::shared_ptr<Thread> new_owner) { |
| 50 | current_thread->RemoveMutexWaiter(new_owner); | ||
| 52 | const auto threads = current_thread->GetMutexWaitingThreads(); | 51 | const auto threads = current_thread->GetMutexWaitingThreads(); |
| 53 | for (const auto& thread : threads) { | 52 | for (const auto& thread : threads) { |
| 54 | if (thread->GetMutexWaitAddress() != mutex_addr) | 53 | if (thread->GetMutexWaitAddress() != mutex_addr) |
| @@ -72,85 +71,100 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle, | |||
| 72 | return ERR_INVALID_ADDRESS; | 71 | return ERR_INVALID_ADDRESS; |
| 73 | } | 72 | } |
| 74 | 73 | ||
| 75 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | 74 | auto& kernel = system.Kernel(); |
| 76 | std::shared_ptr<Thread> current_thread = | 75 | std::shared_ptr<Thread> current_thread = |
| 77 | SharedFrom(system.CurrentScheduler().GetCurrentThread()); | 76 | SharedFrom(kernel.CurrentScheduler().GetCurrentThread()); |
| 78 | std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle); | 77 | { |
| 79 | std::shared_ptr<Thread> requesting_thread = handle_table.Get<Thread>(requesting_thread_handle); | 78 | SchedulerLock lock(kernel); |
| 79 | // The mutex address must be 4-byte aligned | ||
| 80 | if ((address % sizeof(u32)) != 0) { | ||
| 81 | return ERR_INVALID_ADDRESS; | ||
| 82 | } | ||
| 80 | 83 | ||
| 81 | // TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of another | 84 | const auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); |
| 82 | // thread. | 85 | std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle); |
| 83 | ASSERT(requesting_thread == current_thread); | 86 | std::shared_ptr<Thread> requesting_thread = |
| 87 | handle_table.Get<Thread>(requesting_thread_handle); | ||
| 84 | 88 | ||
| 85 | const u32 addr_value = system.Memory().Read32(address); | 89 | // TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of |
| 90 | // another thread. | ||
| 91 | ASSERT(requesting_thread == current_thread); | ||
| 86 | 92 | ||
| 87 | // If the mutex isn't being held, just return success. | 93 | current_thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS); |
| 88 | if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) { | ||
| 89 | return RESULT_SUCCESS; | ||
| 90 | } | ||
| 91 | 94 | ||
| 92 | if (holding_thread == nullptr) { | 95 | const u32 addr_value = system.Memory().Read32(address); |
| 93 | LOG_ERROR(Kernel, "Holding thread does not exist! thread_handle={:08X}", | 96 | |
| 94 | holding_thread_handle); | 97 | // If the mutex isn't being held, just return success. |
| 95 | return ERR_INVALID_HANDLE; | 98 | if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) { |
| 96 | } | 99 | return RESULT_SUCCESS; |
| 100 | } | ||
| 97 | 101 | ||
| 98 | // Wait until the mutex is released | 102 | if (holding_thread == nullptr) { |
| 99 | current_thread->SetMutexWaitAddress(address); | 103 | return ERR_INVALID_HANDLE; |
| 100 | current_thread->SetWaitHandle(requesting_thread_handle); | 104 | } |
| 101 | 105 | ||
| 102 | current_thread->SetStatus(ThreadStatus::WaitMutex); | 106 | // Wait until the mutex is released |
| 103 | current_thread->InvalidateWakeupCallback(); | 107 | current_thread->SetMutexWaitAddress(address); |
| 108 | current_thread->SetWaitHandle(requesting_thread_handle); | ||
| 104 | 109 | ||
| 105 | // Update the lock holder thread's priority to prevent priority inversion. | 110 | current_thread->SetStatus(ThreadStatus::WaitMutex); |
| 106 | holding_thread->AddMutexWaiter(current_thread); | ||
| 107 | 111 | ||
| 108 | system.PrepareReschedule(); | 112 | // Update the lock holder thread's priority to prevent priority inversion. |
| 113 | holding_thread->AddMutexWaiter(current_thread); | ||
| 114 | } | ||
| 109 | 115 | ||
| 110 | return RESULT_SUCCESS; | 116 | { |
| 117 | SchedulerLock lock(kernel); | ||
| 118 | auto* owner = current_thread->GetLockOwner(); | ||
| 119 | if (owner != nullptr) { | ||
| 120 | owner->RemoveMutexWaiter(current_thread); | ||
| 121 | } | ||
| 122 | } | ||
| 123 | return current_thread->GetSignalingResult(); | ||
| 111 | } | 124 | } |
| 112 | 125 | ||
| 113 | ResultCode Mutex::Release(VAddr address) { | 126 | std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thread> owner, |
| 127 | VAddr address) { | ||
| 114 | // The mutex address must be 4-byte aligned | 128 | // The mutex address must be 4-byte aligned |
| 115 | if ((address % sizeof(u32)) != 0) { | 129 | if ((address % sizeof(u32)) != 0) { |
| 116 | LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address); | 130 | LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address); |
| 117 | return ERR_INVALID_ADDRESS; | 131 | return {ERR_INVALID_ADDRESS, nullptr}; |
| 118 | } | 132 | } |
| 119 | 133 | ||
| 120 | std::shared_ptr<Thread> current_thread = | 134 | auto [new_owner, num_waiters] = GetHighestPriorityMutexWaitingThread(owner, address); |
| 121 | SharedFrom(system.CurrentScheduler().GetCurrentThread()); | 135 | if (new_owner == nullptr) { |
| 122 | auto [thread, num_waiters] = GetHighestPriorityMutexWaitingThread(current_thread, address); | ||
| 123 | |||
| 124 | // There are no more threads waiting for the mutex, release it completely. | ||
| 125 | if (thread == nullptr) { | ||
| 126 | system.Memory().Write32(address, 0); | 136 | system.Memory().Write32(address, 0); |
| 127 | return RESULT_SUCCESS; | 137 | return {RESULT_SUCCESS, nullptr}; |
| 128 | } | 138 | } |
| 129 | |||
| 130 | // Transfer the ownership of the mutex from the previous owner to the new one. | 139 | // Transfer the ownership of the mutex from the previous owner to the new one. |
| 131 | TransferMutexOwnership(address, current_thread, thread); | 140 | TransferMutexOwnership(address, owner, new_owner); |
| 132 | 141 | u32 mutex_value = new_owner->GetWaitHandle(); | |
| 133 | u32 mutex_value = thread->GetWaitHandle(); | ||
| 134 | |||
| 135 | if (num_waiters >= 2) { | 142 | if (num_waiters >= 2) { |
| 136 | // Notify the guest that there are still some threads waiting for the mutex | 143 | // Notify the guest that there are still some threads waiting for the mutex |
| 137 | mutex_value |= Mutex::MutexHasWaitersFlag; | 144 | mutex_value |= Mutex::MutexHasWaitersFlag; |
| 138 | } | 145 | } |
| 146 | new_owner->SetSynchronizationResults(nullptr, RESULT_SUCCESS); | ||
| 147 | new_owner->SetLockOwner(nullptr); | ||
| 148 | new_owner->ResumeFromWait(); | ||
| 139 | 149 | ||
| 140 | // Grant the mutex to the next waiting thread and resume it. | ||
| 141 | system.Memory().Write32(address, mutex_value); | 150 | system.Memory().Write32(address, mutex_value); |
| 151 | return {RESULT_SUCCESS, new_owner}; | ||
| 152 | } | ||
| 142 | 153 | ||
| 143 | ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); | 154 | ResultCode Mutex::Release(VAddr address) { |
| 144 | thread->ResumeFromWait(); | 155 | auto& kernel = system.Kernel(); |
| 156 | SchedulerLock lock(kernel); | ||
| 145 | 157 | ||
| 146 | thread->SetLockOwner(nullptr); | 158 | std::shared_ptr<Thread> current_thread = |
| 147 | thread->SetCondVarWaitAddress(0); | 159 | SharedFrom(kernel.CurrentScheduler().GetCurrentThread()); |
| 148 | thread->SetMutexWaitAddress(0); | ||
| 149 | thread->SetWaitHandle(0); | ||
| 150 | thread->SetWaitSynchronizationResult(RESULT_SUCCESS); | ||
| 151 | 160 | ||
| 152 | system.PrepareReschedule(); | 161 | auto [result, new_owner] = Unlock(current_thread, address); |
| 153 | 162 | ||
| 154 | return RESULT_SUCCESS; | 163 | if (result != RESULT_SUCCESS && new_owner != nullptr) { |
| 164 | new_owner->SetSynchronizationResults(nullptr, result); | ||
| 165 | } | ||
| 166 | |||
| 167 | return result; | ||
| 155 | } | 168 | } |
| 169 | |||
| 156 | } // namespace Kernel | 170 | } // namespace Kernel |
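
The release path is now split in two: Unlock(owner, address) performs the hand-off and reports both the ResultCode and the thread that was granted the mutex, while Release() merely wraps it for the current thread under the scheduler lock. A sketch of a caller, mirroring Release() above (the surrounding code is hypothetical; the names come from this diff, and holding the scheduler lock is assumed to be required because Release() takes it before calling Unlock()):

    SchedulerLock lock(kernel);
    auto [result, new_owner] = mutex.Unlock(owner_thread, mutex_address);
    if (result != RESULT_SUCCESS && new_owner != nullptr) {
        // Propagate the failure to the thread that was about to receive the mutex.
        new_owner->SetSynchronizationResults(nullptr, result);
    }
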
diff --git a/src/core/hle/kernel/mutex.h b/src/core/hle/kernel/mutex.h index b904de2e8..3b81dc3df 100644 --- a/src/core/hle/kernel/mutex.h +++ b/src/core/hle/kernel/mutex.h | |||
| @@ -28,6 +28,10 @@ public: | |||
| 28 | ResultCode TryAcquire(VAddr address, Handle holding_thread_handle, | 28 | ResultCode TryAcquire(VAddr address, Handle holding_thread_handle, |
| 29 | Handle requesting_thread_handle); | 29 | Handle requesting_thread_handle); |
| 30 | 30 | ||
| 31 | /// Unlocks a mutex for owner at address | ||
| 32 | std::pair<ResultCode, std::shared_ptr<Thread>> Unlock(std::shared_ptr<Thread> owner, | ||
| 33 | VAddr address); | ||
| 34 | |||
| 31 | /// Releases the mutex at the specified address. | 35 | /// Releases the mutex at the specified address. |
| 32 | ResultCode Release(VAddr address); | 36 | ResultCode Release(VAddr address); |
| 33 | 37 | ||
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp index a15011076..c6bbdb080 100644 --- a/src/core/hle/kernel/physical_core.cpp +++ b/src/core/hle/kernel/physical_core.cpp | |||
| @@ -2,12 +2,15 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include "common/assert.h" | ||
| 5 | #include "common/logging/log.h" | 6 | #include "common/logging/log.h" |
| 7 | #include "common/spin_lock.h" | ||
| 6 | #include "core/arm/arm_interface.h" | 8 | #include "core/arm/arm_interface.h" |
| 7 | #ifdef ARCHITECTURE_x86_64 | 9 | #ifdef ARCHITECTURE_x86_64 |
| 8 | #include "core/arm/dynarmic/arm_dynarmic_32.h" | 10 | #include "core/arm/dynarmic/arm_dynarmic_32.h" |
| 9 | #include "core/arm/dynarmic/arm_dynarmic_64.h" | 11 | #include "core/arm/dynarmic/arm_dynarmic_64.h" |
| 10 | #endif | 12 | #endif |
| 13 | #include "core/arm/cpu_interrupt_handler.h" | ||
| 11 | #include "core/arm/exclusive_monitor.h" | 14 | #include "core/arm/exclusive_monitor.h" |
| 12 | #include "core/arm/unicorn/arm_unicorn.h" | 15 | #include "core/arm/unicorn/arm_unicorn.h" |
| 13 | #include "core/core.h" | 16 | #include "core/core.h" |
| @@ -17,50 +20,37 @@ | |||
| 17 | 20 | ||
| 18 | namespace Kernel { | 21 | namespace Kernel { |
| 19 | 22 | ||
| 20 | PhysicalCore::PhysicalCore(Core::System& system, std::size_t id, | 23 | PhysicalCore::PhysicalCore(Core::System& system, std::size_t id, Kernel::Scheduler& scheduler, |
| 21 | Core::ExclusiveMonitor& exclusive_monitor) | 24 | Core::CPUInterruptHandler& interrupt_handler) |
| 22 | : core_index{id} { | 25 | : interrupt_handler{interrupt_handler}, core_index{id}, scheduler{scheduler} { |
| 23 | #ifdef ARCHITECTURE_x86_64 | ||
| 24 | arm_interface_32 = | ||
| 25 | std::make_unique<Core::ARM_Dynarmic_32>(system, exclusive_monitor, core_index); | ||
| 26 | arm_interface_64 = | ||
| 27 | std::make_unique<Core::ARM_Dynarmic_64>(system, exclusive_monitor, core_index); | ||
| 28 | |||
| 29 | #else | ||
| 30 | using Core::ARM_Unicorn; | ||
| 31 | arm_interface_32 = std::make_unique<ARM_Unicorn>(system, ARM_Unicorn::Arch::AArch32); | ||
| 32 | arm_interface_64 = std::make_unique<ARM_Unicorn>(system, ARM_Unicorn::Arch::AArch64); | ||
| 33 | LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available"); | ||
| 34 | #endif | ||
| 35 | 26 | ||
| 36 | scheduler = std::make_unique<Kernel::Scheduler>(system, core_index); | 27 | guard = std::make_unique<Common::SpinLock>(); |
| 37 | } | 28 | } |
| 38 | 29 | ||
| 39 | PhysicalCore::~PhysicalCore() = default; | 30 | PhysicalCore::~PhysicalCore() = default; |
| 40 | 31 | ||
| 41 | void PhysicalCore::Run() { | 32 | void PhysicalCore::Idle() { |
| 42 | arm_interface->Run(); | 33 | interrupt_handler.AwaitInterrupt(); |
| 43 | arm_interface->ClearExclusiveState(); | ||
| 44 | } | 34 | } |
| 45 | 35 | ||
| 46 | void PhysicalCore::Step() { | 36 | void PhysicalCore::Shutdown() { |
| 47 | arm_interface->Step(); | 37 | scheduler.Shutdown(); |
| 48 | } | 38 | } |
| 49 | 39 | ||
| 50 | void PhysicalCore::Stop() { | 40 | bool PhysicalCore::IsInterrupted() const { |
| 51 | arm_interface->PrepareReschedule(); | 41 | return interrupt_handler.IsInterrupted(); |
| 52 | } | 42 | } |
| 53 | 43 | ||
| 54 | void PhysicalCore::Shutdown() { | 44 | void PhysicalCore::Interrupt() { |
| 55 | scheduler->Shutdown(); | 45 | guard->lock(); |
| 46 | interrupt_handler.SetInterrupt(true); | ||
| 47 | guard->unlock(); | ||
| 56 | } | 48 | } |
| 57 | 49 | ||
| 58 | void PhysicalCore::SetIs64Bit(bool is_64_bit) { | 50 | void PhysicalCore::ClearInterrupt() { |
| 59 | if (is_64_bit) { | 51 | guard->lock(); |
| 60 | arm_interface = arm_interface_64.get(); | 52 | interrupt_handler.SetInterrupt(false); |
| 61 | } else { | 53 | guard->unlock(); |
| 62 | arm_interface = arm_interface_32.get(); | ||
| 63 | } | ||
| 64 | } | 54 | } |
| 65 | 55 | ||
| 66 | } // namespace Kernel | 56 | } // namespace Kernel |
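
PhysicalCore no longer owns its JIT instances or its scheduler; it reduces to an interrupt mailbox over a CPUInterruptHandler, with a spin lock serializing Interrupt()/ClearInterrupt(). A sketch of the host-side loop this design enables (HostCoreLoop and running are assumptions for illustration, not part of this diff):

    #include <atomic>

    void HostCoreLoop(Kernel::PhysicalCore& core, std::atomic<bool>& running) {
        while (running) {
            if (core.IsInterrupted()) {
                core.ClearInterrupt();
                // A reschedule is pending; the core's Scheduler reacts next.
            } else {
                core.Idle(); // parks in AwaitInterrupt() until Interrupt() is called
            }
        }
    }
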
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h index 3269166be..d7a7a951c 100644 --- a/src/core/hle/kernel/physical_core.h +++ b/src/core/hle/kernel/physical_core.h | |||
| @@ -7,12 +7,17 @@ | |||
| 7 | #include <cstddef> | 7 | #include <cstddef> |
| 8 | #include <memory> | 8 | #include <memory> |
| 9 | 9 | ||
| 10 | namespace Common { | ||
| 11 | class SpinLock; | ||
| 12 | } | ||
| 13 | |||
| 10 | namespace Kernel { | 14 | namespace Kernel { |
| 11 | class Scheduler; | 15 | class Scheduler; |
| 12 | } // namespace Kernel | 16 | } // namespace Kernel |
| 13 | 17 | ||
| 14 | namespace Core { | 18 | namespace Core { |
| 15 | class ARM_Interface; | 19 | class ARM_Interface; |
| 20 | class CPUInterruptHandler; | ||
| 16 | class ExclusiveMonitor; | 21 | class ExclusiveMonitor; |
| 17 | class System; | 22 | class System; |
| 18 | } // namespace Core | 23 | } // namespace Core |
| @@ -21,7 +26,8 @@ namespace Kernel { | |||
| 21 | 26 | ||
| 22 | class PhysicalCore { | 27 | class PhysicalCore { |
| 23 | public: | 28 | public: |
| 24 | PhysicalCore(Core::System& system, std::size_t id, Core::ExclusiveMonitor& exclusive_monitor); | 29 | PhysicalCore(Core::System& system, std::size_t id, Kernel::Scheduler& scheduler, |
| 30 | Core::CPUInterruptHandler& interrupt_handler); | ||
| 25 | ~PhysicalCore(); | 31 | ~PhysicalCore(); |
| 26 | 32 | ||
| 27 | PhysicalCore(const PhysicalCore&) = delete; | 33 | PhysicalCore(const PhysicalCore&) = delete; |
| @@ -30,23 +36,18 @@ public: | |||
| 30 | PhysicalCore(PhysicalCore&&) = default; | 36 | PhysicalCore(PhysicalCore&&) = default; |
| 31 | PhysicalCore& operator=(PhysicalCore&&) = default; | 37 | PhysicalCore& operator=(PhysicalCore&&) = default; |
| 32 | 38 | ||
| 33 | /// Execute current jit state | 39 | void Idle(); |
| 34 | void Run(); | 40 | /// Interrupt this physical core. |
| 35 | /// Execute a single instruction in current jit. | 41 | void Interrupt(); |
| 36 | void Step(); | ||
| 37 | /// Stop JIT execution/exit | ||
| 38 | void Stop(); | ||
| 39 | 42 | ||
| 40 | // Shutdown this physical core. | 43 | /// Clear this core's interrupt |
| 41 | void Shutdown(); | 44 | void ClearInterrupt(); |
| 42 | 45 | ||
| 43 | Core::ARM_Interface& ArmInterface() { | 46 | /// Check if this core is interrupted |
| 44 | return *arm_interface; | 47 | bool IsInterrupted() const; |
| 45 | } | ||
| 46 | 48 | ||
| 47 | const Core::ARM_Interface& ArmInterface() const { | 49 | // Shutdown this physical core. |
| 48 | return *arm_interface; | 50 | void Shutdown(); |
| 49 | } | ||
| 50 | 51 | ||
| 51 | bool IsMainCore() const { | 52 | bool IsMainCore() const { |
| 52 | return core_index == 0; | 53 | return core_index == 0; |
| @@ -61,21 +62,18 @@ public: | |||
| 61 | } | 62 | } |
| 62 | 63 | ||
| 63 | Kernel::Scheduler& Scheduler() { | 64 | Kernel::Scheduler& Scheduler() { |
| 64 | return *scheduler; | 65 | return scheduler; |
| 65 | } | 66 | } |
| 66 | 67 | ||
| 67 | const Kernel::Scheduler& Scheduler() const { | 68 | const Kernel::Scheduler& Scheduler() const { |
| 68 | return *scheduler; | 69 | return scheduler; |
| 69 | } | 70 | } |
| 70 | 71 | ||
| 71 | void SetIs64Bit(bool is_64_bit); | ||
| 72 | |||
| 73 | private: | 72 | private: |
| 73 | Core::CPUInterruptHandler& interrupt_handler; | ||
| 74 | std::size_t core_index; | 74 | std::size_t core_index; |
| 75 | std::unique_ptr<Core::ARM_Interface> arm_interface_32; | 75 | Kernel::Scheduler& scheduler; |
| 76 | std::unique_ptr<Core::ARM_Interface> arm_interface_64; | 76 | std::unique_ptr<Common::SpinLock> guard; |
| 77 | std::unique_ptr<Kernel::Scheduler> scheduler; | ||
| 78 | Core::ARM_Interface* arm_interface{}; | ||
| 79 | }; | 77 | }; |
| 80 | 78 | ||
| 81 | } // namespace Kernel | 79 | } // namespace Kernel |
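
One detail worth noting in the header: the spin lock is held through a unique_ptr rather than by value. A plausible reason (an inference, not stated in the diff) is that PhysicalCore keeps its defaulted move operations, and lock types are generally non-movable; the indirection preserves movability. A generic illustration with standard types:

    #include <memory>
    #include <mutex>
    #include <type_traits>

    struct ByValue { std::mutex m; };                    // implicit moves unusable
    struct ByPointer { std::unique_ptr<std::mutex> m; }; // moves remain available
    static_assert(!std::is_move_constructible_v<ByValue>);
    static_assert(std::is_move_constructible_v<ByPointer>);
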
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index c4c5199b1..f9d7c024d 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include "core/hle/kernel/resource_limit.h" | 22 | #include "core/hle/kernel/resource_limit.h" |
| 23 | #include "core/hle/kernel/scheduler.h" | 23 | #include "core/hle/kernel/scheduler.h" |
| 24 | #include "core/hle/kernel/thread.h" | 24 | #include "core/hle/kernel/thread.h" |
| 25 | #include "core/hle/lock.h" | ||
| 25 | #include "core/memory.h" | 26 | #include "core/memory.h" |
| 26 | #include "core/settings.h" | 27 | #include "core/settings.h" |
| 27 | 28 | ||
| @@ -30,14 +31,15 @@ namespace { | |||
| 30 | /** | 31 | /** |
| 31 | * Sets up the primary application thread | 32 | * Sets up the primary application thread |
| 32 | * | 33 | * |
| 34 | * @param system The system instance to create the main thread under. | ||
| 33 | * @param owner_process The parent process for the main thread | 35 | * @param owner_process The parent process for the main thread |
| 34 | * @param kernel The kernel instance to create the main thread under. | ||
| 35 | * @param priority The priority to give the main thread | 36 | * @param priority The priority to give the main thread |
| 36 | */ | 37 | */ |
| 37 | void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority, VAddr stack_top) { | 38 | void SetupMainThread(Core::System& system, Process& owner_process, u32 priority, VAddr stack_top) { |
| 38 | const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart(); | 39 | const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart(); |
| 39 | auto thread_res = Thread::Create(kernel, "main", entry_point, priority, 0, | 40 | ThreadType type = THREADTYPE_USER; |
| 40 | owner_process.GetIdealCore(), stack_top, owner_process); | 41 | auto thread_res = Thread::Create(system, type, "main", entry_point, priority, 0, |
| 42 | owner_process.GetIdealCore(), stack_top, &owner_process); | ||
| 41 | 43 | ||
| 42 | std::shared_ptr<Thread> thread = std::move(thread_res).Unwrap(); | 44 | std::shared_ptr<Thread> thread = std::move(thread_res).Unwrap(); |
| 43 | 45 | ||
| @@ -48,8 +50,12 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority, V | |||
| 48 | thread->GetContext32().cpu_registers[1] = thread_handle; | 50 | thread->GetContext32().cpu_registers[1] = thread_handle; |
| 49 | thread->GetContext64().cpu_registers[1] = thread_handle; | 51 | thread->GetContext64().cpu_registers[1] = thread_handle; |
| 50 | 52 | ||
| 53 | auto& kernel = system.Kernel(); | ||
| 51 | // Threads are dormant by default; wake the main thread so it runs when the scheduler fires | 54 | // Threads are dormant by default; wake the main thread so it runs when the scheduler fires |
| 52 | thread->ResumeFromWait(); | 55 | { |
| 56 | SchedulerLock lock{kernel}; | ||
| 57 | thread->SetStatus(ThreadStatus::Ready); | ||
| 58 | } | ||
| 53 | } | 59 | } |
| 54 | } // Anonymous namespace | 60 | } // Anonymous namespace |
| 55 | 61 | ||
| @@ -182,7 +188,6 @@ void Process::RemoveConditionVariableThread(std::shared_ptr<Thread> thread) { | |||
| 182 | } | 188 | } |
| 183 | ++it; | 189 | ++it; |
| 184 | } | 190 | } |
| 185 | UNREACHABLE(); | ||
| 186 | } | 191 | } |
| 187 | 192 | ||
| 188 | std::vector<std::shared_ptr<Thread>> Process::GetConditionVariableThreads( | 193 | std::vector<std::shared_ptr<Thread>> Process::GetConditionVariableThreads( |
| @@ -207,6 +212,7 @@ void Process::UnregisterThread(const Thread* thread) { | |||
| 207 | } | 212 | } |
| 208 | 213 | ||
| 209 | ResultCode Process::ClearSignalState() { | 214 | ResultCode Process::ClearSignalState() { |
| 215 | SchedulerLock lock(system.Kernel()); | ||
| 210 | if (status == ProcessStatus::Exited) { | 216 | if (status == ProcessStatus::Exited) { |
| 211 | LOG_ERROR(Kernel, "called on a terminated process instance."); | 217 | LOG_ERROR(Kernel, "called on a terminated process instance."); |
| 212 | return ERR_INVALID_STATE; | 218 | return ERR_INVALID_STATE; |
| @@ -294,7 +300,7 @@ void Process::Run(s32 main_thread_priority, u64 stack_size) { | |||
| 294 | 300 | ||
| 295 | ChangeStatus(ProcessStatus::Running); | 301 | ChangeStatus(ProcessStatus::Running); |
| 296 | 302 | ||
| 297 | SetupMainThread(*this, kernel, main_thread_priority, main_thread_stack_top); | 303 | SetupMainThread(system, *this, main_thread_priority, main_thread_stack_top); |
| 298 | resource_limit->Reserve(ResourceType::Threads, 1); | 304 | resource_limit->Reserve(ResourceType::Threads, 1); |
| 299 | resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size); | 305 | resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size); |
| 300 | } | 306 | } |
| @@ -340,6 +346,7 @@ static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) { | |||
| 340 | } | 346 | } |
| 341 | 347 | ||
| 342 | VAddr Process::CreateTLSRegion() { | 348 | VAddr Process::CreateTLSRegion() { |
| 349 | SchedulerLock lock(system.Kernel()); | ||
| 343 | if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)}; | 350 | if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)}; |
| 344 | tls_page_iter != tls_pages.cend()) { | 351 | tls_page_iter != tls_pages.cend()) { |
| 345 | return *tls_page_iter->ReserveSlot(); | 352 | return *tls_page_iter->ReserveSlot(); |
| @@ -370,6 +377,7 @@ VAddr Process::CreateTLSRegion() { | |||
| 370 | } | 377 | } |
| 371 | 378 | ||
| 372 | void Process::FreeTLSRegion(VAddr tls_address) { | 379 | void Process::FreeTLSRegion(VAddr tls_address) { |
| 380 | SchedulerLock lock(system.Kernel()); | ||
| 373 | const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE); | 381 | const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE); |
| 374 | auto iter = | 382 | auto iter = |
| 375 | std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) { | 383 | std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) { |
| @@ -384,6 +392,7 @@ void Process::FreeTLSRegion(VAddr tls_address) { | |||
| 384 | } | 392 | } |
| 385 | 393 | ||
| 386 | void Process::LoadModule(CodeSet code_set, VAddr base_addr) { | 394 | void Process::LoadModule(CodeSet code_set, VAddr base_addr) { |
| 395 | std::lock_guard lock{HLE::g_hle_lock}; | ||
| 387 | const auto ReprotectSegment = [&](const CodeSet::Segment& segment, | 396 | const auto ReprotectSegment = [&](const CodeSet::Segment& segment, |
| 388 | Memory::MemoryPermission permission) { | 397 | Memory::MemoryPermission permission) { |
| 389 | page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission); | 398 | page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission); |
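
Several Process paths (ClearSignalState, CreateTLSRegion, FreeTLSRegion, and the main-thread wakeup) now run under SchedulerLock. From these call sites it reads as a scoped guard over GlobalScheduler::Lock()/Unlock(), consistent with the scheduler.cpp changes later in this diff; the recurring pattern, sketched:

    {
        SchedulerLock lock{kernel};             // GlobalScheduler::Lock()
        thread->SetStatus(ThreadStatus::Ready); // mutate scheduler-visible state
    } // scope exit -> GlobalScheduler::Unlock(): reselect threads, raise interrupts
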
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp index ef5e19e63..6e286419e 100644 --- a/src/core/hle/kernel/readable_event.cpp +++ b/src/core/hle/kernel/readable_event.cpp | |||
| @@ -6,8 +6,10 @@ | |||
| 6 | #include "common/assert.h" | 6 | #include "common/assert.h" |
| 7 | #include "common/logging/log.h" | 7 | #include "common/logging/log.h" |
| 8 | #include "core/hle/kernel/errors.h" | 8 | #include "core/hle/kernel/errors.h" |
| 9 | #include "core/hle/kernel/kernel.h" | ||
| 9 | #include "core/hle/kernel/object.h" | 10 | #include "core/hle/kernel/object.h" |
| 10 | #include "core/hle/kernel/readable_event.h" | 11 | #include "core/hle/kernel/readable_event.h" |
| 12 | #include "core/hle/kernel/scheduler.h" | ||
| 11 | #include "core/hle/kernel/thread.h" | 13 | #include "core/hle/kernel/thread.h" |
| 12 | 14 | ||
| 13 | namespace Kernel { | 15 | namespace Kernel { |
| @@ -37,6 +39,7 @@ void ReadableEvent::Clear() { | |||
| 37 | } | 39 | } |
| 38 | 40 | ||
| 39 | ResultCode ReadableEvent::Reset() { | 41 | ResultCode ReadableEvent::Reset() { |
| 42 | SchedulerLock lock(kernel); | ||
| 40 | if (!is_signaled) { | 43 | if (!is_signaled) { |
| 41 | LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}", | 44 | LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}", |
| 42 | GetObjectId(), GetTypeName(), GetName()); | 45 | GetObjectId(), GetTypeName(), GetName()); |
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index 1140c72a3..2b12c0dbf 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp | |||
| @@ -11,11 +11,15 @@ | |||
| 11 | #include <utility> | 11 | #include <utility> |
| 12 | 12 | ||
| 13 | #include "common/assert.h" | 13 | #include "common/assert.h" |
| 14 | #include "common/bit_util.h" | ||
| 15 | #include "common/fiber.h" | ||
| 14 | #include "common/logging/log.h" | 16 | #include "common/logging/log.h" |
| 15 | #include "core/arm/arm_interface.h" | 17 | #include "core/arm/arm_interface.h" |
| 16 | #include "core/core.h" | 18 | #include "core/core.h" |
| 17 | #include "core/core_timing.h" | 19 | #include "core/core_timing.h" |
| 20 | #include "core/cpu_manager.h" | ||
| 18 | #include "core/hle/kernel/kernel.h" | 21 | #include "core/hle/kernel/kernel.h" |
| 22 | #include "core/hle/kernel/physical_core.h" | ||
| 19 | #include "core/hle/kernel/process.h" | 23 | #include "core/hle/kernel/process.h" |
| 20 | #include "core/hle/kernel/scheduler.h" | 24 | #include "core/hle/kernel/scheduler.h" |
| 21 | #include "core/hle/kernel/time_manager.h" | 25 | #include "core/hle/kernel/time_manager.h" |
| @@ -27,103 +31,151 @@ GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {} | |||
| 27 | GlobalScheduler::~GlobalScheduler() = default; | 31 | GlobalScheduler::~GlobalScheduler() = default; |
| 28 | 32 | ||
| 29 | void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) { | 33 | void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) { |
| 34 | global_list_guard.lock(); | ||
| 30 | thread_list.push_back(std::move(thread)); | 35 | thread_list.push_back(std::move(thread)); |
| 36 | global_list_guard.unlock(); | ||
| 31 | } | 37 | } |
| 32 | 38 | ||
| 33 | void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) { | 39 | void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) { |
| 40 | global_list_guard.lock(); | ||
| 34 | thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), | 41 | thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), |
| 35 | thread_list.end()); | 42 | thread_list.end()); |
| 43 | global_list_guard.unlock(); | ||
| 36 | } | 44 | } |
| 37 | 45 | ||
| 38 | void GlobalScheduler::UnloadThread(std::size_t core) { | 46 | u32 GlobalScheduler::SelectThreads() { |
| 39 | Scheduler& sched = kernel.Scheduler(core); | 47 | ASSERT(is_locked); |
| 40 | sched.UnloadThread(); | ||
| 41 | } | ||
| 42 | |||
| 43 | void GlobalScheduler::SelectThread(std::size_t core) { | ||
| 44 | const auto update_thread = [](Thread* thread, Scheduler& sched) { | 48 | const auto update_thread = [](Thread* thread, Scheduler& sched) { |
| 45 | if (thread != sched.selected_thread.get()) { | 49 | sched.guard.lock(); |
| 50 | if (thread != sched.selected_thread_set.get()) { | ||
| 46 | if (thread == nullptr) { | 51 | if (thread == nullptr) { |
| 47 | ++sched.idle_selection_count; | 52 | ++sched.idle_selection_count; |
| 48 | } | 53 | } |
| 49 | sched.selected_thread = SharedFrom(thread); | 54 | sched.selected_thread_set = SharedFrom(thread); |
| 50 | } | 55 | } |
| 51 | sched.is_context_switch_pending = sched.selected_thread != sched.current_thread; | 56 | const bool reschedule_pending = |
| 57 | sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread); | ||
| 58 | sched.is_context_switch_pending = reschedule_pending; | ||
| 52 | std::atomic_thread_fence(std::memory_order_seq_cst); | 59 | std::atomic_thread_fence(std::memory_order_seq_cst); |
| 60 | sched.guard.unlock(); | ||
| 61 | return reschedule_pending; | ||
| 53 | }; | 62 | }; |
| 54 | Scheduler& sched = kernel.Scheduler(core); | 63 | if (!is_reselection_pending.load()) { |
| 55 | Thread* current_thread = nullptr; | 64 | return 0; |
| 56 | // Step 1: Get top thread in schedule queue. | ||
| 57 | current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front(); | ||
| 58 | if (current_thread) { | ||
| 59 | update_thread(current_thread, sched); | ||
| 60 | return; | ||
| 61 | } | 65 | } |
| 62 | // Step 2: Try selecting a suggested thread. | 66 | std::array<Thread*, Core::Hardware::NUM_CPU_CORES> top_threads{}; |
| 63 | Thread* winner = nullptr; | 67 | |
| 64 | std::set<s32> sug_cores; | 68 | u32 idle_cores{}; |
| 65 | for (auto thread : suggested_queue[core]) { | 69 | |
| 66 | s32 this_core = thread->GetProcessorID(); | 70 | // Step 1: Get top thread in schedule queue. |
| 67 | Thread* thread_on_core = nullptr; | 71 | for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { |
| 68 | if (this_core >= 0) { | 72 | Thread* top_thread = |
| 69 | thread_on_core = scheduled_queue[this_core].front(); | 73 | scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front(); |
| 70 | } | 74 | if (top_thread != nullptr) { |
| 71 | if (this_core < 0 || thread != thread_on_core) { | 75 | // TODO(Blinkhawk): Implement Thread Pinning |
| 72 | winner = thread; | 76 | } else { |
| 73 | break; | 77 | idle_cores |= (1ul << core); |
| 74 | } | 78 | } |
| 75 | sug_cores.insert(this_core); | 79 | top_threads[core] = top_thread; |
| 76 | } | 80 | } |
| 77 | // if we got a suggested thread, select it, else do a second pass. | 81 | |
| 78 | if (winner && winner->GetPriority() > 2) { | 82 | while (idle_cores != 0) { |
| 79 | if (winner->IsRunning()) { | 83 | u32 core_id = Common::CountTrailingZeroes32(idle_cores); |
| 80 | UnloadThread(static_cast<u32>(winner->GetProcessorID())); | 84 | |
| 85 | if (!suggested_queue[core_id].empty()) { | ||
| 86 | std::array<s32, Core::Hardware::NUM_CPU_CORES> migration_candidates{}; | ||
| 87 | std::size_t num_candidates = 0; | ||
| 88 | auto iter = suggested_queue[core_id].begin(); | ||
| 89 | Thread* suggested = nullptr; | ||
| 90 | // Step 2: Try selecting a suggested thread. | ||
| 91 | while (iter != suggested_queue[core_id].end()) { | ||
| 92 | suggested = *iter; | ||
| 93 | iter++; | ||
| 94 | s32 suggested_core_id = suggested->GetProcessorID(); | ||
| 95 | Thread* top_thread = | ||
| 96 | suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr; | ||
| 97 | if (top_thread != suggested) { | ||
| 98 | if (top_thread != nullptr && | ||
| 99 | top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) { | ||
| 100 | suggested = nullptr; | ||
| 101 | break; | ||
| 102 | // A higher-priority thread is in the way; cancel the core migration | ||
| 103 | } | ||
| 104 | TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested); | ||
| 105 | break; | ||
| 106 | } | ||
| 107 | suggested = nullptr; | ||
| 108 | migration_candidates[num_candidates++] = suggested_core_id; | ||
| 109 | } | ||
| 110 | // Step 3: Select a suggested thread from another core | ||
| 111 | if (suggested == nullptr) { | ||
| 112 | for (std::size_t i = 0; i < num_candidates; i++) { | ||
| 113 | s32 candidate_core = migration_candidates[i]; | ||
| 114 | suggested = top_threads[candidate_core]; | ||
| 115 | auto it = scheduled_queue[candidate_core].begin(); | ||
| 116 | it++; | ||
| 117 | Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr; | ||
| 118 | if (next != nullptr) { | ||
| 119 | TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), | ||
| 120 | suggested); | ||
| 121 | top_threads[candidate_core] = next; | ||
| 122 | break; | ||
| 123 | } else { | ||
| 124 | suggested = nullptr; | ||
| 125 | } | ||
| 126 | } | ||
| 127 | } | ||
| 128 | top_threads[core_id] = suggested; | ||
| 81 | } | 129 | } |
| 82 | TransferToCore(winner->GetPriority(), static_cast<s32>(core), winner); | 130 | |
| 83 | update_thread(winner, sched); | 131 | idle_cores &= ~(1ul << core_id); |
| 84 | return; | ||
| 85 | } | 132 | } |
| 86 | // Step 3: Select a suggested thread from another core | 133 | u32 cores_needing_context_switch{}; |
| 87 | for (auto& src_core : sug_cores) { | 134 | for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { |
| 88 | auto it = scheduled_queue[src_core].begin(); | 135 | Scheduler& sched = kernel.Scheduler(core); |
| 89 | it++; | 136 | ASSERT(top_threads[core] == nullptr || top_threads[core]->GetProcessorID() == core); |
| 90 | if (it != scheduled_queue[src_core].end()) { | 137 | if (update_thread(top_threads[core], sched)) { |
| 91 | Thread* thread_on_core = scheduled_queue[src_core].front(); | 138 | cores_needing_context_switch |= (1ul << core); |
| 92 | Thread* to_change = *it; | ||
| 93 | if (thread_on_core->IsRunning() || to_change->IsRunning()) { | ||
| 94 | UnloadThread(static_cast<u32>(src_core)); | ||
| 95 | } | ||
| 96 | TransferToCore(thread_on_core->GetPriority(), static_cast<s32>(core), thread_on_core); | ||
| 97 | current_thread = thread_on_core; | ||
| 98 | break; | ||
| 99 | } | 139 | } |
| 100 | } | 140 | } |
| 101 | update_thread(current_thread, sched); | 141 | return cores_needing_context_switch; |
| 102 | } | 142 | } |
| 103 | 143 | ||
| 104 | bool GlobalScheduler::YieldThread(Thread* yielding_thread) { | 144 | bool GlobalScheduler::YieldThread(Thread* yielding_thread) { |
| 145 | ASSERT(is_locked); | ||
| 105 | // Note: caller should use critical section, etc. | 146 | // Note: caller should use critical section, etc. |
| 147 | if (!yielding_thread->IsRunnable()) { | ||
| 148 | // Normally this case shouldn't happen except for SetThreadActivity. | ||
| 149 | is_reselection_pending.store(true, std::memory_order_release); | ||
| 150 | return false; | ||
| 151 | } | ||
| 106 | const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); | 152 | const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); |
| 107 | const u32 priority = yielding_thread->GetPriority(); | 153 | const u32 priority = yielding_thread->GetPriority(); |
| 108 | 154 | ||
| 109 | // Yield the thread | 155 | // Yield the thread |
| 110 | const Thread* const winner = scheduled_queue[core_id].front(priority); | 156 | Reschedule(priority, core_id, yielding_thread); |
| 111 | ASSERT_MSG(yielding_thread == winner, "Thread yielding without being in front"); | 157 | const Thread* const winner = scheduled_queue[core_id].front(); |
| 112 | scheduled_queue[core_id].yield(priority); | 158 | if (kernel.GetCurrentHostThreadID() != core_id) { |
| 159 | is_reselection_pending.store(true, std::memory_order_release); | ||
| 160 | } | ||
| 113 | 161 | ||
| 114 | return AskForReselectionOrMarkRedundant(yielding_thread, winner); | 162 | return AskForReselectionOrMarkRedundant(yielding_thread, winner); |
| 115 | } | 163 | } |
| 116 | 164 | ||
| 117 | bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) { | 165 | bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) { |
| 166 | ASSERT(is_locked); | ||
| 118 | // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, | 167 | // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, |
| 119 | // etc. | 168 | // etc. |
| 169 | if (!yielding_thread->IsRunnable()) { | ||
| 170 | // Normally this case shouldn't happen except for SetThreadActivity. | ||
| 171 | is_reselection_pending.store(true, std::memory_order_release); | ||
| 172 | return false; | ||
| 173 | } | ||
| 120 | const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); | 174 | const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); |
| 121 | const u32 priority = yielding_thread->GetPriority(); | 175 | const u32 priority = yielding_thread->GetPriority(); |
| 122 | 176 | ||
| 123 | // Yield the thread | 177 | // Yield the thread |
| 124 | ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority), | 178 | Reschedule(priority, core_id, yielding_thread); |
| 125 | "Thread yielding without being in front"); | ||
| 126 | scheduled_queue[core_id].yield(priority); | ||
| 127 | 179 | ||
| 128 | std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads; | 180 | std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads; |
| 129 | for (std::size_t i = 0; i < current_threads.size(); i++) { | 181 | for (std::size_t i = 0; i < current_threads.size(); i++) { |
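
The rewritten SelectThreads() works globally rather than per core: step 1 snapshots each core's top thread and records empty cores in the idle_cores bitmask, which the while loop then drains lowest-set-bit first. A worked sketch of that bit-iteration idiom, assuming four cores:

    // Cores 0 and 2 busy, cores 1 and 3 idle -> idle_cores == 0b1010.
    u32 idle_cores = 0b1010;
    while (idle_cores != 0) {
        const u32 core_id = Common::CountTrailingZeroes32(idle_cores); // 1, then 3
        // ... steps 2-3 above: adopt a suggested thread or migrate one in ...
        idle_cores &= ~(1u << core_id);
    }
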
| @@ -153,21 +205,28 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) { | |||
| 153 | 205 | ||
| 154 | if (winner != nullptr) { | 206 | if (winner != nullptr) { |
| 155 | if (winner != yielding_thread) { | 207 | if (winner != yielding_thread) { |
| 156 | if (winner->IsRunning()) { | ||
| 157 | UnloadThread(static_cast<u32>(winner->GetProcessorID())); | ||
| 158 | } | ||
| 159 | TransferToCore(winner->GetPriority(), s32(core_id), winner); | 208 | TransferToCore(winner->GetPriority(), s32(core_id), winner); |
| 160 | } | 209 | } |
| 161 | } else { | 210 | } else { |
| 162 | winner = next_thread; | 211 | winner = next_thread; |
| 163 | } | 212 | } |
| 164 | 213 | ||
| 214 | if (kernel.GetCurrentHostThreadID() != core_id) { | ||
| 215 | is_reselection_pending.store(true, std::memory_order_release); | ||
| 216 | } | ||
| 217 | |||
| 165 | return AskForReselectionOrMarkRedundant(yielding_thread, winner); | 218 | return AskForReselectionOrMarkRedundant(yielding_thread, winner); |
| 166 | } | 219 | } |
| 167 | 220 | ||
| 168 | bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) { | 221 | bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) { |
| 222 | ASSERT(is_locked); | ||
| 169 | // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, | 223 | // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, |
| 170 | // etc. | 224 | // etc. |
| 225 | if (!yielding_thread->IsRunnable()) { | ||
| 226 | // Normally this case shouldn't happen except for SetThreadActivity. | ||
| 227 | is_reselection_pending.store(true, std::memory_order_release); | ||
| 228 | return false; | ||
| 229 | } | ||
| 171 | Thread* winner = nullptr; | 230 | Thread* winner = nullptr; |
| 172 | const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); | 231 | const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); |
| 173 | 232 | ||
| @@ -195,25 +254,31 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread | |||
| 195 | } | 254 | } |
| 196 | if (winner != nullptr) { | 255 | if (winner != nullptr) { |
| 197 | if (winner != yielding_thread) { | 256 | if (winner != yielding_thread) { |
| 198 | if (winner->IsRunning()) { | ||
| 199 | UnloadThread(static_cast<u32>(winner->GetProcessorID())); | ||
| 200 | } | ||
| 201 | TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner); | 257 | TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner); |
| 202 | } | 258 | } |
| 203 | } else { | 259 | } else { |
| 204 | winner = yielding_thread; | 260 | winner = yielding_thread; |
| 205 | } | 261 | } |
| 262 | } else { | ||
| 263 | winner = scheduled_queue[core_id].front(); | ||
| 264 | } | ||
| 265 | |||
| 266 | if (kernel.GetCurrentHostThreadID() != core_id) { | ||
| 267 | is_reselection_pending.store(true, std::memory_order_release); | ||
| 206 | } | 268 | } |
| 207 | 269 | ||
| 208 | return AskForReselectionOrMarkRedundant(yielding_thread, winner); | 270 | return AskForReselectionOrMarkRedundant(yielding_thread, winner); |
| 209 | } | 271 | } |
| 210 | 272 | ||
| 211 | void GlobalScheduler::PreemptThreads() { | 273 | void GlobalScheduler::PreemptThreads() { |
| 274 | ASSERT(is_locked); | ||
| 212 | for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { | 275 | for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { |
| 213 | const u32 priority = preemption_priorities[core_id]; | 276 | const u32 priority = preemption_priorities[core_id]; |
| 214 | 277 | ||
| 215 | if (scheduled_queue[core_id].size(priority) > 0) { | 278 | if (scheduled_queue[core_id].size(priority) > 0) { |
| 216 | scheduled_queue[core_id].front(priority)->IncrementYieldCount(); | 279 | if (scheduled_queue[core_id].size(priority) > 1) { |
| 280 | scheduled_queue[core_id].front(priority)->IncrementYieldCount(); | ||
| 281 | } | ||
| 217 | scheduled_queue[core_id].yield(priority); | 282 | scheduled_queue[core_id].yield(priority); |
| 218 | if (scheduled_queue[core_id].size(priority) > 1) { | 283 | if (scheduled_queue[core_id].size(priority) > 1) { |
| 219 | scheduled_queue[core_id].front(priority)->IncrementYieldCount(); | 284 | scheduled_queue[core_id].front(priority)->IncrementYieldCount(); |
| @@ -247,9 +312,6 @@ void GlobalScheduler::PreemptThreads() { | |||
| 247 | } | 312 | } |
| 248 | 313 | ||
| 249 | if (winner != nullptr) { | 314 | if (winner != nullptr) { |
| 250 | if (winner->IsRunning()) { | ||
| 251 | UnloadThread(static_cast<u32>(winner->GetProcessorID())); | ||
| 252 | } | ||
| 253 | TransferToCore(winner->GetPriority(), s32(core_id), winner); | 315 | TransferToCore(winner->GetPriority(), s32(core_id), winner); |
| 254 | current_thread = | 316 | current_thread = |
| 255 | winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread; | 317 | winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread; |
| @@ -280,9 +342,6 @@ void GlobalScheduler::PreemptThreads() { | |||
| 280 | } | 342 | } |
| 281 | 343 | ||
| 282 | if (winner != nullptr) { | 344 | if (winner != nullptr) { |
| 283 | if (winner->IsRunning()) { | ||
| 284 | UnloadThread(static_cast<u32>(winner->GetProcessorID())); | ||
| 285 | } | ||
| 286 | TransferToCore(winner->GetPriority(), s32(core_id), winner); | 345 | TransferToCore(winner->GetPriority(), s32(core_id), winner); |
| 287 | current_thread = winner; | 346 | current_thread = winner; |
| 288 | } | 347 | } |
| @@ -292,34 +351,65 @@ void GlobalScheduler::PreemptThreads() { | |||
| 292 | } | 351 | } |
| 293 | } | 352 | } |
| 294 | 353 | ||
| 354 | void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule, | ||
| 355 | Core::EmuThreadHandle global_thread) { | ||
| 356 | u32 current_core = global_thread.host_handle; | ||
| 357 | bool must_context_switch = global_thread.guest_handle != InvalidHandle && | ||
| 358 | (current_core < Core::Hardware::NUM_CPU_CORES); | ||
| 359 | while (cores_pending_reschedule != 0) { | ||
| 360 | u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule); | ||
| 361 | ASSERT(core < Core::Hardware::NUM_CPU_CORES); | ||
| 362 | if (!must_context_switch || core != current_core) { | ||
| 363 | auto& phys_core = kernel.PhysicalCore(core); | ||
| 364 | phys_core.Interrupt(); | ||
| 365 | } else { | ||
| 366 | must_context_switch = true; | ||
| 367 | } | ||
| 368 | cores_pending_reschedule &= ~(1ul << core); | ||
| 369 | } | ||
| 370 | if (must_context_switch) { | ||
| 371 | auto& core_scheduler = kernel.CurrentScheduler(); | ||
| 372 | kernel.ExitSVCProfile(); | ||
| 373 | core_scheduler.TryDoContextSwitch(); | ||
| 374 | kernel.EnterSVCProfile(); | ||
| 375 | } | ||
| 376 | } | ||
| 377 | |||
| 295 | void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) { | 378 | void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) { |
| 379 | ASSERT(is_locked); | ||
| 296 | suggested_queue[core].add(thread, priority); | 380 | suggested_queue[core].add(thread, priority); |
| 297 | } | 381 | } |
| 298 | 382 | ||
| 299 | void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) { | 383 | void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) { |
| 384 | ASSERT(is_locked); | ||
| 300 | suggested_queue[core].remove(thread, priority); | 385 | suggested_queue[core].remove(thread, priority); |
| 301 | } | 386 | } |
| 302 | 387 | ||
| 303 | void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) { | 388 | void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) { |
| 389 | ASSERT(is_locked); | ||
| 304 | ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core."); | 390 | ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core."); |
| 305 | scheduled_queue[core].add(thread, priority); | 391 | scheduled_queue[core].add(thread, priority); |
| 306 | } | 392 | } |
| 307 | 393 | ||
| 308 | void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) { | 394 | void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) { |
| 395 | ASSERT(is_locked); | ||
| 309 | ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core."); | 396 | ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core."); |
| 310 | scheduled_queue[core].add(thread, priority, false); | 397 | scheduled_queue[core].add(thread, priority, false); |
| 311 | } | 398 | } |
| 312 | 399 | ||
| 313 | void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) { | 400 | void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) { |
| 401 | ASSERT(is_locked); | ||
| 314 | scheduled_queue[core].remove(thread, priority); | 402 | scheduled_queue[core].remove(thread, priority); |
| 315 | scheduled_queue[core].add(thread, priority); | 403 | scheduled_queue[core].add(thread, priority); |
| 316 | } | 404 | } |
| 317 | 405 | ||
| 318 | void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) { | 406 | void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) { |
| 407 | ASSERT(is_locked); | ||
| 319 | scheduled_queue[core].remove(thread, priority); | 408 | scheduled_queue[core].remove(thread, priority); |
| 320 | } | 409 | } |
| 321 | 410 | ||
| 322 | void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) { | 411 | void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) { |
| 412 | ASSERT(is_locked); | ||
| 323 | const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT; | 413 | const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT; |
| 324 | const s32 source_core = thread->GetProcessorID(); | 414 | const s32 source_core = thread->GetProcessorID(); |
| 325 | if (source_core == destination_core || !schedulable) { | 415 | if (source_core == destination_core || !schedulable) { |
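
EnableInterruptAndSchedule() is the new tail of every scheduler-lock release: each core whose selected thread changed is interrupted remotely, except the releasing core itself, which (when it hosts a guest thread) performs its own switch inline after the loop. The SVC-profile bracketing around that inline switch, as above, plausibly keeps context-switch time from being attributed to the in-flight syscall:

    kernel.ExitSVCProfile();             // stop charging time to the current SVC
    core_scheduler.TryDoContextSwitch();
    kernel.EnterSVCProfile();            // resumes once this thread is scheduled back
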
| @@ -349,6 +439,108 @@ bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, | |||
| 349 | } | 439 | } |
| 350 | } | 440 | } |
| 351 | 441 | ||
| 442 | void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) { | ||
| 443 | if (old_flags == thread->scheduling_state) { | ||
| 444 | return; | ||
| 445 | } | ||
| 446 | ASSERT(is_locked); | ||
| 447 | |||
| 448 | if (old_flags == static_cast<u32>(ThreadSchedStatus::Runnable)) { | ||
| 449 | // In this case the thread was running; now it is pausing/exiting | ||
| 450 | if (thread->processor_id >= 0) { | ||
| 451 | Unschedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread); | ||
| 452 | } | ||
| 453 | |||
| 454 | for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { | ||
| 455 | if (core != static_cast<u32>(thread->processor_id) && | ||
| 456 | ((thread->affinity_mask >> core) & 1) != 0) { | ||
| 457 | Unsuggest(thread->current_priority, core, thread); | ||
| 458 | } | ||
| 459 | } | ||
| 460 | } else if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { | ||
| 461 | // The thread is transitioning from stopped to runnable | ||
| 462 | if (thread->processor_id >= 0) { | ||
| 463 | Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread); | ||
| 464 | } | ||
| 465 | |||
| 466 | for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { | ||
| 467 | if (core != static_cast<u32>(thread->processor_id) && | ||
| 468 | ((thread->affinity_mask >> core) & 1) != 0) { | ||
| 469 | Suggest(thread->current_priority, core, thread); | ||
| 470 | } | ||
| 471 | } | ||
| 472 | } | ||
| 473 | |||
| 474 | SetReselectionPending(); | ||
| 475 | } | ||
| 476 | |||
| 477 | void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priority) { | ||
| 478 | if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable)) { | ||
| 479 | return; | ||
| 480 | } | ||
| 481 | ASSERT(is_locked); | ||
| 482 | if (thread->processor_id >= 0) { | ||
| 483 | Unschedule(old_priority, static_cast<u32>(thread->processor_id), thread); | ||
| 484 | } | ||
| 485 | |||
| 486 | for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { | ||
| 487 | if (core != static_cast<u32>(thread->processor_id) && | ||
| 488 | ((thread->affinity_mask >> core) & 1) != 0) { | ||
| 489 | Unsuggest(old_priority, core, thread); | ||
| 490 | } | ||
| 491 | } | ||
| 492 | |||
| 493 | if (thread->processor_id >= 0) { | ||
| 494 | if (thread == kernel.CurrentScheduler().GetCurrentThread()) { | ||
| 495 | SchedulePrepend(thread->current_priority, static_cast<u32>(thread->processor_id), | ||
| 496 | thread); | ||
| 497 | } else { | ||
| 498 | Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread); | ||
| 499 | } | ||
| 500 | } | ||
| 501 | |||
| 502 | for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { | ||
| 503 | if (core != static_cast<u32>(thread->processor_id) && | ||
| 504 | ((thread->affinity_mask >> core) & 1) != 0) { | ||
| 505 | Suggest(thread->current_priority, core, thread); | ||
| 506 | } | ||
| 507 | } | ||
| 508 | thread->IncrementYieldCount(); | ||
| 509 | SetReselectionPending(); | ||
| 510 | } | ||
| 511 | |||
| 512 | void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask, | ||
| 513 | s32 old_core) { | ||
| 514 | if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable) || | ||
| 515 | thread->current_priority >= THREADPRIO_COUNT) { | ||
| 516 | return; | ||
| 517 | } | ||
| 518 | ASSERT(is_locked); | ||
| 519 | |||
| 520 | for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { | ||
| 521 | if (((old_affinity_mask >> core) & 1) != 0) { | ||
| 522 | if (core == static_cast<u32>(old_core)) { | ||
| 523 | Unschedule(thread->current_priority, core, thread); | ||
| 524 | } else { | ||
| 525 | Unsuggest(thread->current_priority, core, thread); | ||
| 526 | } | ||
| 527 | } | ||
| 528 | } | ||
| 529 | |||
| 530 | for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { | ||
| 531 | if (((thread->affinity_mask >> core) & 1) != 0) { | ||
| 532 | if (core == static_cast<u32>(thread->processor_id)) { | ||
| 533 | Schedule(thread->current_priority, core, thread); | ||
| 534 | } else { | ||
| 535 | Suggest(thread->current_priority, core, thread); | ||
| 536 | } | ||
| 537 | } | ||
| 538 | } | ||
| 539 | |||
| 540 | thread->IncrementYieldCount(); | ||
| 541 | SetReselectionPending(); | ||
| 542 | } | ||
| 543 | |||
| 352 | void GlobalScheduler::Shutdown() { | 544 | void GlobalScheduler::Shutdown() { |
| 353 | for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { | 545 | for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { |
| 354 | scheduled_queue[core].clear(); | 546 | scheduled_queue[core].clear(); |
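
The three AdjustSchedulingOn*() helpers all apply the same rule: on the thread's own core it occupies the scheduled queue; on every other core permitted by its affinity mask it is merely suggested as a migration candidate. The loop from AdjustSchedulingOnAffinity() above, condensed:

    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        if (((thread->affinity_mask >> core) & 1) == 0) {
            continue; // core excluded by affinity
        }
        if (core == static_cast<u32>(thread->processor_id)) {
            Schedule(thread->current_priority, core, thread); // runnable here
        } else {
            Suggest(thread->current_priority, core, thread);  // migration candidate
        }
    }
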
| @@ -359,10 +551,12 @@ void GlobalScheduler::Shutdown() { | |||
| 359 | 551 | ||
| 360 | void GlobalScheduler::Lock() { | 552 | void GlobalScheduler::Lock() { |
| 361 | Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID(); | 553 | Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID(); |
| 554 | ASSERT(!current_thread.IsInvalid()); | ||
| 362 | if (current_thread == current_owner) { | 555 | if (current_thread == current_owner) { |
| 363 | ++scope_lock; | 556 | ++scope_lock; |
| 364 | } else { | 557 | } else { |
| 365 | inner_lock.lock(); | 558 | inner_lock.lock(); |
| 559 | is_locked = true; | ||
| 366 | current_owner = current_thread; | 560 | current_owner = current_thread; |
| 367 | ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle()); | 561 | ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle()); |
| 368 | scope_lock = 1; | 562 | scope_lock = 1; |
| @@ -374,17 +568,18 @@ void GlobalScheduler::Unlock() { | |||
| 374 | ASSERT(scope_lock > 0); | 568 | ASSERT(scope_lock > 0); |
| 375 | return; | 569 | return; |
| 376 | } | 570 | } |
| 377 | for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { | 571 | u32 cores_pending_reschedule = SelectThreads(); |
| 378 | SelectThread(i); | 572 | Core::EmuThreadHandle leaving_thread = current_owner; |
| 379 | } | ||
| 380 | current_owner = Core::EmuThreadHandle::InvalidHandle(); | 573 | current_owner = Core::EmuThreadHandle::InvalidHandle(); |
| 381 | scope_lock = 1; | 574 | scope_lock = 1; |
| 575 | is_locked = false; | ||
| 382 | inner_lock.unlock(); | 576 | inner_lock.unlock(); |
| 383 | // TODO(Blinkhawk): Setup the interrupts and change context on current core. | 577 | EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread); |
| 384 | } | 578 | } |
| 385 | 579 | ||
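Lock and Unlock implement a re-entrant lock keyed on the emulated thread: re-locking from the current owner only bumps a counter, and the final Unlock reselects threads and interrupts the affected cores. A stripped-down sketch of the ownership pattern, with std::thread::id standing in for Core::EmuThreadHandle:

    #include <atomic>
    #include <mutex>
    #include <thread>

    class RecursiveSchedulerLock {
    public:
        void Lock() {
            const auto me = std::this_thread::get_id();
            if (owner_.load() == me) {
                ++depth_; // re-entrant acquire by the current owner
                return;
            }
            inner_.lock();
            owner_.store(me);
            depth_ = 1;
        }

        void Unlock() {
            if (--depth_ > 0) {
                return; // still held by an outer scope
            }
            // Final release: this is where the real scheduler reselects
            // threads and then interrupts the cores that need to switch.
            owner_.store(std::thread::id{});
            inner_.unlock();
        }

    private:
        std::mutex inner_;
        std::atomic<std::thread::id> owner_{};
        long depth_ = 0;
    };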
| 386 | Scheduler::Scheduler(Core::System& system, std::size_t core_id) | 580 | Scheduler::Scheduler(Core::System& system, std::size_t core_id) : system(system), core_id(core_id) { |
| 387 | : system{system}, core_id{core_id} {} | 581 | switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this); |
| 582 | } | ||
| 388 | 583 | ||
| 389 | Scheduler::~Scheduler() = default; | 584 | Scheduler::~Scheduler() = default; |
| 390 | 585 | ||
| @@ -393,56 +588,128 @@ bool Scheduler::HaveReadyThreads() const { | |||
| 393 | } | 588 | } |
| 394 | 589 | ||
| 395 | Thread* Scheduler::GetCurrentThread() const { | 590 | Thread* Scheduler::GetCurrentThread() const { |
| 396 | return current_thread.get(); | 591 | if (current_thread) { |
| 592 | return current_thread.get(); | ||
| 593 | } | ||
| 594 | return idle_thread.get(); | ||
| 397 | } | 595 | } |
| 398 | 596 | ||
| 399 | Thread* Scheduler::GetSelectedThread() const { | 597 | Thread* Scheduler::GetSelectedThread() const { |
| 400 | return selected_thread.get(); | 598 | return selected_thread.get(); |
| 401 | } | 599 | } |
| 402 | 600 | ||
| 403 | void Scheduler::SelectThreads() { | ||
| 404 | system.GlobalScheduler().SelectThread(core_id); | ||
| 405 | } | ||
| 406 | |||
| 407 | u64 Scheduler::GetLastContextSwitchTicks() const { | 601 | u64 Scheduler::GetLastContextSwitchTicks() const { |
| 408 | return last_context_switch_time; | 602 | return last_context_switch_time; |
| 409 | } | 603 | } |
| 410 | 604 | ||
| 411 | void Scheduler::TryDoContextSwitch() { | 605 | void Scheduler::TryDoContextSwitch() { |
| 606 | auto& phys_core = system.Kernel().CurrentPhysicalCore(); | ||
| 607 | if (phys_core.IsInterrupted()) { | ||
| 608 | phys_core.ClearInterrupt(); | ||
| 609 | } | ||
| 610 | guard.lock(); | ||
| 412 | if (is_context_switch_pending) { | 611 | if (is_context_switch_pending) { |
| 413 | SwitchContext(); | 612 | SwitchContext(); |
| 613 | } else { | ||
| 614 | guard.unlock(); | ||
| 414 | } | 615 | } |
| 415 | } | 616 | } |
| 416 | 617 | ||
| 417 | void Scheduler::UnloadThread() { | 618 | void Scheduler::OnThreadStart() { |
| 418 | Thread* const previous_thread = GetCurrentThread(); | 619 | SwitchContextStep2(); |
| 419 | Process* const previous_process = system.Kernel().CurrentProcess(); | 620 | } |
| 420 | 621 | ||
| 421 | UpdateLastContextSwitchTime(previous_thread, previous_process); | 622 | void Scheduler::Unload() { |
| 623 | Thread* thread = current_thread.get(); | ||
| 624 | if (thread) { | ||
| 625 | thread->SetContinuousOnSVC(false); | ||
| 626 | thread->last_running_ticks = system.CoreTiming().GetCPUTicks(); | ||
| 627 | thread->SetIsRunning(false); | ||
| 628 | if (!thread->IsHLEThread() && !thread->HasExited()) { | ||
| 629 | Core::ARM_Interface& cpu_core = thread->ArmInterface(); | ||
| 630 | cpu_core.SaveContext(thread->GetContext32()); | ||
| 631 | cpu_core.SaveContext(thread->GetContext64()); | ||
| 632 | // Save the TPIDR_EL0 system register in case it was modified. | ||
| 633 | thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); | ||
| 634 | cpu_core.ClearExclusiveState(); | ||
| 635 | } | ||
| 636 | thread->context_guard.unlock(); | ||
| 637 | } | ||
| 638 | } | ||
| 422 | 639 | ||
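Unload (above) and Reload (below) are the two halves of a guest context switch: the save half copies the live register file and TLS pointer back into the thread object and drops any exclusive-monitor reservation; the load half mirrors it. A self-contained illustration with hypothetical types far simpler than the real ARM_Interface:

    #include <array>
    #include <cstdint>

    struct GuestContext {
        std::array<std::uint64_t, 31> x{}; // general-purpose registers
        std::uint64_t sp = 0, pc = 0;      // stack pointer / program counter
        std::uint64_t tpidr_el0 = 0;       // guest TLS register
    };

    struct CpuCore {
        GuestContext live;                  // state currently loaded on the core
        bool exclusive_reservation = false; // LDXR/STXR monitor state

        void SaveContext(GuestContext& out) { out = live; }
        void LoadContext(const GuestContext& in) { live = in; }
        void ClearExclusiveState() { exclusive_reservation = false; }
    };

    struct ThreadState {
        GuestContext ctx;
        bool is_running = false;
    };

    // Save half: persist live core state so the thread can resume later,
    // possibly on a different core.
    void Unload(CpuCore& core, ThreadState& t) {
        core.SaveContext(t.ctx);
        core.ClearExclusiveState();
        t.is_running = false;
    }

    // Load half: run before the thread executes again.
    void Reload(CpuCore& core, ThreadState& t) {
        core.LoadContext(t.ctx);
        core.ClearExclusiveState();
        t.is_running = true;
    }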
| 423 | // Save context for previous thread | 640 | void Scheduler::Reload() { |
| 424 | if (previous_thread) { | 641 | Thread* thread = current_thread.get(); |
| 425 | system.ArmInterface(core_id).SaveContext(previous_thread->GetContext32()); | 642 | if (thread) { |
| 426 | system.ArmInterface(core_id).SaveContext(previous_thread->GetContext64()); | 643 | ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable, |
| 427 | // Save the TPIDR_EL0 system register in case it was modified. | 644 | "Thread must be runnable."); |
| 428 | previous_thread->SetTPIDR_EL0(system.ArmInterface(core_id).GetTPIDR_EL0()); | ||
| 429 | 645 | ||
| 430 | if (previous_thread->GetStatus() == ThreadStatus::Running) { | 646 | // Cancel any outstanding wakeup events for this thread |
| 431 | // This is only the case when a reschedule is triggered without the current thread | 647 | thread->SetIsRunning(true); |
| 432 | // yielding execution (i.e. an event triggered, system core time-sliced, etc) | 648 | thread->SetWasRunning(false); |
| 433 | previous_thread->SetStatus(ThreadStatus::Ready); | 649 | thread->last_running_ticks = system.CoreTiming().GetCPUTicks(); |
| 650 | |||
| 651 | auto* const thread_owner_process = thread->GetOwnerProcess(); | ||
| 652 | if (thread_owner_process != nullptr) { | ||
| 653 | system.Kernel().MakeCurrentProcess(thread_owner_process); | ||
| 654 | } | ||
| 655 | if (!thread->IsHLEThread()) { | ||
| 656 | Core::ARM_Interface& cpu_core = thread->ArmInterface(); | ||
| 657 | cpu_core.LoadContext(thread->GetContext32()); | ||
| 658 | cpu_core.LoadContext(thread->GetContext64()); | ||
| 659 | cpu_core.SetTlsAddress(thread->GetTLSAddress()); | ||
| 660 | cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0()); | ||
| 661 | cpu_core.ChangeProcessorID(this->core_id); | ||
| 662 | cpu_core.ClearExclusiveState(); | ||
| 434 | } | 663 | } |
| 435 | previous_thread->SetIsRunning(false); | ||
| 436 | } | 664 | } |
| 437 | current_thread = nullptr; | 665 | } |
| 666 | |||
| 667 | void Scheduler::SwitchContextStep2() { | ||
| 668 | Thread* previous_thread = current_thread_prev.get(); | ||
| 669 | Thread* new_thread = selected_thread.get(); | ||
| 670 | |||
| 671 | // Load context of new thread | ||
| 672 | Process* const previous_process = | ||
| 673 | previous_thread != nullptr ? previous_thread->GetOwnerProcess() : nullptr; | ||
| 674 | |||
| 675 | if (new_thread) { | ||
| 676 | ASSERT_MSG(new_thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable, | ||
| 677 | "Thread must be runnable."); | ||
| 678 | |||
| 679 | // Cancel any outstanding wakeup events for this thread | ||
| 680 | new_thread->SetIsRunning(true); | ||
| 681 | new_thread->last_running_ticks = system.CoreTiming().GetCPUTicks(); | ||
| 682 | new_thread->SetWasRunning(false); | ||
| 683 | |||
| 684 | auto* const thread_owner_process = current_thread->GetOwnerProcess(); | ||
| 685 | if (thread_owner_process != nullptr) { | ||
| 686 | system.Kernel().MakeCurrentProcess(thread_owner_process); | ||
| 687 | } | ||
| 688 | if (!new_thread->IsHLEThread()) { | ||
| 689 | Core::ARM_Interface& cpu_core = new_thread->ArmInterface(); | ||
| 690 | cpu_core.LoadContext(new_thread->GetContext32()); | ||
| 691 | cpu_core.LoadContext(new_thread->GetContext64()); | ||
| 692 | cpu_core.SetTlsAddress(new_thread->GetTLSAddress()); | ||
| 693 | cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0()); | ||
| 694 | cpu_core.ChangeProcessorID(this->core_id); | ||
| 695 | cpu_core.ClearExclusiveState(); | ||
| 696 | } | ||
| 697 | } | ||
| 698 | |||
| 699 | TryDoContextSwitch(); | ||
| 438 | } | 700 | } |
| 439 | 701 | ||
| 440 | void Scheduler::SwitchContext() { | 702 | void Scheduler::SwitchContext() { |
| 441 | Thread* const previous_thread = GetCurrentThread(); | 703 | current_thread_prev = current_thread; |
| 442 | Thread* const new_thread = GetSelectedThread(); | 704 | selected_thread = selected_thread_set; |
| 705 | Thread* previous_thread = current_thread_prev.get(); | ||
| 706 | Thread* new_thread = selected_thread.get(); | ||
| 707 | current_thread = selected_thread; | ||
| 443 | 708 | ||
| 444 | is_context_switch_pending = false; | 709 | is_context_switch_pending = false; |
| 710 | |||
| 445 | if (new_thread == previous_thread) { | 711 | if (new_thread == previous_thread) { |
| 712 | guard.unlock(); | ||
| 446 | return; | 713 | return; |
| 447 | } | 714 | } |
| 448 | 715 | ||
| @@ -452,51 +719,75 @@ void Scheduler::SwitchContext() { | |||
| 452 | 719 | ||
| 453 | // Save context for previous thread | 720 | // Save context for previous thread |
| 454 | if (previous_thread) { | 721 | if (previous_thread) { |
| 455 | system.ArmInterface(core_id).SaveContext(previous_thread->GetContext32()); | 722 | if (new_thread != nullptr && new_thread->IsSuspendThread()) { |
| 456 | system.ArmInterface(core_id).SaveContext(previous_thread->GetContext64()); | 723 | previous_thread->SetWasRunning(true); |
| 457 | // Save the TPIDR_EL0 system register in case it was modified. | ||
| 458 | previous_thread->SetTPIDR_EL0(system.ArmInterface(core_id).GetTPIDR_EL0()); | ||
| 459 | |||
| 460 | if (previous_thread->GetStatus() == ThreadStatus::Running) { | ||
| 461 | // This is only the case when a reschedule is triggered without the current thread | ||
| 462 | // yielding execution (i.e. an event triggered, system core time-sliced, etc) | ||
| 463 | previous_thread->SetStatus(ThreadStatus::Ready); | ||
| 464 | } | 724 | } |
| 725 | previous_thread->SetContinuousOnSVC(false); | ||
| 726 | previous_thread->last_running_ticks = system.CoreTiming().GetCPUTicks(); | ||
| 465 | previous_thread->SetIsRunning(false); | 727 | previous_thread->SetIsRunning(false); |
| 466 | } | 728 | if (!previous_thread->IsHLEThread() && !previous_thread->HasExited()) { |
| 467 | 729 | Core::ARM_Interface& cpu_core = previous_thread->ArmInterface(); | |
| 468 | // Load context of new thread | 730 | cpu_core.SaveContext(previous_thread->GetContext32()); |
| 469 | if (new_thread) { | 731 | cpu_core.SaveContext(previous_thread->GetContext64()); |
| 470 | ASSERT_MSG(new_thread->GetProcessorID() == s32(this->core_id), | 732 | // Save the TPIDR_EL0 system register in case it was modified. |
| 471 | "Thread must be assigned to this core."); | 733 | previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); |
| 472 | ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready, | 734 | cpu_core.ClearExclusiveState(); |
| 473 | "Thread must be ready to become running."); | ||
| 474 | |||
| 475 | // Cancel any outstanding wakeup events for this thread | ||
| 476 | new_thread->CancelWakeupTimer(); | ||
| 477 | current_thread = SharedFrom(new_thread); | ||
| 478 | new_thread->SetStatus(ThreadStatus::Running); | ||
| 479 | new_thread->SetIsRunning(true); | ||
| 480 | |||
| 481 | auto* const thread_owner_process = current_thread->GetOwnerProcess(); | ||
| 482 | if (previous_process != thread_owner_process) { | ||
| 483 | system.Kernel().MakeCurrentProcess(thread_owner_process); | ||
| 484 | } | 735 | } |
| 736 | previous_thread->context_guard.unlock(); | ||
| 737 | } | ||
| 485 | 738 | ||
| 486 | system.ArmInterface(core_id).LoadContext(new_thread->GetContext32()); | 739 | std::shared_ptr<Common::Fiber>* old_context; |
| 487 | system.ArmInterface(core_id).LoadContext(new_thread->GetContext64()); | 740 | if (previous_thread != nullptr) { |
| 488 | system.ArmInterface(core_id).SetTlsAddress(new_thread->GetTLSAddress()); | 741 | old_context = &previous_thread->GetHostContext(); |
| 489 | system.ArmInterface(core_id).SetTPIDR_EL0(new_thread->GetTPIDR_EL0()); | ||
| 490 | } else { | 742 | } else { |
| 491 | current_thread = nullptr; | 743 | old_context = &idle_thread->GetHostContext(); |
| 492 | // Note: We do not reset the current process and current page table when idling because | 744 | } |
| 493 | // technically we haven't changed processes, our threads are just paused. | 745 | guard.unlock(); |
| 746 | |||
| 747 | Common::Fiber::YieldTo(*old_context, switch_fiber); | ||
| 748 | // When a thread wakes up, the scheduler may have changed to that of another core. | ||
| 749 | auto& next_scheduler = system.Kernel().CurrentScheduler(); | ||
| 750 | next_scheduler.SwitchContextStep2(); | ||
| 751 | } | ||
| 752 | |||
| 753 | void Scheduler::OnSwitch(void* this_scheduler) { | ||
| 754 | Scheduler* sched = static_cast<Scheduler*>(this_scheduler); | ||
| 755 | sched->SwitchToCurrent(); | ||
| 756 | } | ||
| 757 | |||
| 758 | void Scheduler::SwitchToCurrent() { | ||
| 759 | while (true) { | ||
| 760 | guard.lock(); | ||
| 761 | selected_thread = selected_thread_set; | ||
| 762 | current_thread = selected_thread; | ||
| 763 | is_context_switch_pending = false; | ||
| 764 | guard.unlock(); | ||
| 765 | while (!is_context_switch_pending) { | ||
| 766 | if (current_thread != nullptr && !current_thread->IsHLEThread()) { | ||
| 767 | current_thread->context_guard.lock(); | ||
| 768 | if (!current_thread->IsRunnable()) { | ||
| 769 | current_thread->context_guard.unlock(); | ||
| 770 | break; | ||
| 771 | } | ||
| 772 | if (current_thread->GetProcessorID() != core_id) { | ||
| 773 | current_thread->context_guard.unlock(); | ||
| 774 | break; | ||
| 775 | } | ||
| 776 | } | ||
| 777 | std::shared_ptr<Common::Fiber>* next_context; | ||
| 778 | if (current_thread != nullptr) { | ||
| 779 | next_context = &current_thread->GetHostContext(); | ||
| 780 | } else { | ||
| 781 | next_context = &idle_thread->GetHostContext(); | ||
| 782 | } | ||
| 783 | Common::Fiber::YieldTo(switch_fiber, *next_context); | ||
| 784 | } | ||
| 494 | } | 785 | } |
| 495 | } | 786 | } |
| 496 | 787 | ||
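OnSwitch and SwitchToCurrent form a trampoline: every switch yields to the per-core control fiber, which picks the next thread's host context and yields into it. The shape of that handoff can be reproduced with POSIX ucontext as a rough, self-contained sketch (Common::Fiber wraps a comparable mechanism; this is not the emulator's implementation):

    #include <cstdio>
    #include <ucontext.h>

    static ucontext_t control_ctx; // the per-core "switch fiber"
    static ucontext_t thread_ctx;  // one guest thread's host context
    static char stack[64 * 1024];

    static void GuestThread() {
        for (int step = 0; step < 3; ++step) {
            std::printf("guest step %d\n", step);
            // Yield back to the control context, mirroring
            // Fiber::YieldTo(host_context, switch_fiber).
            swapcontext(&thread_ctx, &control_ctx);
        }
    }

    int main() {
        getcontext(&thread_ctx);
        thread_ctx.uc_stack.ss_sp = stack;
        thread_ctx.uc_stack.ss_size = sizeof(stack);
        thread_ctx.uc_link = &control_ctx;
        makecontext(&thread_ctx, GuestThread, 0);

        // Control loop: select the next context and yield into it, the role
        // SwitchToCurrent plays for each core.
        for (int slice = 0; slice < 3; ++slice) {
            std::printf("control: dispatching slice %d\n", slice);
            swapcontext(&control_ctx, &thread_ctx);
        }
        return 0;
    }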
| 497 | void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { | 788 | void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { |
| 498 | const u64 prev_switch_ticks = last_context_switch_time; | 789 | const u64 prev_switch_ticks = last_context_switch_time; |
| 499 | const u64 most_recent_switch_ticks = system.CoreTiming().GetTicks(); | 790 | const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks(); |
| 500 | const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; | 791 | const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; |
| 501 | 792 | ||
| 502 | if (thread != nullptr) { | 793 | if (thread != nullptr) { |
| @@ -510,6 +801,16 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { | |||
| 510 | last_context_switch_time = most_recent_switch_ticks; | 801 | last_context_switch_time = most_recent_switch_ticks; |
| 511 | } | 802 | } |
| 512 | 803 | ||
| 804 | void Scheduler::Initialize() { | ||
| 805 | std::string name = "Idle Thread Id:" + std::to_string(core_id); | ||
| 806 | std::function<void(void*)> init_func = system.GetCpuManager().GetIdleThreadStartFunc(); | ||
| 807 | void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); | ||
| 808 | ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE); | ||
| 809 | auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0, | ||
| 810 | nullptr, std::move(init_func), init_func_parameter); | ||
| 811 | idle_thread = std::move(thread_res).Unwrap(); | ||
| 812 | } | ||
| 813 | |||
| 513 | void Scheduler::Shutdown() { | 814 | void Scheduler::Shutdown() { |
| 514 | current_thread = nullptr; | 815 | current_thread = nullptr; |
| 515 | selected_thread = nullptr; | 816 | selected_thread = nullptr; |
| @@ -538,4 +839,13 @@ SchedulerLockAndSleep::~SchedulerLockAndSleep() { | |||
| 538 | time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds); | 839 | time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds); |
| 539 | } | 840 | } |
| 540 | 841 | ||
| 842 | void SchedulerLockAndSleep::Release() { | ||
| 843 | if (sleep_cancelled) { | ||
| 844 | return; | ||
| 845 | } | ||
| 846 | auto& time_manager = kernel.TimeManager(); | ||
| 847 | time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds); | ||
| 848 | sleep_cancelled = true; | ||
| 849 | } | ||
| 850 | |||
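Release makes the wakeup idempotent: whichever of Release or the destructor runs first arms the timer, and the cancelled flag guards against arming it twice. A minimal RAII sketch of the same contract, with a hypothetical TimerQueue standing in for the TimeManager:

    #include <cstdint>

    struct TimerQueue {
        // Arms a one-shot wakeup keyed by an event handle (body elided).
        void ScheduleTimeEvent(std::uint32_t /*handle*/, std::int64_t /*ns*/) {}
    };

    class LockAndSleep {
    public:
        LockAndSleep(TimerQueue& timers, std::uint32_t handle, std::int64_t ns)
            : timers_{timers}, handle_{handle}, ns_{ns} {
            // The real type also takes the scheduler lock for its lifetime.
        }

        void CancelSleep() { armed_or_cancelled_ = true; }

        // Arm the wakeup early, before scope exit.
        void Release() {
            if (armed_or_cancelled_) {
                return;
            }
            timers_.ScheduleTimeEvent(handle_, ns_);
            armed_or_cancelled_ = true; // destructor must not arm it again
        }

        ~LockAndSleep() { Release(); }

    private:
        TimerQueue& timers_;
        std::uint32_t handle_;
        std::int64_t ns_;
        bool armed_or_cancelled_ = false;
    };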
| 541 | } // namespace Kernel | 851 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h index 07df33f9c..b3b4b5169 100644 --- a/src/core/hle/kernel/scheduler.h +++ b/src/core/hle/kernel/scheduler.h | |||
| @@ -11,9 +11,14 @@ | |||
| 11 | 11 | ||
| 12 | #include "common/common_types.h" | 12 | #include "common/common_types.h" |
| 13 | #include "common/multi_level_queue.h" | 13 | #include "common/multi_level_queue.h" |
| 14 | #include "common/spin_lock.h" | ||
| 14 | #include "core/hardware_properties.h" | 15 | #include "core/hardware_properties.h" |
| 15 | #include "core/hle/kernel/thread.h" | 16 | #include "core/hle/kernel/thread.h" |
| 16 | 17 | ||
| 18 | namespace Common { | ||
| 19 | class Fiber; | ||
| 20 | } | ||
| 21 | |||
| 17 | namespace Core { | 22 | namespace Core { |
| 18 | class ARM_Interface; | 23 | class ARM_Interface; |
| 19 | class System; | 24 | class System; |
| @@ -41,41 +46,17 @@ public: | |||
| 41 | return thread_list; | 46 | return thread_list; |
| 42 | } | 47 | } |
| 43 | 48 | ||
| 44 | /** | 49 | /// Notify the scheduler a thread's status has changed. |
| 45 | * Add a thread to the suggested queue of a cpu core. Suggested threads may be | 50 | void AdjustSchedulingOnStatus(Thread* thread, u32 old_flags); |
| 46 | * picked if no thread is scheduled to run on the core. | ||
| 47 | */ | ||
| 48 | void Suggest(u32 priority, std::size_t core, Thread* thread); | ||
| 49 | |||
| 50 | /** | ||
| 51 | * Remove a thread to the suggested queue of a cpu core. Suggested threads may be | ||
| 52 | * picked if no thread is scheduled to run on the core. | ||
| 53 | */ | ||
| 54 | void Unsuggest(u32 priority, std::size_t core, Thread* thread); | ||
| 55 | |||
| 56 | /** | ||
| 57 | * Add a thread to the scheduling queue of a cpu core. The thread is added at the | ||
| 58 | * back the queue in its priority level. | ||
| 59 | */ | ||
| 60 | void Schedule(u32 priority, std::size_t core, Thread* thread); | ||
| 61 | |||
| 62 | /** | ||
| 63 | * Add a thread to the scheduling queue of a cpu core. The thread is added at the | ||
| 64 | * front the queue in its priority level. | ||
| 65 | */ | ||
| 66 | void SchedulePrepend(u32 priority, std::size_t core, Thread* thread); | ||
| 67 | 51 | ||
| 68 | /// Reschedule an already scheduled thread based on a new priority | 52 | /// Notify the scheduler a thread's priority has changed. |
| 69 | void Reschedule(u32 priority, std::size_t core, Thread* thread); | 53 | void AdjustSchedulingOnPriority(Thread* thread, u32 old_priority); |
| 70 | |||
| 71 | /// Unschedules a thread. | ||
| 72 | void Unschedule(u32 priority, std::size_t core, Thread* thread); | ||
| 73 | 54 | ||
| 74 | /// Selects a core and forces it to unload its current thread's context | 55 | /// Notify the scheduler a thread's core and/or affinity mask has changed. |
| 75 | void UnloadThread(std::size_t core); | 56 | void AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask, s32 old_core); |
| 76 | 57 | ||
| 77 | /** | 58 | /** |
| 78 | * Takes care of selecting the new scheduled thread in three steps: | 59 | * Takes care of selecting the new scheduled threads in three steps: |
| 79 | * | 60 | * |
| 80 | * 1. First a thread is selected from the top of the priority queue. If no thread | 61 | * 1. First a thread is selected from the top of the priority queue. If no thread |
| 81 | * is obtained then we move to step two, else we are done. | 62 | * is obtained then we move to step two, else we are done. |
| @@ -85,8 +66,10 @@ public: | |||
| 85 | * | 66 | * |
| 86 | * 3. Third, if no suggested thread is found, we do a second pass and pick a running | 67 | * 3. Third, if no suggested thread is found, we do a second pass and pick a running |
| 87 | * thread in another core and swap it with its current thread. | 68 | * thread in another core and swap it with its current thread. |
| 69 | * | ||
| 70 | * Returns a bitmask of the cores that need rescheduling. | ||
| 88 | */ | 71 | */ |
| 89 | void SelectThread(std::size_t core); | 72 | u32 SelectThreads(); |
| 90 | 73 | ||
| 91 | bool HaveReadyThreads(std::size_t core_id) const { | 74 | bool HaveReadyThreads(std::size_t core_id) const { |
| 92 | return !scheduled_queue[core_id].empty(); | 75 | return !scheduled_queue[core_id].empty(); |
| @@ -149,6 +132,40 @@ private: | |||
| 149 | /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling | 132 | /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling |
| 150 | /// and reschedules current core if needed. | 133 | /// and reschedules current core if needed. |
| 151 | void Unlock(); | 134 | void Unlock(); |
| 135 | |||
| 136 | void EnableInterruptAndSchedule(u32 cores_pending_reschedule, | ||
| 137 | Core::EmuThreadHandle global_thread); | ||
| 138 | |||
| 139 | /** | ||
| 140 | * Add a thread to the suggested queue of a cpu core. Suggested threads may be | ||
| 141 | * picked if no thread is scheduled to run on the core. | ||
| 142 | */ | ||
| 143 | void Suggest(u32 priority, std::size_t core, Thread* thread); | ||
| 144 | |||
| 145 | /** | ||
| 146 | * Remove a thread from the suggested queue of a cpu core. Suggested threads may be | ||
| 147 | * picked if no thread is scheduled to run on the core. | ||
| 148 | */ | ||
| 149 | void Unsuggest(u32 priority, std::size_t core, Thread* thread); | ||
| 150 | |||
| 151 | /** | ||
| 152 | * Add a thread to the scheduling queue of a cpu core. The thread is added at the | ||
| 153 | * back of the queue in its priority level. | ||
| 154 | */ | ||
| 155 | void Schedule(u32 priority, std::size_t core, Thread* thread); | ||
| 156 | |||
| 157 | /** | ||
| 158 | * Add a thread to the scheduling queue of a cpu core. The thread is added at the | ||
| 159 | * front of the queue in its priority level. | ||
| 160 | */ | ||
| 161 | void SchedulePrepend(u32 priority, std::size_t core, Thread* thread); | ||
| 162 | |||
| 163 | /// Reschedule an already scheduled thread based on a new priority | ||
| 164 | void Reschedule(u32 priority, std::size_t core, Thread* thread); | ||
| 165 | |||
| 166 | /// Unschedules a thread. | ||
| 167 | void Unschedule(u32 priority, std::size_t core, Thread* thread); | ||
| 168 | |||
| 152 | /** | 169 | /** |
| 153 | * Transfers a thread into a specific core. If the destination_core is -1 | 170 | * Transfers a thread into a specific core. If the destination_core is -1 |
| 154 | * it will be unscheduled from its source core and added into its suggested | 171 | * it will be unscheduled from its source core and added into its suggested |
| @@ -170,10 +187,13 @@ private: | |||
| 170 | std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62}; | 187 | std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62}; |
| 171 | 188 | ||
| 172 | /// Scheduler lock mechanisms. | 189 | /// Scheduler lock mechanisms. |
| 173 | std::mutex inner_lock{}; // TODO(Blinkhawk): Replace for a SpinLock | 190 | bool is_locked{}; |
| 191 | Common::SpinLock inner_lock{}; | ||
| 174 | std::atomic<s64> scope_lock{}; | 192 | std::atomic<s64> scope_lock{}; |
| 175 | Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()}; | 193 | Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()}; |
| 176 | 194 | ||
| 195 | Common::SpinLock global_list_guard{}; | ||
| 196 | |||
| 177 | /// Lists all thread ids that aren't deleted/etc. | 197 | /// Lists all thread ids that aren't deleted/etc. |
| 178 | std::vector<std::shared_ptr<Thread>> thread_list; | 198 | std::vector<std::shared_ptr<Thread>> thread_list; |
| 179 | KernelCore& kernel; | 199 | KernelCore& kernel; |
| @@ -190,11 +210,11 @@ public: | |||
| 190 | /// Reschedules to the next available thread (call after current thread is suspended) | 210 | /// Reschedules to the next available thread (call after current thread is suspended) |
| 191 | void TryDoContextSwitch(); | 211 | void TryDoContextSwitch(); |
| 192 | 212 | ||
| 193 | /// Unloads currently running thread | 213 | /// The next two functions are for single-core mode only. |
| 194 | void UnloadThread(); | 214 | /// Unload current thread before preempting core. |
| 195 | 215 | void Unload(); | |
| 196 | /// Select the threads in top of the scheduling multilist. | 216 | /// Reload current thread after core preemption. |
| 197 | void SelectThreads(); | 217 | void Reload(); |
| 198 | 218 | ||
| 199 | /// Gets the current running thread | 219 | /// Gets the current running thread |
| 200 | Thread* GetCurrentThread() const; | 220 | Thread* GetCurrentThread() const; |
| @@ -209,15 +229,30 @@ public: | |||
| 209 | return is_context_switch_pending; | 229 | return is_context_switch_pending; |
| 210 | } | 230 | } |
| 211 | 231 | ||
| 232 | void Initialize(); | ||
| 233 | |||
| 212 | /// Shutdowns the scheduler. | 234 | /// Shutdowns the scheduler. |
| 213 | void Shutdown(); | 235 | void Shutdown(); |
| 214 | 236 | ||
| 237 | void OnThreadStart(); | ||
| 238 | |||
| 239 | std::shared_ptr<Common::Fiber>& ControlContext() { | ||
| 240 | return switch_fiber; | ||
| 241 | } | ||
| 242 | |||
| 243 | const std::shared_ptr<Common::Fiber>& ControlContext() const { | ||
| 244 | return switch_fiber; | ||
| 245 | } | ||
| 246 | |||
| 215 | private: | 247 | private: |
| 216 | friend class GlobalScheduler; | 248 | friend class GlobalScheduler; |
| 217 | 249 | ||
| 218 | /// Switches the CPU's active thread context to that of the specified thread | 250 | /// Switches the CPU's active thread context to that of the specified thread |
| 219 | void SwitchContext(); | 251 | void SwitchContext(); |
| 220 | 252 | ||
| 253 | /// When a thread wakes up, it must run this through its new scheduler | ||
| 254 | void SwitchContextStep2(); | ||
| 255 | |||
| 221 | /** | 256 | /** |
| 222 | * Called on every context switch to update the internal timestamp | 257 | * Called on every context switch to update the internal timestamp |
| 223 | * This also updates the running time ticks for the given thread and | 258 | * This also updates the running time ticks for the given thread and |
| @@ -231,14 +266,24 @@ private: | |||
| 231 | */ | 266 | */ |
| 232 | void UpdateLastContextSwitchTime(Thread* thread, Process* process); | 267 | void UpdateLastContextSwitchTime(Thread* thread, Process* process); |
| 233 | 268 | ||
| 269 | static void OnSwitch(void* this_scheduler); | ||
| 270 | void SwitchToCurrent(); | ||
| 271 | |||
| 234 | std::shared_ptr<Thread> current_thread = nullptr; | 272 | std::shared_ptr<Thread> current_thread = nullptr; |
| 235 | std::shared_ptr<Thread> selected_thread = nullptr; | 273 | std::shared_ptr<Thread> selected_thread = nullptr; |
| 274 | std::shared_ptr<Thread> current_thread_prev = nullptr; | ||
| 275 | std::shared_ptr<Thread> selected_thread_set = nullptr; | ||
| 276 | std::shared_ptr<Thread> idle_thread = nullptr; | ||
| 277 | |||
| 278 | std::shared_ptr<Common::Fiber> switch_fiber = nullptr; | ||
| 236 | 279 | ||
| 237 | Core::System& system; | 280 | Core::System& system; |
| 238 | u64 last_context_switch_time = 0; | 281 | u64 last_context_switch_time = 0; |
| 239 | u64 idle_selection_count = 0; | 282 | u64 idle_selection_count = 0; |
| 240 | const std::size_t core_id; | 283 | const std::size_t core_id; |
| 241 | 284 | ||
| 285 | Common::SpinLock guard{}; | ||
| 286 | |||
| 242 | bool is_context_switch_pending = false; | 287 | bool is_context_switch_pending = false; |
| 243 | }; | 288 | }; |
| 244 | 289 | ||
| @@ -261,6 +306,8 @@ public: | |||
| 261 | sleep_cancelled = true; | 306 | sleep_cancelled = true; |
| 262 | } | 307 | } |
| 263 | 308 | ||
| 309 | void Release(); | ||
| 310 | |||
| 264 | private: | 311 | private: |
| 265 | Handle& event_handle; | 312 | Handle& event_handle; |
| 266 | Thread* time_task; | 313 | Thread* time_task; |
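SelectThreads now returns the cores needing a reschedule as a u32 bitmask, which Unlock hands to EnableInterruptAndSchedule. Consuming such a mask is a standard lowest-set-bit loop; a self-contained sketch with a hypothetical InterruptCore helper:

    #include <bit> // std::countr_zero, C++20
    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for raising a reschedule interrupt on one core.
    void InterruptCore(std::uint32_t core) {
        std::printf("interrupting core %u\n", core);
    }

    // Each set bit names a core that must reschedule.
    void EnableInterruptAndSchedule(std::uint32_t cores_pending) {
        while (cores_pending != 0) {
            const auto core = static_cast<std::uint32_t>(std::countr_zero(cores_pending));
            InterruptCore(core);
            cores_pending &= cores_pending - 1; // clear the lowest set bit
        }
    }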
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp index 25438b86b..7b23a6889 100644 --- a/src/core/hle/kernel/server_session.cpp +++ b/src/core/hle/kernel/server_session.cpp | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include "core/hle/kernel/hle_ipc.h" | 17 | #include "core/hle/kernel/hle_ipc.h" |
| 18 | #include "core/hle/kernel/kernel.h" | 18 | #include "core/hle/kernel/kernel.h" |
| 19 | #include "core/hle/kernel/process.h" | 19 | #include "core/hle/kernel/process.h" |
| 20 | #include "core/hle/kernel/scheduler.h" | ||
| 20 | #include "core/hle/kernel/server_session.h" | 21 | #include "core/hle/kernel/server_session.h" |
| 21 | #include "core/hle/kernel/session.h" | 22 | #include "core/hle/kernel/session.h" |
| 22 | #include "core/hle/kernel/thread.h" | 23 | #include "core/hle/kernel/thread.h" |
| @@ -168,9 +169,12 @@ ResultCode ServerSession::CompleteSyncRequest() { | |||
| 168 | } | 169 | } |
| 169 | 170 | ||
| 170 | // Some service requests require the thread to block | 171 | // Some service requests require the thread to block |
| 171 | if (!context.IsThreadWaiting()) { | 172 | { |
| 172 | context.GetThread().ResumeFromWait(); | 173 | SchedulerLock lock(kernel); |
| 173 | context.GetThread().SetWaitSynchronizationResult(result); | 174 | if (!context.IsThreadWaiting()) { |
| 175 | context.GetThread().ResumeFromWait(); | ||
| 176 | context.GetThread().SetSynchronizationResults(nullptr, result); | ||
| 177 | } | ||
| 174 | } | 178 | } |
| 175 | 179 | ||
| 176 | request_queue.Pop(); | 180 | request_queue.Pop(); |
| @@ -180,8 +184,10 @@ ResultCode ServerSession::CompleteSyncRequest() { | |||
| 180 | 184 | ||
| 181 | ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread, | 185 | ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread, |
| 182 | Core::Memory::Memory& memory) { | 186 | Core::Memory::Memory& memory) { |
| 183 | Core::System::GetInstance().CoreTiming().ScheduleEvent(20000, request_event, {}); | 187 | ResultCode result = QueueSyncRequest(std::move(thread), memory); |
| 184 | return QueueSyncRequest(std::move(thread), memory); | 188 | const u64 delay = kernel.IsMulticore() ? 0U : 20000U; |
| 189 | Core::System::GetInstance().CoreTiming().ScheduleEvent(delay, request_event, {}); | ||
| 190 | return result; | ||
| 185 | } | 191 | } |
| 186 | 192 | ||
| 187 | } // namespace Kernel | 193 | } // namespace Kernel |
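HandleSyncRequest now queues the request before scheduling the completion event, and the artificial delay survives only on the single-core path, where the requesting thread must observably block before the reply lands. The timing decision reduces to this pattern (hypothetical EventQueue; not yuzu's CoreTiming API):

    #include <cstdint>
    #include <functional>
    #include <queue>
    #include <vector>

    struct PendingEvent {
        std::uint64_t fire_time;
        std::function<void()> callback;
        bool operator>(const PendingEvent& o) const { return fire_time > o.fire_time; }
    };

    class EventQueue {
    public:
        void ScheduleEvent(std::uint64_t delay_ticks, std::function<void()> cb) {
            events_.push({now_ + delay_ticks, std::move(cb)});
        }
        void AdvanceTo(std::uint64_t t) { // fire everything due by time t
            now_ = t;
            while (!events_.empty() && events_.top().fire_time <= now_) {
                auto cb = events_.top().callback;
                events_.pop();
                cb();
            }
        }
    private:
        std::uint64_t now_ = 0;
        std::priority_queue<PendingEvent, std::vector<PendingEvent>,
                            std::greater<>> events_;
    };

    // Multicore completes immediately; single core keeps the 20000-tick delay.
    void CompleteLater(EventQueue& q, bool is_multicore, std::function<void()> done) {
        q.ScheduleEvent(is_multicore ? 0U : 20000U, std::move(done));
    }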
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 4ae4529f5..5db19dcf3 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp | |||
| @@ -10,14 +10,15 @@ | |||
| 10 | 10 | ||
| 11 | #include "common/alignment.h" | 11 | #include "common/alignment.h" |
| 12 | #include "common/assert.h" | 12 | #include "common/assert.h" |
| 13 | #include "common/fiber.h" | ||
| 13 | #include "common/logging/log.h" | 14 | #include "common/logging/log.h" |
| 14 | #include "common/microprofile.h" | 15 | #include "common/microprofile.h" |
| 15 | #include "common/string_util.h" | 16 | #include "common/string_util.h" |
| 16 | #include "core/arm/exclusive_monitor.h" | 17 | #include "core/arm/exclusive_monitor.h" |
| 17 | #include "core/core.h" | 18 | #include "core/core.h" |
| 18 | #include "core/core_manager.h" | ||
| 19 | #include "core/core_timing.h" | 19 | #include "core/core_timing.h" |
| 20 | #include "core/core_timing_util.h" | 20 | #include "core/core_timing_util.h" |
| 21 | #include "core/cpu_manager.h" | ||
| 21 | #include "core/hle/kernel/address_arbiter.h" | 22 | #include "core/hle/kernel/address_arbiter.h" |
| 22 | #include "core/hle/kernel/client_port.h" | 23 | #include "core/hle/kernel/client_port.h" |
| 23 | #include "core/hle/kernel/client_session.h" | 24 | #include "core/hle/kernel/client_session.h" |
| @@ -27,6 +28,7 @@ | |||
| 27 | #include "core/hle/kernel/memory/memory_block.h" | 28 | #include "core/hle/kernel/memory/memory_block.h" |
| 28 | #include "core/hle/kernel/memory/page_table.h" | 29 | #include "core/hle/kernel/memory/page_table.h" |
| 29 | #include "core/hle/kernel/mutex.h" | 30 | #include "core/hle/kernel/mutex.h" |
| 31 | #include "core/hle/kernel/physical_core.h" | ||
| 30 | #include "core/hle/kernel/process.h" | 32 | #include "core/hle/kernel/process.h" |
| 31 | #include "core/hle/kernel/readable_event.h" | 33 | #include "core/hle/kernel/readable_event.h" |
| 32 | #include "core/hle/kernel/resource_limit.h" | 34 | #include "core/hle/kernel/resource_limit.h" |
| @@ -37,6 +39,7 @@ | |||
| 37 | #include "core/hle/kernel/svc_wrap.h" | 39 | #include "core/hle/kernel/svc_wrap.h" |
| 38 | #include "core/hle/kernel/synchronization.h" | 40 | #include "core/hle/kernel/synchronization.h" |
| 39 | #include "core/hle/kernel/thread.h" | 41 | #include "core/hle/kernel/thread.h" |
| 42 | #include "core/hle/kernel/time_manager.h" | ||
| 40 | #include "core/hle/kernel/transfer_memory.h" | 43 | #include "core/hle/kernel/transfer_memory.h" |
| 41 | #include "core/hle/kernel/writable_event.h" | 44 | #include "core/hle/kernel/writable_event.h" |
| 42 | #include "core/hle/lock.h" | 45 | #include "core/hle/lock.h" |
| @@ -133,6 +136,7 @@ enum class ResourceLimitValueType { | |||
| 133 | 136 | ||
| 134 | ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_limit, | 137 | ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_limit, |
| 135 | u32 resource_type, ResourceLimitValueType value_type) { | 138 | u32 resource_type, ResourceLimitValueType value_type) { |
| 139 | std::lock_guard lock{HLE::g_hle_lock}; | ||
| 136 | const auto type = static_cast<ResourceType>(resource_type); | 140 | const auto type = static_cast<ResourceType>(resource_type); |
| 137 | if (!IsValidResourceType(type)) { | 141 | if (!IsValidResourceType(type)) { |
| 138 | LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type); | 142 | LOG_ERROR(Kernel_SVC, "Invalid resource limit type: '{}'", resource_type); |
| @@ -160,6 +164,7 @@ ResultVal<s64> RetrieveResourceLimitValue(Core::System& system, Handle resource_ | |||
| 160 | 164 | ||
| 161 | /// Set the process heap to a given size. It can both extend and shrink the heap. | 165 | /// Set the process heap to a given size. It can both extend and shrink the heap. |
| 162 | static ResultCode SetHeapSize(Core::System& system, VAddr* heap_addr, u64 heap_size) { | 166 | static ResultCode SetHeapSize(Core::System& system, VAddr* heap_addr, u64 heap_size) { |
| 167 | std::lock_guard lock{HLE::g_hle_lock}; | ||
| 163 | LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size); | 168 | LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size); |
| 164 | 169 | ||
| 165 | // Size must be a multiple of 0x200000 (2MB) and be equal to or less than 8GB. | 170 | // Size must be a multiple of 0x200000 (2MB) and be equal to or less than 8GB. |
| @@ -190,6 +195,7 @@ static ResultCode SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_s | |||
| 190 | 195 | ||
| 191 | static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask, | 196 | static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask, |
| 192 | u32 attribute) { | 197 | u32 attribute) { |
| 198 | std::lock_guard lock{HLE::g_hle_lock}; | ||
| 193 | LOG_DEBUG(Kernel_SVC, | 199 | LOG_DEBUG(Kernel_SVC, |
| 194 | "called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address, | 200 | "called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address, |
| 195 | size, mask, attribute); | 201 | size, mask, attribute); |
| @@ -226,8 +232,15 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si | |||
| 226 | static_cast<Memory::MemoryAttribute>(attribute)); | 232 | static_cast<Memory::MemoryAttribute>(attribute)); |
| 227 | } | 233 | } |
| 228 | 234 | ||
| 235 | static ResultCode SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask, | ||
| 236 | u32 attribute) { | ||
| 237 | return SetMemoryAttribute(system, static_cast<VAddr>(address), static_cast<std::size_t>(size), | ||
| 238 | mask, attribute); | ||
| 239 | } | ||
| 240 | |||
| 229 | /// Maps a memory range into a different range. | 241 | /// Maps a memory range into a different range. |
| 230 | static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) { | 242 | static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) { |
| 243 | std::lock_guard lock{HLE::g_hle_lock}; | ||
| 231 | LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr, | 244 | LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr, |
| 232 | src_addr, size); | 245 | src_addr, size); |
| 233 | 246 | ||
| @@ -241,8 +254,14 @@ static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr | |||
| 241 | return page_table.Map(dst_addr, src_addr, size); | 254 | return page_table.Map(dst_addr, src_addr, size); |
| 242 | } | 255 | } |
| 243 | 256 | ||
| 257 | static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) { | ||
| 258 | return MapMemory(system, static_cast<VAddr>(dst_addr), static_cast<VAddr>(src_addr), | ||
| 259 | static_cast<std::size_t>(size)); | ||
| 260 | } | ||
| 261 | |||
| 244 | /// Unmaps a region that was previously mapped with svcMapMemory | 262 | /// Unmaps a region that was previously mapped with svcMapMemory |
| 245 | static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) { | 263 | static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) { |
| 264 | std::lock_guard lock{HLE::g_hle_lock}; | ||
| 246 | LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr, | 265 | LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr, |
| 247 | src_addr, size); | 266 | src_addr, size); |
| 248 | 267 | ||
| @@ -256,9 +275,15 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad | |||
| 256 | return page_table.Unmap(dst_addr, src_addr, size); | 275 | return page_table.Unmap(dst_addr, src_addr, size); |
| 257 | } | 276 | } |
| 258 | 277 | ||
| 278 | static ResultCode UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) { | ||
| 279 | return UnmapMemory(system, static_cast<VAddr>(dst_addr), static_cast<VAddr>(src_addr), | ||
| 280 | static_cast<std::size_t>(size)); | ||
| 281 | } | ||
| 282 | |||
| 259 | /// Connect to an OS service given the port name, returns the handle to the port in out_handle | 283 | /// Connect to an OS service given the port name, returns the handle to the port in out_handle |
| 260 | static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle, | 284 | static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle, |
| 261 | VAddr port_name_address) { | 285 | VAddr port_name_address) { |
| 286 | std::lock_guard lock{HLE::g_hle_lock}; | ||
| 262 | auto& memory = system.Memory(); | 287 | auto& memory = system.Memory(); |
| 263 | 288 | ||
| 264 | if (!memory.IsValidVirtualAddress(port_name_address)) { | 289 | if (!memory.IsValidVirtualAddress(port_name_address)) { |
| @@ -317,11 +342,30 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) { | |||
| 317 | LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName()); | 342 | LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName()); |
| 318 | 343 | ||
| 319 | auto thread = system.CurrentScheduler().GetCurrentThread(); | 344 | auto thread = system.CurrentScheduler().GetCurrentThread(); |
| 320 | thread->InvalidateWakeupCallback(); | 345 | { |
| 321 | thread->SetStatus(ThreadStatus::WaitIPC); | 346 | SchedulerLock lock(system.Kernel()); |
| 322 | system.PrepareReschedule(thread->GetProcessorID()); | 347 | thread->InvalidateHLECallback(); |
| 348 | thread->SetStatus(ThreadStatus::WaitIPC); | ||
| 349 | session->SendSyncRequest(SharedFrom(thread), system.Memory()); | ||
| 350 | } | ||
| 351 | |||
| 352 | if (thread->HasHLECallback()) { | ||
| 353 | Handle event_handle = thread->GetHLETimeEvent(); | ||
| 354 | if (event_handle != InvalidHandle) { | ||
| 355 | auto& time_manager = system.Kernel().TimeManager(); | ||
| 356 | time_manager.UnscheduleTimeEvent(event_handle); | ||
| 357 | } | ||
| 358 | |||
| 359 | { | ||
| 360 | SchedulerLock lock(system.Kernel()); | ||
| 361 | auto* sync_object = thread->GetHLESyncObject(); | ||
| 362 | sync_object->RemoveWaitingThread(SharedFrom(thread)); | ||
| 363 | } | ||
| 364 | |||
| 365 | thread->InvokeHLECallback(SharedFrom(thread)); | ||
| 366 | } | ||
| 323 | 367 | ||
| 324 | return session->SendSyncRequest(SharedFrom(thread), system.Memory()); | 368 | return thread->GetSignalingResult(); |
| 325 | } | 369 | } |
| 326 | 370 | ||
| 327 | static ResultCode SendSyncRequest32(Core::System& system, Handle handle) { | 371 | static ResultCode SendSyncRequest32(Core::System& system, Handle handle) { |
| @@ -383,6 +427,15 @@ static ResultCode GetProcessId(Core::System& system, u64* process_id, Handle han | |||
| 383 | return ERR_INVALID_HANDLE; | 427 | return ERR_INVALID_HANDLE; |
| 384 | } | 428 | } |
| 385 | 429 | ||
| 430 | static ResultCode GetProcessId32(Core::System& system, u32* process_id_low, u32* process_id_high, | ||
| 431 | Handle handle) { | ||
| 432 | u64 process_id{}; | ||
| 433 | const auto result = GetProcessId(system, &process_id, handle); | ||
| 434 | *process_id_low = static_cast<u32>(process_id); | ||
| 435 | *process_id_high = static_cast<u32>(process_id >> 32); | ||
| 436 | return result; | ||
| 437 | } | ||
| 438 | |||
| 386 | /// Wait for the given handles to synchronize, timeout after the specified nanoseconds | 439 | /// Wait for the given handles to synchronize, timeout after the specified nanoseconds |
| 387 | static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr handles_address, | 440 | static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr handles_address, |
| 388 | u64 handle_count, s64 nano_seconds) { | 441 | u64 handle_count, s64 nano_seconds) { |
| @@ -447,10 +500,13 @@ static ResultCode CancelSynchronization(Core::System& system, Handle thread_hand | |||
| 447 | } | 500 | } |
| 448 | 501 | ||
| 449 | thread->CancelWait(); | 502 | thread->CancelWait(); |
| 450 | system.PrepareReschedule(thread->GetProcessorID()); | ||
| 451 | return RESULT_SUCCESS; | 503 | return RESULT_SUCCESS; |
| 452 | } | 504 | } |
| 453 | 505 | ||
| 506 | static ResultCode CancelSynchronization32(Core::System& system, Handle thread_handle) { | ||
| 507 | return CancelSynchronization(system, thread_handle); | ||
| 508 | } | ||
| 509 | |||
| 454 | /// Attempts to lock a mutex, creating it if it does not already exist | 510 | /// Attempts to lock a mutex, creating it if it does not already exist |
| 455 | static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_handle, | 511 | static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_handle, |
| 456 | VAddr mutex_addr, Handle requesting_thread_handle) { | 512 | VAddr mutex_addr, Handle requesting_thread_handle) { |
| @@ -475,6 +531,12 @@ static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_hand | |||
| 475 | requesting_thread_handle); | 531 | requesting_thread_handle); |
| 476 | } | 532 | } |
| 477 | 533 | ||
| 534 | static ResultCode ArbitrateLock32(Core::System& system, Handle holding_thread_handle, | ||
| 535 | u32 mutex_addr, Handle requesting_thread_handle) { | ||
| 536 | return ArbitrateLock(system, holding_thread_handle, static_cast<VAddr>(mutex_addr), | ||
| 537 | requesting_thread_handle); | ||
| 538 | } | ||
| 539 | |||
| 478 | /// Unlock a mutex | 540 | /// Unlock a mutex |
| 479 | static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) { | 541 | static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) { |
| 480 | LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr); | 542 | LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr); |
| @@ -494,6 +556,10 @@ static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) { | |||
| 494 | return current_process->GetMutex().Release(mutex_addr); | 556 | return current_process->GetMutex().Release(mutex_addr); |
| 495 | } | 557 | } |
| 496 | 558 | ||
| 559 | static ResultCode ArbitrateUnlock32(Core::System& system, u32 mutex_addr) { | ||
| 560 | return ArbitrateUnlock(system, static_cast<VAddr>(mutex_addr)); | ||
| 561 | } | ||
| 562 | |||
| 497 | enum class BreakType : u32 { | 563 | enum class BreakType : u32 { |
| 498 | Panic = 0, | 564 | Panic = 0, |
| 499 | AssertionFailed = 1, | 565 | AssertionFailed = 1, |
| @@ -594,6 +660,7 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) { | |||
| 594 | info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt); | 660 | info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt); |
| 595 | 661 | ||
| 596 | if (!break_reason.signal_debugger) { | 662 | if (!break_reason.signal_debugger) { |
| 663 | SchedulerLock lock(system.Kernel()); | ||
| 597 | LOG_CRITICAL( | 664 | LOG_CRITICAL( |
| 598 | Debug_Emulated, | 665 | Debug_Emulated, |
| 599 | "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}", | 666 | "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}", |
| @@ -605,14 +672,16 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) { | |||
| 605 | const auto thread_processor_id = current_thread->GetProcessorID(); | 672 | const auto thread_processor_id = current_thread->GetProcessorID(); |
| 606 | system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace(); | 673 | system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace(); |
| 607 | 674 | ||
| 608 | system.Kernel().CurrentProcess()->PrepareForTermination(); | ||
| 609 | |||
| 610 | // Kill the current thread | 675 | // Kill the current thread |
| 676 | system.Kernel().ExceptionalExit(); | ||
| 611 | current_thread->Stop(); | 677 | current_thread->Stop(); |
| 612 | system.PrepareReschedule(); | ||
| 613 | } | 678 | } |
| 614 | } | 679 | } |
| 615 | 680 | ||
| 681 | static void Break32(Core::System& system, u32 reason, u32 info1, u32 info2) { | ||
| 682 | Break(system, reason, static_cast<u64>(info1), static_cast<u64>(info2)); | ||
| 683 | } | ||
| 684 | |||
| 616 | /// Used to output a message on a debug hardware unit - does nothing on a retail unit | 685 | /// Used to output a message on a debug hardware unit - does nothing on a retail unit |
| 617 | static void OutputDebugString([[maybe_unused]] Core::System& system, VAddr address, u64 len) { | 686 | static void OutputDebugString([[maybe_unused]] Core::System& system, VAddr address, u64 len) { |
| 618 | if (len == 0) { | 687 | if (len == 0) { |
| @@ -627,6 +696,7 @@ static void OutputDebugString([[maybe_unused]] Core::System& system, VAddr addre | |||
| 627 | /// Gets system/memory information for the current process | 696 | /// Gets system/memory information for the current process |
| 628 | static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 handle, | 697 | static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 handle, |
| 629 | u64 info_sub_id) { | 698 | u64 info_sub_id) { |
| 699 | std::lock_guard lock{HLE::g_hle_lock}; | ||
| 630 | LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id, | 700 | LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id, |
| 631 | info_sub_id, handle); | 701 | info_sub_id, handle); |
| 632 | 702 | ||
| @@ -863,9 +933,9 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha | |||
| 863 | if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) { | 933 | if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) { |
| 864 | const u64 thread_ticks = current_thread->GetTotalCPUTimeTicks(); | 934 | const u64 thread_ticks = current_thread->GetTotalCPUTimeTicks(); |
| 865 | 935 | ||
| 866 | out_ticks = thread_ticks + (core_timing.GetTicks() - prev_ctx_ticks); | 936 | out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks); |
| 867 | } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) { | 937 | } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) { |
| 868 | out_ticks = core_timing.GetTicks() - prev_ctx_ticks; | 938 | out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks; |
| 869 | } | 939 | } |
| 870 | 940 | ||
| 871 | *result = out_ticks; | 941 | *result = out_ticks; |
| @@ -892,6 +962,7 @@ static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_h | |||
| 892 | 962 | ||
| 893 | /// Maps memory at a desired address | 963 | /// Maps memory at a desired address |
| 894 | static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) { | 964 | static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) { |
| 965 | std::lock_guard lock{HLE::g_hle_lock}; | ||
| 895 | LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size); | 966 | LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size); |
| 896 | 967 | ||
| 897 | if (!Common::Is4KBAligned(addr)) { | 968 | if (!Common::Is4KBAligned(addr)) { |
| @@ -939,8 +1010,13 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) | |||
| 939 | return page_table.MapPhysicalMemory(addr, size); | 1010 | return page_table.MapPhysicalMemory(addr, size); |
| 940 | } | 1011 | } |
| 941 | 1012 | ||
| 1013 | static ResultCode MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) { | ||
| 1014 | return MapPhysicalMemory(system, static_cast<VAddr>(addr), static_cast<std::size_t>(size)); | ||
| 1015 | } | ||
| 1016 | |||
| 942 | /// Unmaps memory previously mapped via MapPhysicalMemory | 1017 | /// Unmaps memory previously mapped via MapPhysicalMemory |
| 943 | static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) { | 1018 | static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) { |
| 1019 | std::lock_guard lock{HLE::g_hle_lock}; | ||
| 944 | LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size); | 1020 | LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size); |
| 945 | 1021 | ||
| 946 | if (!Common::Is4KBAligned(addr)) { | 1022 | if (!Common::Is4KBAligned(addr)) { |
| @@ -988,6 +1064,10 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size | |||
| 988 | return page_table.UnmapPhysicalMemory(addr, size); | 1064 | return page_table.UnmapPhysicalMemory(addr, size); |
| 989 | } | 1065 | } |
| 990 | 1066 | ||
| 1067 | static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) { | ||
| 1068 | return UnmapPhysicalMemory(system, static_cast<VAddr>(addr), static_cast<std::size_t>(size)); | ||
| 1069 | } | ||
| 1070 | |||
| 991 | /// Sets the thread activity | 1071 | /// Sets the thread activity |
| 992 | static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 activity) { | 1072 | static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 activity) { |
| 993 | LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", handle, activity); | 1073 | LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", handle, activity); |
| @@ -1017,10 +1097,11 @@ static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act | |||
| 1017 | return ERR_BUSY; | 1097 | return ERR_BUSY; |
| 1018 | } | 1098 | } |
| 1019 | 1099 | ||
| 1020 | thread->SetActivity(static_cast<ThreadActivity>(activity)); | 1100 | return thread->SetActivity(static_cast<ThreadActivity>(activity)); |
| 1101 | } | ||
| 1021 | 1102 | ||
| 1022 | system.PrepareReschedule(thread->GetProcessorID()); | 1103 | static ResultCode SetThreadActivity32(Core::System& system, Handle handle, u32 activity) { |
| 1023 | return RESULT_SUCCESS; | 1104 | return SetThreadActivity(system, handle, activity); |
| 1024 | } | 1105 | } |
| 1025 | 1106 | ||
| 1026 | /// Gets the thread context | 1107 | /// Gets the thread context |
| @@ -1064,6 +1145,10 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H | |||
| 1064 | return RESULT_SUCCESS; | 1145 | return RESULT_SUCCESS; |
| 1065 | } | 1146 | } |
| 1066 | 1147 | ||
| 1148 | static ResultCode GetThreadContext32(Core::System& system, u32 thread_context, Handle handle) { | ||
| 1149 | return GetThreadContext(system, static_cast<VAddr>(thread_context), handle); | ||
| 1150 | } | ||
| 1151 | |||
| 1067 | /// Gets the priority for the specified thread | 1152 | /// Gets the priority for the specified thread |
| 1068 | static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle handle) { | 1153 | static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle handle) { |
| 1069 | LOG_TRACE(Kernel_SVC, "called"); | 1154 | LOG_TRACE(Kernel_SVC, "called"); |
| @@ -1071,6 +1156,7 @@ static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle | |||
| 1071 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | 1156 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); |
| 1072 | const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle); | 1157 | const std::shared_ptr<Thread> thread = handle_table.Get<Thread>(handle); |
| 1073 | if (!thread) { | 1158 | if (!thread) { |
| 1159 | *priority = 0; | ||
| 1074 | LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle); | 1160 | LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle); |
| 1075 | return ERR_INVALID_HANDLE; | 1161 | return ERR_INVALID_HANDLE; |
| 1076 | } | 1162 | } |
| @@ -1105,18 +1191,26 @@ static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 pri | |||
| 1105 | 1191 | ||
| 1106 | thread->SetPriority(priority); | 1192 | thread->SetPriority(priority); |
| 1107 | 1193 | ||
| 1108 | system.PrepareReschedule(thread->GetProcessorID()); | ||
| 1109 | return RESULT_SUCCESS; | 1194 | return RESULT_SUCCESS; |
| 1110 | } | 1195 | } |
| 1111 | 1196 | ||
| 1197 | static ResultCode SetThreadPriority32(Core::System& system, Handle handle, u32 priority) { | ||
| 1198 | return SetThreadPriority(system, handle, priority); | ||
| 1199 | } | ||
| 1200 | |||
| 1112 | /// Get which CPU core is executing the current thread | 1201 | /// Get which CPU core is executing the current thread |
| 1113 | static u32 GetCurrentProcessorNumber(Core::System& system) { | 1202 | static u32 GetCurrentProcessorNumber(Core::System& system) { |
| 1114 | LOG_TRACE(Kernel_SVC, "called"); | 1203 | LOG_TRACE(Kernel_SVC, "called"); |
| 1115 | return system.CurrentScheduler().GetCurrentThread()->GetProcessorID(); | 1204 | return static_cast<u32>(system.CurrentPhysicalCore().CoreIndex()); |
| 1205 | } | ||
| 1206 | |||
| 1207 | static u32 GetCurrentProcessorNumber32(Core::System& system) { | ||
| 1208 | return GetCurrentProcessorNumber(system); | ||
| 1116 | } | 1209 | } |
| 1117 | 1210 | ||
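GetCurrentProcessorNumber now asks the executing physical core rather than the thread object, which is the meaningful answer once every core runs on its own host thread. One cheap way to service that query is a thread-local core index recorded when the host thread binds to a core; a hedged sketch, not the emulator's actual mechanism:

    #include <cstdint>
    #include <cstdio>

    // Each host thread that emulates a core records its index once at startup.
    thread_local std::uint32_t g_current_core_index = 0;

    void BindHostThreadToCore(std::uint32_t core_index) {
        g_current_core_index = core_index;
    }

    // The SVC then reads the executing core's index, independent of which
    // guest thread happens to be loaded on it.
    std::uint32_t GetCurrentProcessorNumber() {
        return g_current_core_index;
    }

    int main() {
        BindHostThreadToCore(2);
        std::printf("core %u\n", GetCurrentProcessorNumber());
    }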
| 1118 | static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_handle, VAddr addr, | 1211 | static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_handle, VAddr addr, |
| 1119 | u64 size, u32 permissions) { | 1212 | u64 size, u32 permissions) { |
| 1213 | std::lock_guard lock{HLE::g_hle_lock}; | ||
| 1120 | LOG_TRACE(Kernel_SVC, | 1214 | LOG_TRACE(Kernel_SVC, |
| 1121 | "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}", | 1215 | "called, shared_memory_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}", |
| 1122 | shared_memory_handle, addr, size, permissions); | 1216 | shared_memory_handle, addr, size, permissions); |
| @@ -1187,9 +1281,16 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han | |||
| 1187 | return shared_memory->Map(*current_process, addr, size, permission_type); | 1281 | return shared_memory->Map(*current_process, addr, size, permission_type); |
| 1188 | } | 1282 | } |
| 1189 | 1283 | ||
| 1284 | static ResultCode MapSharedMemory32(Core::System& system, Handle shared_memory_handle, u32 addr, | ||
| 1285 | u32 size, u32 permissions) { | ||
| 1286 | return MapSharedMemory(system, shared_memory_handle, static_cast<VAddr>(addr), | ||
| 1287 | static_cast<std::size_t>(size), permissions); | ||
| 1288 | } | ||
| 1289 | |||
| 1190 | static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address, | 1290 | static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address, |
| 1191 | VAddr page_info_address, Handle process_handle, | 1291 | VAddr page_info_address, Handle process_handle, |
| 1192 | VAddr address) { | 1292 | VAddr address) { |
| 1293 | std::lock_guard lock{HLE::g_hle_lock}; | ||
| 1193 | LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address); | 1294 | LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address); |
| 1194 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | 1295 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); |
| 1195 | std::shared_ptr<Process> process = handle_table.Get<Process>(process_handle); | 1296 | std::shared_ptr<Process> process = handle_table.Get<Process>(process_handle); |
| @@ -1372,6 +1473,7 @@ static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_ha | |||
| 1372 | /// Exits the current process | 1473 | /// Exits the current process |
| 1373 | static void ExitProcess(Core::System& system) { | 1474 | static void ExitProcess(Core::System& system) { |
| 1374 | auto* current_process = system.Kernel().CurrentProcess(); | 1475 | auto* current_process = system.Kernel().CurrentProcess(); |
| 1476 | UNIMPLEMENTED(); | ||
| 1375 | 1477 | ||
| 1376 | LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID()); | 1478 | LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID()); |
| 1377 | ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running, | 1479 | ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running, |
| @@ -1381,8 +1483,10 @@ static void ExitProcess(Core::System& system) { | |||
| 1381 | 1483 | ||
| 1382 | // Kill the current thread | 1484 | // Kill the current thread |
| 1383 | system.CurrentScheduler().GetCurrentThread()->Stop(); | 1485 | system.CurrentScheduler().GetCurrentThread()->Stop(); |
| 1486 | } | ||
| 1384 | 1487 | ||
| 1385 | system.PrepareReschedule(); | 1488 | static void ExitProcess32(Core::System& system) { |
| 1489 | ExitProcess(system); | ||
| 1386 | } | 1490 | } |
| 1387 | 1491 | ||
| 1388 | /// Creates a new thread | 1492 | /// Creates a new thread |
| @@ -1428,9 +1532,10 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e | |||
| 1428 | 1532 | ||
| 1429 | ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Threads, 1)); | 1533 | ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Threads, 1)); |
| 1430 | 1534 | ||
| 1535 | ThreadType type = THREADTYPE_USER; | ||
| 1431 | CASCADE_RESULT(std::shared_ptr<Thread> thread, | 1536 | CASCADE_RESULT(std::shared_ptr<Thread> thread, |
| 1432 | Thread::Create(kernel, "", entry_point, priority, arg, processor_id, stack_top, | 1537 | Thread::Create(system, type, "", entry_point, priority, arg, processor_id, |
| 1433 | *current_process)); | 1538 | stack_top, current_process)); |
| 1434 | 1539 | ||
| 1435 | const auto new_thread_handle = current_process->GetHandleTable().Create(thread); | 1540 | const auto new_thread_handle = current_process->GetHandleTable().Create(thread); |
| 1436 | if (new_thread_handle.Failed()) { | 1541 | if (new_thread_handle.Failed()) { |
| @@ -1444,11 +1549,15 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e | |||
| 1444 | thread->SetName( | 1549 | thread->SetName( |
| 1445 | fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *new_thread_handle)); | 1550 | fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *new_thread_handle)); |
| 1446 | 1551 | ||
| 1447 | system.PrepareReschedule(thread->GetProcessorID()); | ||
| 1448 | |||
| 1449 | return RESULT_SUCCESS; | 1552 | return RESULT_SUCCESS; |
| 1450 | } | 1553 | } |
| 1451 | 1554 | ||
| 1555 | static ResultCode CreateThread32(Core::System& system, Handle* out_handle, u32 priority, | ||
| 1556 | u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) { | ||
| 1557 | return CreateThread(system, out_handle, static_cast<VAddr>(entry_point), static_cast<u64>(arg), | ||
| 1558 | static_cast<VAddr>(stack_top), priority, processor_id); | ||
| 1559 | } | ||
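CreateThread32 above is not a pure pass-through: the 32-bit ABI supplies priority before entry_point, so the wrapper both widens the 32-bit values and reorders them to match the 64-bit implementation. A standalone sketch of that reorder-and-widen shape; Captured and CreateThread64 are invented stand-ins, not the kernel's functions:

    #include <cassert>
    #include <cstdint>

    using VAddr = uint64_t;

    struct Captured {
        VAddr entry_point;
        uint64_t arg;
        VAddr stack_top;
        uint32_t priority;
        int32_t processor_id;
    };

    // Stand-in for the 64-bit CreateThread: records its arguments so the
    // forwarding order can be checked.
    static int CreateThread64(Captured* out, VAddr entry_point, uint64_t arg, VAddr stack_top,
                              uint32_t priority, int32_t processor_id) {
        *out = {entry_point, arg, stack_top, priority, processor_id};
        return 0;
    }

    // 32-bit ABI order (priority first), widened and reordered on forward.
    static int CreateThread32(Captured* out, uint32_t priority, uint32_t entry_point, uint32_t arg,
                              uint32_t stack_top, int32_t processor_id) {
        return CreateThread64(out, static_cast<VAddr>(entry_point), static_cast<uint64_t>(arg),
                              static_cast<VAddr>(stack_top), priority, processor_id);
    }

    int main() {
        Captured c{};
        CreateThread32(&c, 28, 0x1000, 7, 0x2000, 1);
        assert(c.priority == 28 && c.entry_point == 0x1000 && c.arg == 7);
        assert(c.stack_top == 0x2000 && c.processor_id == 1);
        return 0;
    }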
| 1560 | |||
| 1452 | /// Starts the thread for the provided handle | 1561 | /// Starts the thread for the provided handle |
| 1453 | static ResultCode StartThread(Core::System& system, Handle thread_handle) { | 1562 | static ResultCode StartThread(Core::System& system, Handle thread_handle) { |
| 1454 | LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle); | 1563 | LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle); |
| @@ -1463,13 +1572,11 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) { | |||
| 1463 | 1572 | ||
| 1464 | ASSERT(thread->GetStatus() == ThreadStatus::Dormant); | 1573 | ASSERT(thread->GetStatus() == ThreadStatus::Dormant); |
| 1465 | 1574 | ||
| 1466 | thread->ResumeFromWait(); | 1575 | return thread->Start(); |
| 1467 | 1576 | } | |
| 1468 | if (thread->GetStatus() == ThreadStatus::Ready) { | ||
| 1469 | system.PrepareReschedule(thread->GetProcessorID()); | ||
| 1470 | } | ||
| 1471 | 1577 | ||
| 1472 | return RESULT_SUCCESS; | 1578 | static ResultCode StartThread32(Core::System& system, Handle thread_handle) { |
| 1579 | return StartThread(system, thread_handle); | ||
| 1473 | } | 1580 | } |
| 1474 | 1581 | ||
| 1475 | /// Called when a thread exits | 1582 | /// Called when a thread exits |
| @@ -1477,9 +1584,12 @@ static void ExitThread(Core::System& system) { | |||
| 1477 | LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC()); | 1584 | LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC()); |
| 1478 | 1585 | ||
| 1479 | auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); | 1586 | auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); |
| 1480 | current_thread->Stop(); | ||
| 1481 | system.GlobalScheduler().RemoveThread(SharedFrom(current_thread)); | 1587 | system.GlobalScheduler().RemoveThread(SharedFrom(current_thread)); |
| 1482 | system.PrepareReschedule(); | 1588 | current_thread->Stop(); |
| 1589 | } | ||
| 1590 | |||
| 1591 | static void ExitThread32(Core::System& system) { | ||
| 1592 | ExitThread(system); | ||
| 1483 | } | 1593 | } |
| 1484 | 1594 | ||
| 1485 | /// Sleep the current thread | 1595 | /// Sleep the current thread |
| @@ -1498,15 +1608,21 @@ static void SleepThread(Core::System& system, s64 nanoseconds) { | |||
| 1498 | 1608 | ||
| 1499 | if (nanoseconds <= 0) { | 1609 | if (nanoseconds <= 0) { |
| 1500 | switch (static_cast<SleepType>(nanoseconds)) { | 1610 | switch (static_cast<SleepType>(nanoseconds)) { |
| 1501 | case SleepType::YieldWithoutLoadBalancing: | 1611 | case SleepType::YieldWithoutLoadBalancing: { |
| 1502 | is_redundant = current_thread->YieldSimple(); | 1612 | auto pair = current_thread->YieldSimple(); |
| 1613 | is_redundant = pair.second; | ||
| 1503 | break; | 1614 | break; |
| 1504 | case SleepType::YieldWithLoadBalancing: | 1615 | } |
| 1505 | is_redundant = current_thread->YieldAndBalanceLoad(); | 1616 | case SleepType::YieldWithLoadBalancing: { |
| 1617 | auto pair = current_thread->YieldAndBalanceLoad(); | ||
| 1618 | is_redundant = pair.second; | ||
| 1506 | break; | 1619 | break; |
| 1507 | case SleepType::YieldAndWaitForLoadBalancing: | 1620 | } |
| 1508 | is_redundant = current_thread->YieldAndWaitForLoadBalancing(); | 1621 | case SleepType::YieldAndWaitForLoadBalancing: { |
| 1622 | auto pair = current_thread->YieldAndWaitForLoadBalancing(); | ||
| 1623 | is_redundant = pair.second; | ||
| 1509 | break; | 1624 | break; |
| 1625 | } | ||
| 1510 | default: | 1626 | default: |
| 1511 | UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds); | 1627 | UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds); |
| 1512 | } | 1628 | } |
| @@ -1514,13 +1630,18 @@ static void SleepThread(Core::System& system, s64 nanoseconds) { | |||
| 1514 | current_thread->Sleep(nanoseconds); | 1630 | current_thread->Sleep(nanoseconds); |
| 1515 | } | 1631 | } |
| 1516 | 1632 | ||
| 1517 | if (is_redundant) { | 1633 | if (is_redundant && !system.Kernel().IsMulticore()) { |
| 1518 | // If it's redundant, the core is pretty much idle. Some games keep idling | 1634 | system.Kernel().ExitSVCProfile(); |
| 1519 | // a core while it's doing nothing, we advance timing to avoid costly continuous | 1635 | system.CoreTiming().AddTicks(1000U); |
| 1520 | // calls. | 1636 | system.GetCpuManager().PreemptSingleCore(); |
| 1521 | system.CoreTiming().AddTicks(2000); | 1637 | system.Kernel().EnterSVCProfile(); |
| 1522 | } | 1638 | } |
| 1523 | system.PrepareReschedule(current_thread->GetProcessorID()); | 1639 | } |
| 1640 | |||
| 1641 | static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanoseconds_high) { | ||
| 1642 | const s64 nanoseconds = static_cast<s64>(static_cast<u64>(nanoseconds_low) | | ||
| 1643 | (static_cast<u64>(nanoseconds_high) << 32)); | ||
| 1644 | SleepThread(system, nanoseconds); | ||
| 1524 | } | 1645 | } |
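SleepThread32 rebuilds a signed 64-bit nanosecond count from a low/high register pair; the same split recurs in WaitProcessWideKeyAtomic32, WaitForAddress32, and SetThreadCoreMask32, and in the other direction in GetSystemTick32. A self-contained sketch of the pack and split helpers, with hypothetical names:

    #include <cassert>
    #include <cstdint>

    // Combine two 32-bit register values into one signed 64-bit quantity,
    // as SleepThread32 does for its nanosecond argument.
    static int64_t PackS64(uint32_t low, uint32_t high) {
        return static_cast<int64_t>(static_cast<uint64_t>(low) |
                                    (static_cast<uint64_t>(high) << 32));
    }

    // Split a 64-bit result back into two registers, as GetSystemTick32 does.
    static void SplitU64(uint64_t value, uint32_t* low, uint32_t* high) {
        *low = static_cast<uint32_t>(value);
        *high = static_cast<uint32_t>(value >> 32);
    }

    int main() {
        const int64_t ns = PackS64(0xDEADBEEF, 0x00000001);
        assert(ns == 0x1DEADBEEF);
        uint32_t lo = 0;
        uint32_t hi = 0;
        SplitU64(static_cast<uint64_t>(ns), &lo, &hi);
        assert(lo == 0xDEADBEEF && hi == 0x00000001);
        return 0;
    }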
| 1525 | 1646 | ||
| 1526 | /// Wait process wide key atomic | 1647 | /// Wait process wide key atomic |
| @@ -1547,31 +1668,69 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add | |||
| 1547 | } | 1668 | } |
| 1548 | 1669 | ||
| 1549 | ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); | 1670 | ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); |
| 1550 | 1671 | auto& kernel = system.Kernel(); | |
| 1672 | Handle event_handle; | ||
| 1673 | Thread* current_thread = system.CurrentScheduler().GetCurrentThread(); | ||
| 1551 | auto* const current_process = system.Kernel().CurrentProcess(); | 1674 | auto* const current_process = system.Kernel().CurrentProcess(); |
| 1552 | const auto& handle_table = current_process->GetHandleTable(); | 1675 | { |
| 1553 | std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle); | 1676 | SchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds); |
| 1554 | ASSERT(thread); | 1677 | const auto& handle_table = current_process->GetHandleTable(); |
| 1678 | std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle); | ||
| 1679 | ASSERT(thread); | ||
| 1680 | |||
| 1681 | current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT); | ||
| 1682 | |||
| 1683 | if (thread->IsPendingTermination()) { | ||
| 1684 | lock.CancelSleep(); | ||
| 1685 | return ERR_THREAD_TERMINATING; | ||
| 1686 | } | ||
| 1687 | |||
| 1688 | const auto release_result = current_process->GetMutex().Release(mutex_addr); | ||
| 1689 | if (release_result.IsError()) { | ||
| 1690 | lock.CancelSleep(); | ||
| 1691 | return release_result; | ||
| 1692 | } | ||
| 1693 | |||
| 1694 | if (nano_seconds == 0) { | ||
| 1695 | lock.CancelSleep(); | ||
| 1696 | return RESULT_TIMEOUT; | ||
| 1697 | } | ||
| 1555 | 1698 | ||
| 1556 | const auto release_result = current_process->GetMutex().Release(mutex_addr); | 1699 | current_thread->SetCondVarWaitAddress(condition_variable_addr); |
| 1557 | if (release_result.IsError()) { | 1700 | current_thread->SetMutexWaitAddress(mutex_addr); |
| 1558 | return release_result; | 1701 | current_thread->SetWaitHandle(thread_handle); |
| 1702 | current_thread->SetStatus(ThreadStatus::WaitCondVar); | ||
| 1703 | current_process->InsertConditionVariableThread(SharedFrom(current_thread)); | ||
| 1559 | } | 1704 | } |
| 1560 | 1705 | ||
| 1561 | Thread* current_thread = system.CurrentScheduler().GetCurrentThread(); | 1706 | if (event_handle != InvalidHandle) { |
| 1562 | current_thread->SetCondVarWaitAddress(condition_variable_addr); | 1707 | auto& time_manager = kernel.TimeManager(); |
| 1563 | current_thread->SetMutexWaitAddress(mutex_addr); | 1708 | time_manager.UnscheduleTimeEvent(event_handle); |
| 1564 | current_thread->SetWaitHandle(thread_handle); | 1709 | } |
| 1565 | current_thread->SetStatus(ThreadStatus::WaitCondVar); | 1710 | |
| 1566 | current_thread->InvalidateWakeupCallback(); | 1711 | { |
| 1567 | current_process->InsertConditionVariableThread(SharedFrom(current_thread)); | 1712 | SchedulerLock lock(kernel); |
| 1568 | 1713 | ||
| 1569 | current_thread->WakeAfterDelay(nano_seconds); | 1714 | auto* owner = current_thread->GetLockOwner(); |
| 1715 | if (owner != nullptr) { | ||
| 1716 | owner->RemoveMutexWaiter(SharedFrom(current_thread)); | ||
| 1717 | } | ||
| 1570 | 1718 | ||
| 1719 | current_process->RemoveConditionVariableThread(SharedFrom(current_thread)); | ||
| 1720 | } | ||
| 1571 | // Note: Deliberately don't attempt to inherit the lock owner's priority. | 1721 | // Note: Deliberately don't attempt to inherit the lock owner's priority. |
| 1572 | 1722 | ||
| 1573 | system.PrepareReschedule(current_thread->GetProcessorID()); | 1723 | return current_thread->GetSignalingResult(); |
| 1574 | return RESULT_SUCCESS; | 1724 | } |
| 1725 | |||
| 1726 | static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 mutex_addr, | ||
| 1727 | u32 condition_variable_addr, Handle thread_handle, | ||
| 1728 | u32 nanoseconds_low, u32 nanoseconds_high) { | ||
| 1729 | const s64 nanoseconds = | ||
| 1730 | static_cast<s64>(nanoseconds_low | (static_cast<u64>(nanoseconds_high) << 32)); | ||
| 1731 | return WaitProcessWideKeyAtomic(system, static_cast<VAddr>(mutex_addr), | ||
| 1732 | static_cast<VAddr>(condition_variable_addr), thread_handle, | ||
| 1733 | nanoseconds); | ||
| 1575 | } | 1734 | } |
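The block-scoped SchedulerLockAndSleep used above arms the thread's wake-up timeout when it goes out of scope unless an early-exit path calls CancelSleep() first, which is why every error return inside the braces cancels before returning. A rough host-side sketch of that RAII shape; LockAndSleepGuard is invented, and the real guard also holds the scheduler lock and hands back an event handle:

    #include <cstdint>
    #include <cstdio>
    #include <functional>

    class LockAndSleepGuard {
    public:
        LockAndSleepGuard(std::function<void(int64_t)> schedule, int64_t timeout_ns)
            : schedule_{std::move(schedule)}, timeout_ns_{timeout_ns} {}

        // Early-exit paths (pending termination, failed mutex release,
        // zero timeout) call this so no wake-up event is ever armed.
        void CancelSleep() {
            cancelled_ = true;
        }

        ~LockAndSleepGuard() {
            if (!cancelled_) {
                schedule_(timeout_ns_); // arm the timeout on normal scope exit
            }
        }

    private:
        std::function<void(int64_t)> schedule_;
        int64_t timeout_ns_;
        bool cancelled_ = false;
    };

    int main() {
        {
            LockAndSleepGuard guard(
                [](int64_t ns) {
                    std::printf("timeout armed for %lld ns\n", static_cast<long long>(ns));
                },
                100);
            // No CancelSleep(): leaving the scope arms the timeout.
        }
        {
            LockAndSleepGuard guard([](int64_t) { std::printf("never printed\n"); }, 100);
            guard.CancelSleep(); // e.g. the nano_seconds == 0 early return
        }
        return 0;
    }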
| 1576 | 1735 | ||
| 1577 | /// Signal process wide key | 1736 | /// Signal process wide key |
| @@ -1582,7 +1741,9 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_ | |||
| 1582 | ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); | 1741 | ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); |
| 1583 | 1742 | ||
| 1584 | // Retrieve a list of all threads that are waiting for this condition variable. | 1743 | // Retrieve a list of all threads that are waiting for this condition variable. |
| 1585 | auto* const current_process = system.Kernel().CurrentProcess(); | 1744 | auto& kernel = system.Kernel(); |
| 1745 | SchedulerLock lock(kernel); | ||
| 1746 | auto* const current_process = kernel.CurrentProcess(); | ||
| 1586 | std::vector<std::shared_ptr<Thread>> waiting_threads = | 1747 | std::vector<std::shared_ptr<Thread>> waiting_threads = |
| 1587 | current_process->GetConditionVariableThreads(condition_variable_addr); | 1748 | current_process->GetConditionVariableThreads(condition_variable_addr); |
| 1588 | 1749 | ||
| @@ -1591,7 +1752,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_ | |||
| 1591 | std::size_t last = waiting_threads.size(); | 1752 | std::size_t last = waiting_threads.size(); |
| 1592 | if (target > 0) | 1753 | if (target > 0) |
| 1593 | last = std::min(waiting_threads.size(), static_cast<std::size_t>(target)); | 1754 | last = std::min(waiting_threads.size(), static_cast<std::size_t>(target)); |
| 1594 | 1755 | auto& time_manager = kernel.TimeManager(); | |
| 1595 | for (std::size_t index = 0; index < last; ++index) { | 1756 | for (std::size_t index = 0; index < last; ++index) { |
| 1596 | auto& thread = waiting_threads[index]; | 1757 | auto& thread = waiting_threads[index]; |
| 1597 | 1758 | ||
| @@ -1599,7 +1760,6 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_ | |||
| 1599 | 1760 | ||
| 1600 | // Release the condition variable thread. | 1761 | // Release the condition variable thread. |
| 1601 | current_process->RemoveConditionVariableThread(thread); | 1762 | current_process->RemoveConditionVariableThread(thread); |
| 1602 | thread->SetCondVarWaitAddress(0); | ||
| 1603 | 1763 | ||
| 1604 | const std::size_t current_core = system.CurrentCoreIndex(); | 1764 | const std::size_t current_core = system.CurrentCoreIndex(); |
| 1605 | auto& monitor = system.Monitor(); | 1765 | auto& monitor = system.Monitor(); |
| @@ -1610,10 +1770,8 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_ | |||
| 1610 | u32 update_val = 0; | 1770 | u32 update_val = 0; |
| 1611 | const VAddr mutex_address = thread->GetMutexWaitAddress(); | 1771 | const VAddr mutex_address = thread->GetMutexWaitAddress(); |
| 1612 | do { | 1772 | do { |
| 1613 | monitor.SetExclusive(current_core, mutex_address); | ||
| 1614 | |||
| 1615 | // If the mutex is not yet acquired, acquire it. | 1773 | // If the mutex is not yet acquired, acquire it. |
| 1616 | mutex_val = memory.Read32(mutex_address); | 1774 | mutex_val = monitor.ExclusiveRead32(current_core, mutex_address); |
| 1617 | 1775 | ||
| 1618 | if (mutex_val != 0) { | 1776 | if (mutex_val != 0) { |
| 1619 | update_val = mutex_val | Mutex::MutexHasWaitersFlag; | 1777 | update_val = mutex_val | Mutex::MutexHasWaitersFlag; |
| @@ -1621,33 +1779,28 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_ | |||
| 1621 | update_val = thread->GetWaitHandle(); | 1779 | update_val = thread->GetWaitHandle(); |
| 1622 | } | 1780 | } |
| 1623 | } while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val)); | 1781 | } while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val)); |
| 1782 | monitor.ClearExclusive(); | ||
| 1624 | if (mutex_val == 0) { | 1783 | if (mutex_val == 0) { |
| 1625 | // We were able to acquire the mutex, resume this thread. | 1784 | // We were able to acquire the mutex, resume this thread. |
| 1626 | ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar); | ||
| 1627 | thread->ResumeFromWait(); | ||
| 1628 | |||
| 1629 | auto* const lock_owner = thread->GetLockOwner(); | 1785 | auto* const lock_owner = thread->GetLockOwner(); |
| 1630 | if (lock_owner != nullptr) { | 1786 | if (lock_owner != nullptr) { |
| 1631 | lock_owner->RemoveMutexWaiter(thread); | 1787 | lock_owner->RemoveMutexWaiter(thread); |
| 1632 | } | 1788 | } |
| 1633 | 1789 | ||
| 1634 | thread->SetLockOwner(nullptr); | 1790 | thread->SetLockOwner(nullptr); |
| 1635 | thread->SetMutexWaitAddress(0); | 1791 | thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS); |
| 1636 | thread->SetWaitHandle(0); | 1792 | thread->ResumeFromWait(); |
| 1637 | thread->SetWaitSynchronizationResult(RESULT_SUCCESS); | ||
| 1638 | system.PrepareReschedule(thread->GetProcessorID()); | ||
| 1639 | } else { | 1793 | } else { |
| 1640 | // The mutex is already owned by some other thread; make this thread wait on it. | 1794 | // The mutex is already owned by some other thread; make this thread wait on it. |
| 1641 | const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask); | 1795 | const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask); |
| 1642 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | 1796 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); |
| 1643 | auto owner = handle_table.Get<Thread>(owner_handle); | 1797 | auto owner = handle_table.Get<Thread>(owner_handle); |
| 1644 | ASSERT(owner); | 1798 | ASSERT(owner); |
| 1645 | ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar); | 1799 | if (thread->GetStatus() == ThreadStatus::WaitCondVar) { |
| 1646 | thread->InvalidateWakeupCallback(); | 1800 | thread->SetStatus(ThreadStatus::WaitMutex); |
| 1647 | thread->SetStatus(ThreadStatus::WaitMutex); | 1801 | } |
| 1648 | 1802 | ||
| 1649 | owner->AddMutexWaiter(thread); | 1803 | owner->AddMutexWaiter(thread); |
| 1650 | system.PrepareReschedule(thread->GetProcessorID()); | ||
| 1651 | } | 1804 | } |
| 1652 | } | 1805 | } |
| 1653 | } | 1806 | } |
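The rewritten mutex hand-off loop drives the exclusive monitor directly: ExclusiveRead32 reads the mutex word and opens an exclusive access, ExclusiveWrite32 fails if the reservation was lost in the meantime, and ClearExclusive drops the reservation once the loop exits. On the host this is morally a compare-and-swap; a rough analogue with std::atomic, not the emulator's monitor API:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Retry loop in the shape of the SignalProcessWideKey hand-off: read
    // the mutex word, compute the new value, and publish it only if the
    // word has not changed. Returns the observed value, mirroring
    // mutex_val in the original loop.
    static uint32_t HandOffMutex(std::atomic<uint32_t>& mutex_word, uint32_t waiter_handle,
                                 uint32_t has_waiters_flag) {
        uint32_t observed = mutex_word.load();
        for (;;) {
            const uint32_t update = (observed != 0)
                                        ? (observed | has_waiters_flag) // still owned: flag waiters
                                        : waiter_handle;                // free: hand to the waiter
            if (mutex_word.compare_exchange_weak(observed, update)) {
                return observed;
            }
            // compare_exchange_weak refreshed 'observed'; recompute and retry.
        }
    }

    int main() {
        std::atomic<uint32_t> word{0};
        assert(HandOffMutex(word, 0x42, 0x40000000u) == 0); // mutex was free: acquired
        assert(word.load() == 0x42);
        assert(HandOffMutex(word, 0x43, 0x40000000u) == 0x42); // owned: waiters flagged
        assert(word.load() == (0x42u | 0x40000000u));
        return 0;
    }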
| @@ -1678,12 +1831,15 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type, | |||
| 1678 | auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter(); | 1831 | auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter(); |
| 1679 | const ResultCode result = | 1832 | const ResultCode result = |
| 1680 | address_arbiter.WaitForAddress(address, arbitration_type, value, timeout); | 1833 | address_arbiter.WaitForAddress(address, arbitration_type, value, timeout); |
| 1681 | if (result == RESULT_SUCCESS) { | ||
| 1682 | system.PrepareReschedule(); | ||
| 1683 | } | ||
| 1684 | return result; | 1834 | return result; |
| 1685 | } | 1835 | } |
| 1686 | 1836 | ||
| 1837 | static ResultCode WaitForAddress32(Core::System& system, u32 address, u32 type, s32 value, | ||
| 1838 | u32 timeout_low, u32 timeout_high) { | ||
| 1839 | const s64 timeout = static_cast<s64>(timeout_low | (static_cast<u64>(timeout_high) << 32)); | ||
| 1840 | return WaitForAddress(system, static_cast<VAddr>(address), type, value, timeout); | ||
| 1841 | } | ||
| 1842 | |||
| 1687 | // Signals to an address (via Address Arbiter) | 1843 | // Signals to an address (via Address Arbiter) |
| 1688 | static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value, | 1844 | static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value, |
| 1689 | s32 num_to_wake) { | 1845 | s32 num_to_wake) { |
| @@ -1707,6 +1863,11 @@ static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, | |||
| 1707 | return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake); | 1863 | return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake); |
| 1708 | } | 1864 | } |
| 1709 | 1865 | ||
| 1866 | static ResultCode SignalToAddress32(Core::System& system, u32 address, u32 type, s32 value, | ||
| 1867 | s32 num_to_wake) { | ||
| 1868 | return SignalToAddress(system, static_cast<VAddr>(address), type, value, num_to_wake); | ||
| 1869 | } | ||
| 1870 | |||
| 1710 | static void KernelDebug([[maybe_unused]] Core::System& system, | 1871 | static void KernelDebug([[maybe_unused]] Core::System& system, |
| 1711 | [[maybe_unused]] u32 kernel_debug_type, [[maybe_unused]] u64 param1, | 1872 | [[maybe_unused]] u32 kernel_debug_type, [[maybe_unused]] u64 param1, |
| 1712 | [[maybe_unused]] u64 param2, [[maybe_unused]] u64 param3) { | 1873 | [[maybe_unused]] u64 param2, [[maybe_unused]] u64 param3) { |
| @@ -1725,14 +1886,21 @@ static u64 GetSystemTick(Core::System& system) { | |||
| 1725 | auto& core_timing = system.CoreTiming(); | 1886 | auto& core_timing = system.CoreTiming(); |
| 1726 | 1887 | ||
| 1727 | // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick) | 1888 | // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick) |
| 1728 | const u64 result{Core::Timing::CpuCyclesToClockCycles(system.CoreTiming().GetTicks())}; | 1889 | const u64 result{system.CoreTiming().GetClockTicks()}; |
| 1729 | 1890 | ||
| 1730 | // Advance time to defeat dumb games that busy-wait for the frame to end. | 1891 | if (!system.Kernel().IsMulticore()) { |
| 1731 | core_timing.AddTicks(400); | 1892 | core_timing.AddTicks(400U); |
| 1893 | } | ||
| 1732 | 1894 | ||
| 1733 | return result; | 1895 | return result; |
| 1734 | } | 1896 | } |
| 1735 | 1897 | ||
| 1898 | static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high) { | ||
| 1899 | u64 time = GetSystemTick(system); | ||
| 1900 | *time_low = static_cast<u32>(time); | ||
| 1901 | *time_high = static_cast<u32>(time >> 32); | ||
| 1902 | } | ||
| 1903 | |||
| 1736 | /// Close a handle | 1904 | /// Close a handle |
| 1737 | static ResultCode CloseHandle(Core::System& system, Handle handle) { | 1905 | static ResultCode CloseHandle(Core::System& system, Handle handle) { |
| 1738 | LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle); | 1906 | LOG_TRACE(Kernel_SVC, "Closing handle 0x{:08X}", handle); |
| @@ -1765,9 +1933,14 @@ static ResultCode ResetSignal(Core::System& system, Handle handle) { | |||
| 1765 | return ERR_INVALID_HANDLE; | 1933 | return ERR_INVALID_HANDLE; |
| 1766 | } | 1934 | } |
| 1767 | 1935 | ||
| 1936 | static ResultCode ResetSignal32(Core::System& system, Handle handle) { | ||
| 1937 | return ResetSignal(system, handle); | ||
| 1938 | } | ||
| 1939 | |||
| 1768 | /// Creates a TransferMemory object | 1940 | /// Creates a TransferMemory object |
| 1769 | static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAddr addr, u64 size, | 1941 | static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAddr addr, u64 size, |
| 1770 | u32 permissions) { | 1942 | u32 permissions) { |
| 1943 | std::lock_guard lock{HLE::g_hle_lock}; | ||
| 1771 | LOG_DEBUG(Kernel_SVC, "called addr=0x{:X}, size=0x{:X}, perms=0x{:08X}", addr, size, | 1944 | LOG_DEBUG(Kernel_SVC, "called addr=0x{:X}, size=0x{:X}, perms=0x{:08X}", addr, size, |
| 1772 | permissions); | 1945 | permissions); |
| 1773 | 1946 | ||
| @@ -1812,6 +1985,12 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd | |||
| 1812 | return RESULT_SUCCESS; | 1985 | return RESULT_SUCCESS; |
| 1813 | } | 1986 | } |
| 1814 | 1987 | ||
| 1988 | static ResultCode CreateTransferMemory32(Core::System& system, Handle* handle, u32 addr, u32 size, | ||
| 1989 | u32 permissions) { | ||
| 1990 | return CreateTransferMemory(system, handle, static_cast<VAddr>(addr), | ||
| 1991 | static_cast<std::size_t>(size), permissions); | ||
| 1992 | } | ||
| 1993 | |||
| 1815 | static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core, | 1994 | static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core, |
| 1816 | u64* mask) { | 1995 | u64* mask) { |
| 1817 | LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle); | 1996 | LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle); |
| @@ -1821,6 +2000,8 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, | |||
| 1821 | if (!thread) { | 2000 | if (!thread) { |
| 1822 | LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}", | 2001 | LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}", |
| 1823 | thread_handle); | 2002 | thread_handle); |
| 2003 | *core = 0; | ||
| 2004 | *mask = 0; | ||
| 1824 | return ERR_INVALID_HANDLE; | 2005 | return ERR_INVALID_HANDLE; |
| 1825 | } | 2006 | } |
| 1826 | 2007 | ||
| @@ -1830,6 +2011,15 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, | |||
| 1830 | return RESULT_SUCCESS; | 2011 | return RESULT_SUCCESS; |
| 1831 | } | 2012 | } |
| 1832 | 2013 | ||
| 2014 | static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle, u32* core, | ||
| 2015 | u32* mask_low, u32* mask_high) { | ||
| 2016 | u64 mask{}; | ||
| 2017 | const auto result = GetThreadCoreMask(system, thread_handle, core, &mask); | ||
| 2018 | *mask_high = static_cast<u32>(mask >> 32); | ||
| 2019 | *mask_low = static_cast<u32>(mask); | ||
| 2020 | return result; | ||
| 2021 | } | ||
| 2022 | |||
| 1833 | static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, u32 core, | 2023 | static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, u32 core, |
| 1834 | u64 affinity_mask) { | 2024 | u64 affinity_mask) { |
| 1835 | LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core=0x{:X}, affinity_mask=0x{:016X}", | 2025 | LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core=0x{:X}, affinity_mask=0x{:016X}", |
| @@ -1861,7 +2051,7 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, | |||
| 1861 | return ERR_INVALID_COMBINATION; | 2051 | return ERR_INVALID_COMBINATION; |
| 1862 | } | 2052 | } |
| 1863 | 2053 | ||
| 1864 | if (core < Core::NUM_CPU_CORES) { | 2054 | if (core < Core::Hardware::NUM_CPU_CORES) { |
| 1865 | if ((affinity_mask & (1ULL << core)) == 0) { | 2055 | if ((affinity_mask & (1ULL << core)) == 0) { |
| 1866 | LOG_ERROR(Kernel_SVC, | 2056 | LOG_ERROR(Kernel_SVC, |
| 1867 | "Core is not enabled for the current mask, core={}, mask={:016X}", core, | 2057 | "Core is not enabled for the current mask, core={}, mask={:016X}", core, |
| @@ -1883,11 +2073,14 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, | |||
| 1883 | return ERR_INVALID_HANDLE; | 2073 | return ERR_INVALID_HANDLE; |
| 1884 | } | 2074 | } |
| 1885 | 2075 | ||
| 1886 | system.PrepareReschedule(thread->GetProcessorID()); | 2076 | return thread->SetCoreAndAffinityMask(core, affinity_mask); |
| 1887 | thread->ChangeCore(core, affinity_mask); | 2077 | } |
| 1888 | system.PrepareReschedule(thread->GetProcessorID()); | ||
| 1889 | 2078 | ||
| 1890 | return RESULT_SUCCESS; | 2079 | static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, u32 core, |
| 2080 | u32 affinity_mask_low, u32 affinity_mask_high) { | ||
| 2081 | const u64 affinity_mask = | ||
| 2082 | static_cast<u64>(affinity_mask_low) | (static_cast<u64>(affinity_mask_high) << 32); | ||
| 2083 | return SetThreadCoreMask(system, thread_handle, core, affinity_mask); | ||
| 1891 | } | 2084 | } |
| 1892 | 2085 | ||
| 1893 | static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle* read_handle) { | 2086 | static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle* read_handle) { |
| @@ -1918,6 +2111,10 @@ static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle | |||
| 1918 | return RESULT_SUCCESS; | 2111 | return RESULT_SUCCESS; |
| 1919 | } | 2112 | } |
| 1920 | 2113 | ||
| 2114 | static ResultCode CreateEvent32(Core::System& system, Handle* write_handle, Handle* read_handle) { | ||
| 2115 | return CreateEvent(system, write_handle, read_handle); | ||
| 2116 | } | ||
| 2117 | |||
| 1921 | static ResultCode ClearEvent(Core::System& system, Handle handle) { | 2118 | static ResultCode ClearEvent(Core::System& system, Handle handle) { |
| 1922 | LOG_TRACE(Kernel_SVC, "called, event=0x{:08X}", handle); | 2119 | LOG_TRACE(Kernel_SVC, "called, event=0x{:08X}", handle); |
| 1923 | 2120 | ||
| @@ -1939,6 +2136,10 @@ static ResultCode ClearEvent(Core::System& system, Handle handle) { | |||
| 1939 | return ERR_INVALID_HANDLE; | 2136 | return ERR_INVALID_HANDLE; |
| 1940 | } | 2137 | } |
| 1941 | 2138 | ||
| 2139 | static ResultCode ClearEvent32(Core::System& system, Handle handle) { | ||
| 2140 | return ClearEvent(system, handle); | ||
| 2141 | } | ||
| 2142 | |||
| 1942 | static ResultCode SignalEvent(Core::System& system, Handle handle) { | 2143 | static ResultCode SignalEvent(Core::System& system, Handle handle) { |
| 1943 | LOG_DEBUG(Kernel_SVC, "called. Handle=0x{:08X}", handle); | 2144 | LOG_DEBUG(Kernel_SVC, "called. Handle=0x{:08X}", handle); |
| 1944 | 2145 | ||
| @@ -1951,10 +2152,13 @@ static ResultCode SignalEvent(Core::System& system, Handle handle) { | |||
| 1951 | } | 2152 | } |
| 1952 | 2153 | ||
| 1953 | writable_event->Signal(); | 2154 | writable_event->Signal(); |
| 1954 | system.PrepareReschedule(); | ||
| 1955 | return RESULT_SUCCESS; | 2155 | return RESULT_SUCCESS; |
| 1956 | } | 2156 | } |
| 1957 | 2157 | ||
| 2158 | static ResultCode SignalEvent32(Core::System& system, Handle handle) { | ||
| 2159 | return SignalEvent(system, handle); | ||
| 2160 | } | ||
| 2161 | |||
| 1958 | static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) { | 2162 | static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) { |
| 1959 | LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type); | 2163 | LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type); |
| 1960 | 2164 | ||
| @@ -1982,6 +2186,7 @@ static ResultCode GetProcessInfo(Core::System& system, u64* out, Handle process_ | |||
| 1982 | } | 2186 | } |
| 1983 | 2187 | ||
| 1984 | static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle) { | 2188 | static ResultCode CreateResourceLimit(Core::System& system, Handle* out_handle) { |
| 2189 | std::lock_guard lock{HLE::g_hle_lock}; | ||
| 1985 | LOG_DEBUG(Kernel_SVC, "called"); | 2190 | LOG_DEBUG(Kernel_SVC, "called"); |
| 1986 | 2191 | ||
| 1987 | auto& kernel = system.Kernel(); | 2192 | auto& kernel = system.Kernel(); |
| @@ -2139,6 +2344,15 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd | |||
| 2139 | return RESULT_SUCCESS; | 2344 | return RESULT_SUCCESS; |
| 2140 | } | 2345 | } |
| 2141 | 2346 | ||
| 2347 | static ResultCode FlushProcessDataCache32(Core::System& system, Handle handle, u32 address, | ||
| 2348 | u32 size) { | ||
| 2349 | // Note(Blinkhawk): For emulation purposes of the data cache this is mostly a no-op, | ||
| 2350 | // as all emulation is done at the same cache level in the host architecture, so the | ||
| 2351 | // data cache does not need flushing. | ||
| 2352 | LOG_DEBUG(Kernel_SVC, "called"); | ||
| 2353 | return RESULT_SUCCESS; | ||
| 2354 | } | ||
| 2355 | |||
| 2142 | namespace { | 2356 | namespace { |
| 2143 | struct FunctionDef { | 2357 | struct FunctionDef { |
| 2144 | using Func = void(Core::System&); | 2358 | using Func = void(Core::System&); |
| @@ -2153,57 +2367,57 @@ static const FunctionDef SVC_Table_32[] = { | |||
| 2153 | {0x00, nullptr, "Unknown"}, | 2367 | {0x00, nullptr, "Unknown"}, |
| 2154 | {0x01, SvcWrap32<SetHeapSize32>, "SetHeapSize32"}, | 2368 | {0x01, SvcWrap32<SetHeapSize32>, "SetHeapSize32"}, |
| 2155 | {0x02, nullptr, "Unknown"}, | 2369 | {0x02, nullptr, "Unknown"}, |
| 2156 | {0x03, nullptr, "SetMemoryAttribute32"}, | 2370 | {0x03, SvcWrap32<SetMemoryAttribute32>, "SetMemoryAttribute32"}, |
| 2157 | {0x04, nullptr, "MapMemory32"}, | 2371 | {0x04, SvcWrap32<MapMemory32>, "MapMemory32"}, |
| 2158 | {0x05, nullptr, "UnmapMemory32"}, | 2372 | {0x05, SvcWrap32<UnmapMemory32>, "UnmapMemory32"}, |
| 2159 | {0x06, SvcWrap32<QueryMemory32>, "QueryMemory32"}, | 2373 | {0x06, SvcWrap32<QueryMemory32>, "QueryMemory32"}, |
| 2160 | {0x07, nullptr, "ExitProcess32"}, | 2374 | {0x07, SvcWrap32<ExitProcess32>, "ExitProcess32"}, |
| 2161 | {0x08, nullptr, "CreateThread32"}, | 2375 | {0x08, SvcWrap32<CreateThread32>, "CreateThread32"}, |
| 2162 | {0x09, nullptr, "StartThread32"}, | 2376 | {0x09, SvcWrap32<StartThread32>, "StartThread32"}, |
| 2163 | {0x0a, nullptr, "ExitThread32"}, | 2377 | {0x0a, SvcWrap32<ExitThread32>, "ExitThread32"}, |
| 2164 | {0x0b, nullptr, "SleepThread32"}, | 2378 | {0x0b, SvcWrap32<SleepThread32>, "SleepThread32"}, |
| 2165 | {0x0c, SvcWrap32<GetThreadPriority32>, "GetThreadPriority32"}, | 2379 | {0x0c, SvcWrap32<GetThreadPriority32>, "GetThreadPriority32"}, |
| 2166 | {0x0d, nullptr, "SetThreadPriority32"}, | 2380 | {0x0d, SvcWrap32<SetThreadPriority32>, "SetThreadPriority32"}, |
| 2167 | {0x0e, nullptr, "GetThreadCoreMask32"}, | 2381 | {0x0e, SvcWrap32<GetThreadCoreMask32>, "GetThreadCoreMask32"}, |
| 2168 | {0x0f, nullptr, "SetThreadCoreMask32"}, | 2382 | {0x0f, SvcWrap32<SetThreadCoreMask32>, "SetThreadCoreMask32"}, |
| 2169 | {0x10, nullptr, "GetCurrentProcessorNumber32"}, | 2383 | {0x10, SvcWrap32<GetCurrentProcessorNumber32>, "GetCurrentProcessorNumber32"}, |
| 2170 | {0x11, nullptr, "SignalEvent32"}, | 2384 | {0x11, SvcWrap32<SignalEvent32>, "SignalEvent32"}, |
| 2171 | {0x12, nullptr, "ClearEvent32"}, | 2385 | {0x12, SvcWrap32<ClearEvent32>, "ClearEvent32"}, |
| 2172 | {0x13, nullptr, "MapSharedMemory32"}, | 2386 | {0x13, SvcWrap32<MapSharedMemory32>, "MapSharedMemory32"}, |
| 2173 | {0x14, nullptr, "UnmapSharedMemory32"}, | 2387 | {0x14, nullptr, "UnmapSharedMemory32"}, |
| 2174 | {0x15, nullptr, "CreateTransferMemory32"}, | 2388 | {0x15, SvcWrap32<CreateTransferMemory32>, "CreateTransferMemory32"}, |
| 2175 | {0x16, SvcWrap32<CloseHandle32>, "CloseHandle32"}, | 2389 | {0x16, SvcWrap32<CloseHandle32>, "CloseHandle32"}, |
| 2176 | {0x17, nullptr, "ResetSignal32"}, | 2390 | {0x17, SvcWrap32<ResetSignal32>, "ResetSignal32"}, |
| 2177 | {0x18, SvcWrap32<WaitSynchronization32>, "WaitSynchronization32"}, | 2391 | {0x18, SvcWrap32<WaitSynchronization32>, "WaitSynchronization32"}, |
| 2178 | {0x19, nullptr, "CancelSynchronization32"}, | 2392 | {0x19, SvcWrap32<CancelSynchronization32>, "CancelSynchronization32"}, |
| 2179 | {0x1a, nullptr, "ArbitrateLock32"}, | 2393 | {0x1a, SvcWrap32<ArbitrateLock32>, "ArbitrateLock32"}, |
| 2180 | {0x1b, nullptr, "ArbitrateUnlock32"}, | 2394 | {0x1b, SvcWrap32<ArbitrateUnlock32>, "ArbitrateUnlock32"}, |
| 2181 | {0x1c, nullptr, "WaitProcessWideKeyAtomic32"}, | 2395 | {0x1c, SvcWrap32<WaitProcessWideKeyAtomic32>, "WaitProcessWideKeyAtomic32"}, |
| 2182 | {0x1d, SvcWrap32<SignalProcessWideKey32>, "SignalProcessWideKey32"}, | 2396 | {0x1d, SvcWrap32<SignalProcessWideKey32>, "SignalProcessWideKey32"}, |
| 2183 | {0x1e, nullptr, "GetSystemTick32"}, | 2397 | {0x1e, SvcWrap32<GetSystemTick32>, "GetSystemTick32"}, |
| 2184 | {0x1f, SvcWrap32<ConnectToNamedPort32>, "ConnectToNamedPort32"}, | 2398 | {0x1f, SvcWrap32<ConnectToNamedPort32>, "ConnectToNamedPort32"}, |
| 2185 | {0x20, nullptr, "Unknown"}, | 2399 | {0x20, nullptr, "Unknown"}, |
| 2186 | {0x21, SvcWrap32<SendSyncRequest32>, "SendSyncRequest32"}, | 2400 | {0x21, SvcWrap32<SendSyncRequest32>, "SendSyncRequest32"}, |
| 2187 | {0x22, nullptr, "SendSyncRequestWithUserBuffer32"}, | 2401 | {0x22, nullptr, "SendSyncRequestWithUserBuffer32"}, |
| 2188 | {0x23, nullptr, "Unknown"}, | 2402 | {0x23, nullptr, "Unknown"}, |
| 2189 | {0x24, nullptr, "GetProcessId32"}, | 2403 | {0x24, SvcWrap32<GetProcessId32>, "GetProcessId32"}, |
| 2190 | {0x25, SvcWrap32<GetThreadId32>, "GetThreadId32"}, | 2404 | {0x25, SvcWrap32<GetThreadId32>, "GetThreadId32"}, |
| 2191 | {0x26, nullptr, "Break32"}, | 2405 | {0x26, SvcWrap32<Break32>, "Break32"}, |
| 2192 | {0x27, nullptr, "OutputDebugString32"}, | 2406 | {0x27, nullptr, "OutputDebugString32"}, |
| 2193 | {0x28, nullptr, "Unknown"}, | 2407 | {0x28, nullptr, "Unknown"}, |
| 2194 | {0x29, SvcWrap32<GetInfo32>, "GetInfo32"}, | 2408 | {0x29, SvcWrap32<GetInfo32>, "GetInfo32"}, |
| 2195 | {0x2a, nullptr, "Unknown"}, | 2409 | {0x2a, nullptr, "Unknown"}, |
| 2196 | {0x2b, nullptr, "Unknown"}, | 2410 | {0x2b, nullptr, "Unknown"}, |
| 2197 | {0x2c, nullptr, "MapPhysicalMemory32"}, | 2411 | {0x2c, SvcWrap32<MapPhysicalMemory32>, "MapPhysicalMemory32"}, |
| 2198 | {0x2d, nullptr, "UnmapPhysicalMemory32"}, | 2412 | {0x2d, SvcWrap32<UnmapPhysicalMemory32>, "UnmapPhysicalMemory32"}, |
| 2199 | {0x2e, nullptr, "Unknown"}, | 2413 | {0x2e, nullptr, "Unknown"}, |
| 2200 | {0x2f, nullptr, "Unknown"}, | 2414 | {0x2f, nullptr, "Unknown"}, |
| 2201 | {0x30, nullptr, "Unknown"}, | 2415 | {0x30, nullptr, "Unknown"}, |
| 2202 | {0x31, nullptr, "Unknown"}, | 2416 | {0x31, nullptr, "Unknown"}, |
| 2203 | {0x32, nullptr, "SetThreadActivity32"}, | 2417 | {0x32, SvcWrap32<SetThreadActivity32>, "SetThreadActivity32"}, |
| 2204 | {0x33, nullptr, "GetThreadContext32"}, | 2418 | {0x33, SvcWrap32<GetThreadContext32>, "GetThreadContext32"}, |
| 2205 | {0x34, nullptr, "WaitForAddress32"}, | 2419 | {0x34, SvcWrap32<WaitForAddress32>, "WaitForAddress32"}, |
| 2206 | {0x35, nullptr, "SignalToAddress32"}, | 2420 | {0x35, SvcWrap32<SignalToAddress32>, "SignalToAddress32"}, |
| 2207 | {0x36, nullptr, "Unknown"}, | 2421 | {0x36, nullptr, "Unknown"}, |
| 2208 | {0x37, nullptr, "Unknown"}, | 2422 | {0x37, nullptr, "Unknown"}, |
| 2209 | {0x38, nullptr, "Unknown"}, | 2423 | {0x38, nullptr, "Unknown"}, |
| @@ -2219,7 +2433,7 @@ static const FunctionDef SVC_Table_32[] = { | |||
| 2219 | {0x42, nullptr, "Unknown"}, | 2433 | {0x42, nullptr, "Unknown"}, |
| 2220 | {0x43, nullptr, "ReplyAndReceive32"}, | 2434 | {0x43, nullptr, "ReplyAndReceive32"}, |
| 2221 | {0x44, nullptr, "Unknown"}, | 2435 | {0x44, nullptr, "Unknown"}, |
| 2222 | {0x45, nullptr, "CreateEvent32"}, | 2436 | {0x45, SvcWrap32<CreateEvent32>, "CreateEvent32"}, |
| 2223 | {0x46, nullptr, "Unknown"}, | 2437 | {0x46, nullptr, "Unknown"}, |
| 2224 | {0x47, nullptr, "Unknown"}, | 2438 | {0x47, nullptr, "Unknown"}, |
| 2225 | {0x48, nullptr, "Unknown"}, | 2439 | {0x48, nullptr, "Unknown"}, |
| @@ -2245,7 +2459,7 @@ static const FunctionDef SVC_Table_32[] = { | |||
| 2245 | {0x5c, nullptr, "Unknown"}, | 2459 | {0x5c, nullptr, "Unknown"}, |
| 2246 | {0x5d, nullptr, "Unknown"}, | 2460 | {0x5d, nullptr, "Unknown"}, |
| 2247 | {0x5e, nullptr, "Unknown"}, | 2461 | {0x5e, nullptr, "Unknown"}, |
| 2248 | {0x5F, nullptr, "FlushProcessDataCache32"}, | 2462 | {0x5F, SvcWrap32<FlushProcessDataCache32>, "FlushProcessDataCache32"}, |
| 2249 | {0x60, nullptr, "Unknown"}, | 2463 | {0x60, nullptr, "Unknown"}, |
| 2250 | {0x61, nullptr, "Unknown"}, | 2464 | {0x61, nullptr, "Unknown"}, |
| 2251 | {0x62, nullptr, "Unknown"}, | 2465 | {0x62, nullptr, "Unknown"}, |
| @@ -2423,13 +2637,10 @@ static const FunctionDef* GetSVCInfo64(u32 func_num) { | |||
| 2423 | return &SVC_Table_64[func_num]; | 2637 | return &SVC_Table_64[func_num]; |
| 2424 | } | 2638 | } |
| 2425 | 2639 | ||
| 2426 | MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70)); | ||
| 2427 | |||
| 2428 | void Call(Core::System& system, u32 immediate) { | 2640 | void Call(Core::System& system, u32 immediate) { |
| 2429 | MICROPROFILE_SCOPE(Kernel_SVC); | 2641 | system.ExitDynarmicProfile(); |
| 2430 | 2642 | auto& kernel = system.Kernel(); | |
| 2431 | // Lock the global kernel mutex when we enter the kernel HLE. | 2643 | kernel.EnterSVCProfile(); |
| 2432 | std::lock_guard lock{HLE::g_hle_lock}; | ||
| 2433 | 2644 | ||
| 2434 | const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate) | 2645 | const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate) |
| 2435 | : GetSVCInfo32(immediate); | 2646 | : GetSVCInfo32(immediate); |
| @@ -2442,6 +2653,9 @@ void Call(Core::System& system, u32 immediate) { | |||
| 2442 | } else { | 2653 | } else { |
| 2443 | LOG_CRITICAL(Kernel_SVC, "Unknown SVC function 0x{:X}", immediate); | 2654 | LOG_CRITICAL(Kernel_SVC, "Unknown SVC function 0x{:X}", immediate); |
| 2444 | } | 2655 | } |
| 2656 | |||
| 2657 | kernel.ExitSVCProfile(); | ||
| 2658 | system.EnterDynarmicProfile(); | ||
| 2445 | } | 2659 | } |
| 2446 | 2660 | ||
| 2447 | } // namespace Kernel::Svc | 2661 | } // namespace Kernel::Svc |
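For reference, both SVC tables resolve the immediate the same way: index into a flat FunctionDef array and treat a null handler as known-but-unimplemented, which feeds the two logging paths in Call(). A pared-down sketch of that dispatch, with a stub type in place of Core::System:

    #include <cstdint>
    #include <cstdio>

    struct System {}; // stand-in for Core::System

    // Simplified from the FunctionDef shown above: id, handler, name.
    struct FunctionDef {
        uint32_t id;
        void (*func)(System&);
        const char* name;
    };

    static void SetHeapSizeStub(System&) {
        std::printf("SetHeapSize32 called\n");
    }

    static constexpr FunctionDef kTable[] = {
        {0x00, nullptr, "Unknown"},
        {0x01, SetHeapSizeStub, "SetHeapSize32"},
    };

    static void Dispatch(System& system, uint32_t immediate) {
        if (immediate >= sizeof(kTable) / sizeof(kTable[0])) {
            std::printf("Unknown SVC 0x%X\n", immediate); // out-of-range immediate
            return;
        }
        const FunctionDef& info = kTable[immediate];
        if (info.func == nullptr) {
            // Matches the dispatcher's "unimplemented SVC" logging path.
            std::printf("Unimplemented SVC %s\n", info.name);
            return;
        }
        info.func(system);
    }

    int main() {
        System system;
        Dispatch(system, 0x01); // SetHeapSize32 called
        Dispatch(system, 0x00); // Unimplemented SVC Unknown
        return 0;
    }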
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h index 7d735e3fa..0b6dd9df0 100644 --- a/src/core/hle/kernel/svc_wrap.h +++ b/src/core/hle/kernel/svc_wrap.h | |||
| @@ -350,13 +350,50 @@ void SvcWrap64(Core::System& system) { | |||
| 350 | func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2)); | 350 | func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2)); |
| 351 | } | 351 | } |
| 352 | 352 | ||
| 353 | // Used by QueryMemory32 | 353 | // Used by QueryMemory32, ArbitrateLock32 |
| 354 | template <ResultCode func(Core::System&, u32, u32, u32)> | 354 | template <ResultCode func(Core::System&, u32, u32, u32)> |
| 355 | void SvcWrap32(Core::System& system) { | 355 | void SvcWrap32(Core::System& system) { |
| 356 | FuncReturn32(system, | 356 | FuncReturn32(system, |
| 357 | func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2)).raw); | 357 | func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2)).raw); |
| 358 | } | 358 | } |
| 359 | 359 | ||
| 360 | // Used by Break32 | ||
| 361 | template <void func(Core::System&, u32, u32, u32)> | ||
| 362 | void SvcWrap32(Core::System& system) { | ||
| 363 | func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2)); | ||
| 364 | } | ||
| 365 | |||
| 366 | // Used by ExitProcess32, ExitThread32 | ||
| 367 | template <void func(Core::System&)> | ||
| 368 | void SvcWrap32(Core::System& system) { | ||
| 369 | func(system); | ||
| 370 | } | ||
| 371 | |||
| 372 | // Used by GetCurrentProcessorNumber32 | ||
| 373 | template <u32 func(Core::System&)> | ||
| 374 | void SvcWrap32(Core::System& system) { | ||
| 375 | FuncReturn32(system, func(system)); | ||
| 376 | } | ||
| 377 | |||
| 378 | // Used by SleepThread32 | ||
| 379 | template <void func(Core::System&, u32, u32)> | ||
| 380 | void SvcWrap32(Core::System& system) { | ||
| 381 | func(system, Param32(system, 0), Param32(system, 1)); | ||
| 382 | } | ||
| 383 | |||
| 384 | // Used by CreateThread32 | ||
| 385 | template <ResultCode func(Core::System&, Handle*, u32, u32, u32, u32, s32)> | ||
| 386 | void SvcWrap32(Core::System& system) { | ||
| 387 | Handle param_1 = 0; | ||
| 388 | |||
| 389 | const u32 retval = func(system, ¶m_1, Param32(system, 0), Param32(system, 1), | ||
| 390 | Param32(system, 2), Param32(system, 3), Param32(system, 4)) | ||
| 391 | .raw; | ||
| 392 | |||
| 393 | system.CurrentArmInterface().SetReg(1, param_1); | ||
| 394 | FuncReturn(system, retval); | ||
| 395 | } | ||
| 396 | |||
| 360 | // Used by GetInfo32 | 397 | // Used by GetInfo32 |
| 361 | template <ResultCode func(Core::System&, u32*, u32*, u32, u32, u32, u32)> | 398 | template <ResultCode func(Core::System&, u32*, u32*, u32, u32, u32, u32)> |
| 362 | void SvcWrap32(Core::System& system) { | 399 | void SvcWrap32(Core::System& system) { |
| @@ -393,18 +430,114 @@ void SvcWrap32(Core::System& system) { | |||
| 393 | FuncReturn(system, retval); | 430 | FuncReturn(system, retval); |
| 394 | } | 431 | } |
| 395 | 432 | ||
| 433 | // Used by GetSystemTick32 | ||
| 434 | template <void func(Core::System&, u32*, u32*)> | ||
| 435 | void SvcWrap32(Core::System& system) { | ||
| 436 | u32 param_1 = 0; | ||
| 437 | u32 param_2 = 0; | ||
| 438 | |||
| 439 | func(system, ¶m_1, ¶m_2); | ||
| 440 | system.CurrentArmInterface().SetReg(0, param_1); | ||
| 441 | system.CurrentArmInterface().SetReg(1, param_2); | ||
| 442 | } | ||
| 443 | |||
| 444 | // Used by CreateEvent32 | ||
| 445 | template <ResultCode func(Core::System&, Handle*, Handle*)> | ||
| 446 | void SvcWrap32(Core::System& system) { | ||
| 447 | Handle param_1 = 0; | ||
| 448 | Handle param_2 = 0; | ||
| 449 | |||
| 450 | const u32 retval = func(system, ¶m_1, ¶m_2).raw; | ||
| 451 | system.CurrentArmInterface().SetReg(1, param_1); | ||
| 452 | system.CurrentArmInterface().SetReg(2, param_2); | ||
| 453 | FuncReturn(system, retval); | ||
| 454 | } | ||
| 455 | |||
| 456 | // Used by GetThreadId32 | ||
| 457 | template <ResultCode func(Core::System&, Handle, u32*, u32*, u32*)> | ||
| 458 | void SvcWrap32(Core::System& system) { | ||
| 459 | u32 param_1 = 0; | ||
| 460 | u32 param_2 = 0; | ||
| 461 | u32 param_3 = 0; | ||
| 462 | |||
| 463 | const u32 retval = func(system, Param32(system, 2), ¶m_1, ¶m_2, ¶m_3).raw; | ||
| 464 | system.CurrentArmInterface().SetReg(1, param_1); | ||
| 465 | system.CurrentArmInterface().SetReg(2, param_2); | ||
| 466 | system.CurrentArmInterface().SetReg(3, param_3); | ||
| 467 | FuncReturn(system, retval); | ||
| 468 | } | ||
| 469 | |||
| 396 | // Used by SignalProcessWideKey32 | 470 | // Used by SignalProcessWideKey32 |
| 397 | template <void func(Core::System&, u32, s32)> | 471 | template <void func(Core::System&, u32, s32)> |
| 398 | void SvcWrap32(Core::System& system) { | 472 | void SvcWrap32(Core::System& system) { |
| 399 | func(system, static_cast<u32>(Param(system, 0)), static_cast<s32>(Param(system, 1))); | 473 | func(system, static_cast<u32>(Param(system, 0)), static_cast<s32>(Param(system, 1))); |
| 400 | } | 474 | } |
| 401 | 475 | ||
| 402 | // Used by SendSyncRequest32 | 476 | // Used by SetThreadPriority32 |
| 477 | template <ResultCode func(Core::System&, Handle, u32)> | ||
| 478 | void SvcWrap32(Core::System& system) { | ||
| 479 | const u32 retval = | ||
| 480 | func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw; | ||
| 481 | FuncReturn(system, retval); | ||
| 482 | } | ||
| 483 | |||
| 484 | // Used by SetThreadCoreMask32 | ||
| 485 | template <ResultCode func(Core::System&, Handle, u32, u32, u32)> | ||
| 486 | void SvcWrap32(Core::System& system) { | ||
| 487 | const u32 retval = | ||
| 488 | func(system, static_cast<Handle>(Param(system, 0)), static_cast<u32>(Param(system, 1)), | ||
| 489 | static_cast<u32>(Param(system, 2)), static_cast<u32>(Param(system, 3))) | ||
| 490 | .raw; | ||
| 491 | FuncReturn(system, retval); | ||
| 492 | } | ||
| 493 | |||
| 494 | // Used by WaitProcessWideKeyAtomic32 | ||
| 495 | template <ResultCode func(Core::System&, u32, u32, Handle, u32, u32)> | ||
| 496 | void SvcWrap32(Core::System& system) { | ||
| 497 | const u32 retval = | ||
| 498 | func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)), | ||
| 499 | static_cast<Handle>(Param(system, 2)), static_cast<u32>(Param(system, 3)), | ||
| 500 | static_cast<u32>(Param(system, 4))) | ||
| 501 | .raw; | ||
| 502 | FuncReturn(system, retval); | ||
| 503 | } | ||
| 504 | |||
| 505 | // Used by WaitForAddress32 | ||
| 506 | template <ResultCode func(Core::System&, u32, u32, s32, u32, u32)> | ||
| 507 | void SvcWrap32(Core::System& system) { | ||
| 508 | const u32 retval = func(system, static_cast<u32>(Param(system, 0)), | ||
| 509 | static_cast<u32>(Param(system, 1)), static_cast<s32>(Param(system, 2)), | ||
| 510 | static_cast<u32>(Param(system, 3)), static_cast<u32>(Param(system, 4))) | ||
| 511 | .raw; | ||
| 512 | FuncReturn(system, retval); | ||
| 513 | } | ||
| 514 | |||
| 515 | // Used by SignalToAddress32 | ||
| 516 | template <ResultCode func(Core::System&, u32, u32, s32, s32)> | ||
| 517 | void SvcWrap32(Core::System& system) { | ||
| 518 | const u32 retval = | ||
| 519 | func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)), | ||
| 520 | static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3))) | ||
| 521 | .raw; | ||
| 522 | FuncReturn(system, retval); | ||
| 523 | } | ||
| 524 | |||
| 525 | // Used by SendSyncRequest32, ArbitrateUnlock32 | ||
| 403 | template <ResultCode func(Core::System&, u32)> | 526 | template <ResultCode func(Core::System&, u32)> |
| 404 | void SvcWrap32(Core::System& system) { | 527 | void SvcWrap32(Core::System& system) { |
| 405 | FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw); | 528 | FuncReturn(system, func(system, static_cast<u32>(Param(system, 0))).raw); |
| 406 | } | 529 | } |
| 407 | 530 | ||
| 531 | // Used by CreateTransferMemory32 | ||
| 532 | template <ResultCode func(Core::System&, Handle*, u32, u32, u32)> | ||
| 533 | void SvcWrap32(Core::System& system) { | ||
| 534 | Handle handle = 0; | ||
| 535 | const u32 retval = | ||
| 536 | func(system, &handle, Param32(system, 1), Param32(system, 2), Param32(system, 3)).raw; | ||
| 537 | system.CurrentArmInterface().SetReg(1, handle); | ||
| 538 | FuncReturn(system, retval); | ||
| 539 | } | ||
| 540 | |||
| 408 | // Used by WaitSynchronization32 | 541 | // Used by WaitSynchronization32 |
| 409 | template <ResultCode func(Core::System&, u32, u32, s32, u32, Handle*)> | 542 | template <ResultCode func(Core::System&, u32, u32, s32, u32, Handle*)> |
| 410 | void SvcWrap32(Core::System& system) { | 543 | void SvcWrap32(Core::System& system) { |
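Every SvcWrap32 overload above shares one idea: the wrapped SVC is a non-type template parameter, so overload selection on its signature picks the marshalling body that moves the right registers. A toy version against a fake register file; Regs, Wrap, Add, and Split are all invented for illustration, where the real code uses Param32, FuncReturn, and the ARM interface:

    #include <cassert>
    #include <cstdint>

    // Toy register file standing in for the emulated ARM interface.
    struct Regs {
        uint32_t r[8]{};
    };

    // Each signature gets its own overload; each instantiation bakes in
    // the marshalling for that shape of SVC.
    template <uint32_t func(uint32_t, uint32_t)>
    void Wrap(Regs& regs) {
        regs.r[0] = func(regs.r[0], regs.r[1]); // result returns in r0
    }

    template <uint32_t func(uint32_t, uint32_t*)>
    void Wrap(Regs& regs) {
        uint32_t out = 0;
        regs.r[0] = func(regs.r[0], &out);
        regs.r[1] = out; // out-parameters land in the following register
    }

    static uint32_t Add(uint32_t a, uint32_t b) {
        return a + b;
    }

    static uint32_t Split(uint32_t v, uint32_t* high_half) {
        *high_half = v >> 16;
        return v & 0xFFFF;
    }

    int main() {
        Regs regs;
        regs.r[0] = 2;
        regs.r[1] = 3;
        Wrap<Add>(regs); // picks the (u32, u32) overload
        assert(regs.r[0] == 5);

        regs.r[0] = 0xABCD1234;
        Wrap<Split>(regs); // picks the (u32, u32*) overload
        assert(regs.r[0] == 0x1234 && regs.r[1] == 0xABCD);
        return 0;
    }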
diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp index dc37fad1a..851b702a5 100644 --- a/src/core/hle/kernel/synchronization.cpp +++ b/src/core/hle/kernel/synchronization.cpp | |||
| @@ -10,78 +10,107 @@ | |||
| 10 | #include "core/hle/kernel/synchronization.h" | 10 | #include "core/hle/kernel/synchronization.h" |
| 11 | #include "core/hle/kernel/synchronization_object.h" | 11 | #include "core/hle/kernel/synchronization_object.h" |
| 12 | #include "core/hle/kernel/thread.h" | 12 | #include "core/hle/kernel/thread.h" |
| 13 | #include "core/hle/kernel/time_manager.h" | ||
| 13 | 14 | ||
| 14 | namespace Kernel { | 15 | namespace Kernel { |
| 15 | 16 | ||
| 16 | /// Default thread wakeup callback for WaitSynchronization | ||
| 17 | static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread, | ||
| 18 | std::shared_ptr<SynchronizationObject> object, | ||
| 19 | std::size_t index) { | ||
| 20 | ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch); | ||
| 21 | |||
| 22 | if (reason == ThreadWakeupReason::Timeout) { | ||
| 23 | thread->SetWaitSynchronizationResult(RESULT_TIMEOUT); | ||
| 24 | return true; | ||
| 25 | } | ||
| 26 | |||
| 27 | ASSERT(reason == ThreadWakeupReason::Signal); | ||
| 28 | thread->SetWaitSynchronizationResult(RESULT_SUCCESS); | ||
| 29 | thread->SetWaitSynchronizationOutput(static_cast<u32>(index)); | ||
| 30 | return true; | ||
| 31 | } | ||
| 32 | |||
| 33 | Synchronization::Synchronization(Core::System& system) : system{system} {} | 17 | Synchronization::Synchronization(Core::System& system) : system{system} {} |
| 34 | 18 | ||
| 35 | void Synchronization::SignalObject(SynchronizationObject& obj) const { | 19 | void Synchronization::SignalObject(SynchronizationObject& obj) const { |
| 20 | auto& kernel = system.Kernel(); | ||
| 21 | SchedulerLock lock(kernel); | ||
| 22 | auto& time_manager = kernel.TimeManager(); | ||
| 36 | if (obj.IsSignaled()) { | 23 | if (obj.IsSignaled()) { |
| 37 | obj.WakeupAllWaitingThreads(); | 24 | for (auto thread : obj.GetWaitingThreads()) { |
| 25 | if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) { | ||
| 26 | if (thread->GetStatus() != ThreadStatus::WaitHLEEvent) { | ||
| 27 | ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch); | ||
| 28 | ASSERT(thread->IsWaitingSync()); | ||
| 29 | } | ||
| 30 | thread->SetSynchronizationResults(&obj, RESULT_SUCCESS); | ||
| 31 | thread->ResumeFromWait(); | ||
| 32 | } | ||
| 33 | } | ||
| 34 | obj.ClearWaitingThreads(); | ||
| 38 | } | 35 | } |
| 39 | } | 36 | } |
| 40 | 37 | ||
| 41 | std::pair<ResultCode, Handle> Synchronization::WaitFor( | 38 | std::pair<ResultCode, Handle> Synchronization::WaitFor( |
| 42 | std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) { | 39 | std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) { |
| 40 | auto& kernel = system.Kernel(); | ||
| 43 | auto* const thread = system.CurrentScheduler().GetCurrentThread(); | 41 | auto* const thread = system.CurrentScheduler().GetCurrentThread(); |
| 44 | // Find the first object that is acquirable in the provided list of objects | 42 | Handle event_handle = InvalidHandle; |
| 45 | const auto itr = std::find_if(sync_objects.begin(), sync_objects.end(), | 43 | { |
| 46 | [thread](const std::shared_ptr<SynchronizationObject>& object) { | 44 | SchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds); |
| 47 | return object->IsSignaled(); | 45 | const auto itr = |
| 48 | }); | 46 | std::find_if(sync_objects.begin(), sync_objects.end(), |
| 49 | 47 | [thread](const std::shared_ptr<SynchronizationObject>& object) { | |
| 50 | if (itr != sync_objects.end()) { | 48 | return object->IsSignaled(); |
| 51 | // We found a ready object, acquire it and set the result value | 49 | }); |
| 52 | SynchronizationObject* object = itr->get(); | 50 | |
| 53 | object->Acquire(thread); | 51 | if (itr != sync_objects.end()) { |
| 54 | const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr)); | 52 | // We found a ready object, acquire it and set the result value |
| 55 | return {RESULT_SUCCESS, index}; | 53 | SynchronizationObject* object = itr->get(); |
| 54 | object->Acquire(thread); | ||
| 55 | const u32 index = static_cast<u32>(std::distance(sync_objects.begin(), itr)); | ||
| 56 | lock.CancelSleep(); | ||
| 57 | return {RESULT_SUCCESS, index}; | ||
| 58 | } | ||
| 59 | |||
| 60 | if (nano_seconds == 0) { | ||
| 61 | lock.CancelSleep(); | ||
| 62 | return {RESULT_TIMEOUT, InvalidHandle}; | ||
| 63 | } | ||
| 64 | |||
| 65 | if (thread->IsPendingTermination()) { | ||
| 66 | lock.CancelSleep(); | ||
| 67 | return {ERR_THREAD_TERMINATING, InvalidHandle}; | ||
| 68 | } | ||
| 69 | |||
| 70 | if (thread->IsSyncCancelled()) { | ||
| 71 | thread->SetSyncCancelled(false); | ||
| 72 | lock.CancelSleep(); | ||
| 73 | return {ERR_SYNCHRONIZATION_CANCELED, InvalidHandle}; | ||
| 74 | } | ||
| 75 | |||
| 76 | for (auto& object : sync_objects) { | ||
| 77 | object->AddWaitingThread(SharedFrom(thread)); | ||
| 78 | } | ||
| 79 | |||
| 80 | thread->SetSynchronizationObjects(&sync_objects); | ||
| 81 | thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT); | ||
| 82 | thread->SetStatus(ThreadStatus::WaitSynch); | ||
| 83 | thread->SetWaitingSync(true); | ||
| 56 | } | 84 | } |
| 85 | thread->SetWaitingSync(false); | ||
| 57 | 86 | ||
| 58 | // No objects were ready to be acquired, prepare to suspend the thread. | 87 | if (event_handle != InvalidHandle) { |
| 59 | 88 | auto& time_manager = kernel.TimeManager(); | |
| 60 | // If a timeout value of 0 was provided, just return the Timeout error code instead of | 89 | time_manager.UnscheduleTimeEvent(event_handle); |
| 61 | // suspending the thread. | ||
| 62 | if (nano_seconds == 0) { | ||
| 63 | return {RESULT_TIMEOUT, InvalidHandle}; | ||
| 64 | } | 90 | } |
| 65 | 91 | ||
| 66 | if (thread->IsSyncCancelled()) { | 92 | { |
| 67 | thread->SetSyncCancelled(false); | 93 | SchedulerLock lock(kernel); |
| 68 | return {ERR_SYNCHRONIZATION_CANCELED, InvalidHandle}; | 94 | ResultCode signaling_result = thread->GetSignalingResult(); |
| 95 | SynchronizationObject* signaling_object = thread->GetSignalingObject(); | ||
| 96 | thread->SetSynchronizationObjects(nullptr); | ||
| 97 | auto shared_thread = SharedFrom(thread); | ||
| 98 | for (auto& obj : sync_objects) { | ||
| 99 | obj->RemoveWaitingThread(shared_thread); | ||
| 100 | } | ||
| 101 | if (signaling_object != nullptr) { | ||
| 102 | const auto itr = std::find_if( | ||
| 103 | sync_objects.begin(), sync_objects.end(), | ||
| 104 | [signaling_object](const std::shared_ptr<SynchronizationObject>& object) { | ||
| 105 | return object.get() == signaling_object; | ||
| 106 | }); | ||
| 107 | ASSERT(itr != sync_objects.end()); | ||
| 108 | signaling_object->Acquire(thread); | ||
| 109 | const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr)); | ||
| 110 | return {signaling_result, index}; | ||
| 111 | } | ||
| 112 | return {signaling_result, -1}; | ||
| 69 | } | 113 | } |
| 70 | |||
| 71 | for (auto& object : sync_objects) { | ||
| 72 | object->AddWaitingThread(SharedFrom(thread)); | ||
| 73 | } | ||
| 74 | |||
| 75 | thread->SetSynchronizationObjects(std::move(sync_objects)); | ||
| 76 | thread->SetStatus(ThreadStatus::WaitSynch); | ||
| 77 | |||
| 78 | // Create an event to wake the thread up after the specified nanosecond delay has passed | ||
| 79 | thread->WakeAfterDelay(nano_seconds); | ||
| 80 | thread->SetWakeupCallback(DefaultThreadWakeupCallback); | ||
| 81 | |||
| 82 | system.PrepareReschedule(thread->GetProcessorID()); | ||
| 83 | |||
| 84 | return {RESULT_TIMEOUT, InvalidHandle}; | ||
| 85 | } | 114 | } |
| 86 | 115 | ||
| 87 | } // namespace Kernel | 116 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/synchronization_object.cpp b/src/core/hle/kernel/synchronization_object.cpp index 43f3eef18..ba4d39157 100644 --- a/src/core/hle/kernel/synchronization_object.cpp +++ b/src/core/hle/kernel/synchronization_object.cpp | |||
| @@ -38,68 +38,8 @@ void SynchronizationObject::RemoveWaitingThread(std::shared_ptr<Thread> thread) | |||
| 38 | waiting_threads.erase(itr); | 38 | waiting_threads.erase(itr); |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | std::shared_ptr<Thread> SynchronizationObject::GetHighestPriorityReadyThread() const { | 41 | void SynchronizationObject::ClearWaitingThreads() { |
| 42 | Thread* candidate = nullptr; | 42 | waiting_threads.clear(); |
| 43 | u32 candidate_priority = THREADPRIO_LOWEST + 1; | ||
| 44 | |||
| 45 | for (const auto& thread : waiting_threads) { | ||
| 46 | const ThreadStatus thread_status = thread->GetStatus(); | ||
| 47 | |||
| 48 | // The list of waiting threads must not contain threads that are not waiting to be awakened. | ||
| 49 | ASSERT_MSG(thread_status == ThreadStatus::WaitSynch || | ||
| 50 | thread_status == ThreadStatus::WaitHLEEvent, | ||
| 51 | "Inconsistent thread statuses in waiting_threads"); | ||
| 52 | |||
| 53 | if (thread->GetPriority() >= candidate_priority) | ||
| 54 | continue; | ||
| 55 | |||
| 56 | if (ShouldWait(thread.get())) | ||
| 57 | continue; | ||
| 58 | |||
| 59 | candidate = thread.get(); | ||
| 60 | candidate_priority = thread->GetPriority(); | ||
| 61 | } | ||
| 62 | |||
| 63 | return SharedFrom(candidate); | ||
| 64 | } | ||
| 65 | |||
| 66 | void SynchronizationObject::WakeupWaitingThread(std::shared_ptr<Thread> thread) { | ||
| 67 | ASSERT(!ShouldWait(thread.get())); | ||
| 68 | |||
| 69 | if (!thread) { | ||
| 70 | return; | ||
| 71 | } | ||
| 72 | |||
| 73 | if (thread->IsSleepingOnWait()) { | ||
| 74 | for (const auto& object : thread->GetSynchronizationObjects()) { | ||
| 75 | ASSERT(!object->ShouldWait(thread.get())); | ||
| 76 | object->Acquire(thread.get()); | ||
| 77 | } | ||
| 78 | } else { | ||
| 79 | Acquire(thread.get()); | ||
| 80 | } | ||
| 81 | |||
| 82 | const std::size_t index = thread->GetSynchronizationObjectIndex(SharedFrom(this)); | ||
| 83 | |||
| 84 | thread->ClearSynchronizationObjects(); | ||
| 85 | |||
| 86 | thread->CancelWakeupTimer(); | ||
| 87 | |||
| 88 | bool resume = true; | ||
| 89 | if (thread->HasWakeupCallback()) { | ||
| 90 | resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Signal, thread, SharedFrom(this), | ||
| 91 | index); | ||
| 92 | } | ||
| 93 | if (resume) { | ||
| 94 | thread->ResumeFromWait(); | ||
| 95 | kernel.PrepareReschedule(thread->GetProcessorID()); | ||
| 96 | } | ||
| 97 | } | ||
| 98 | |||
| 99 | void SynchronizationObject::WakeupAllWaitingThreads() { | ||
| 100 | while (auto thread = GetHighestPriorityReadyThread()) { | ||
| 101 | WakeupWaitingThread(thread); | ||
| 102 | } | ||
| 103 | } | 43 | } |
| 104 | 44 | ||
| 105 | const std::vector<std::shared_ptr<Thread>>& SynchronizationObject::GetWaitingThreads() const { | 45 | const std::vector<std::shared_ptr<Thread>>& SynchronizationObject::GetWaitingThreads() const { |
diff --git a/src/core/hle/kernel/synchronization_object.h b/src/core/hle/kernel/synchronization_object.h index 741c31faf..f89b24204 100644 --- a/src/core/hle/kernel/synchronization_object.h +++ b/src/core/hle/kernel/synchronization_object.h | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | namespace Kernel { | 12 | namespace Kernel { |
| 13 | 13 | ||
| 14 | class KernelCore; | 14 | class KernelCore; |
| 15 | class Synchronization; | ||
| 15 | class Thread; | 16 | class Thread; |
| 16 | 17 | ||
| 17 | /// Class that represents a Kernel object that a thread can be waiting on | 18 | /// Class that represents a Kernel object that a thread can be waiting on |
| @@ -49,24 +50,11 @@ public: | |||
| 49 | */ | 50 | */ |
| 50 | void RemoveWaitingThread(std::shared_ptr<Thread> thread); | 51 | void RemoveWaitingThread(std::shared_ptr<Thread> thread); |
| 51 | 52 | ||
| 52 | /** | ||
| 53 | * Wake up all threads waiting on this object that can be awoken, in priority order, | ||
| 54 | * and set the synchronization result and output of the thread. | ||
| 55 | */ | ||
| 56 | void WakeupAllWaitingThreads(); | ||
| 57 | |||
| 58 | /** | ||
| 59 | * Wakes up a single thread waiting on this object. | ||
| 60 | * @param thread Thread that is waiting on this object to wakeup. | ||
| 61 | */ | ||
| 62 | void WakeupWaitingThread(std::shared_ptr<Thread> thread); | ||
| 63 | |||
| 64 | /// Obtains the highest priority thread that is ready to run from this object's waiting list. | ||
| 65 | std::shared_ptr<Thread> GetHighestPriorityReadyThread() const; | ||
| 66 | |||
| 67 | /// Get a const reference to the waiting threads list for debug use | 53 | /// Get a const reference to the waiting threads list for debug use |
| 68 | const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const; | 54 | const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const; |
| 69 | 55 | ||
| 56 | void ClearWaitingThreads(); | ||
| 57 | |||
| 70 | protected: | 58 | protected: |
| 71 | bool is_signaled{}; // Tells if this sync object is signaled | 59 |
| 72 | 60 | ||
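With the priority-ordered wakeup machinery (GetHighestPriorityReadyThread, WakeupWaitingThread, WakeupAllWaitingThreads) deleted, waking waiters is centralized in Synchronization::SignalObject, shown in the first hunk of this section: each waiter has its result recorded, is resumed, and the waiter list is dropped in a single ClearWaitingThreads() call. A minimal sketch of that shape, assuming the accessors kept in this header; SignalSketch itself is hypothetical:

    // Sketch only: the post-change signal path, inside namespace Kernel.
    void SignalSketch(SynchronizationObject& obj) {
        for (const auto& thread : obj.GetWaitingThreads()) {
            // Record which object signaled and with what result, then resume.
            thread->SetSynchronizationResults(&obj, RESULT_SUCCESS);
            thread->ResumeFromWait();
        }
        // Every waiter has been handled; clear the list in one step.
        obj.ClearWaitingThreads();
    }

Priority ordering no longer needs to be enforced here: ResumeFromWait() only marks a thread Ready, and the global scheduler already selects among runnable threads by priority.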
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index db7f379ac..2b1092697 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp | |||
| @@ -9,12 +9,21 @@ | |||
| 9 | 9 | ||
| 10 | #include "common/assert.h" | 10 | #include "common/assert.h" |
| 11 | #include "common/common_types.h" | 11 | #include "common/common_types.h" |
| 12 | #include "common/fiber.h" | ||
| 12 | #include "common/logging/log.h" | 13 | #include "common/logging/log.h" |
| 13 | #include "common/thread_queue_list.h" | 14 | #include "common/thread_queue_list.h" |
| 14 | #include "core/arm/arm_interface.h" | 15 | #include "core/arm/arm_interface.h" |
| 16 | #ifdef ARCHITECTURE_x86_64 | ||
| 17 | #include "core/arm/dynarmic/arm_dynarmic_32.h" | ||
| 18 | #include "core/arm/dynarmic/arm_dynarmic_64.h" | ||
| 19 | #endif | ||
| 20 | #include "core/arm/cpu_interrupt_handler.h" | ||
| 21 | #include "core/arm/exclusive_monitor.h" | ||
| 22 | #include "core/arm/unicorn/arm_unicorn.h" | ||
| 15 | #include "core/core.h" | 23 | #include "core/core.h" |
| 16 | #include "core/core_timing.h" | 24 | #include "core/core_timing.h" |
| 17 | #include "core/core_timing_util.h" | 25 | #include "core/core_timing_util.h" |
| 26 | #include "core/cpu_manager.h" | ||
| 18 | #include "core/hardware_properties.h" | 27 | #include "core/hardware_properties.h" |
| 19 | #include "core/hle/kernel/errors.h" | 28 | #include "core/hle/kernel/errors.h" |
| 20 | #include "core/hle/kernel/handle_table.h" | 29 | #include "core/hle/kernel/handle_table.h" |
| @@ -23,6 +32,7 @@ | |||
| 23 | #include "core/hle/kernel/process.h" | 32 | #include "core/hle/kernel/process.h" |
| 24 | #include "core/hle/kernel/scheduler.h" | 33 | #include "core/hle/kernel/scheduler.h" |
| 25 | #include "core/hle/kernel/thread.h" | 34 | #include "core/hle/kernel/thread.h" |
| 35 | #include "core/hle/kernel/time_manager.h" | ||
| 26 | #include "core/hle/result.h" | 36 | #include "core/hle/result.h" |
| 27 | #include "core/memory.h" | 37 | #include "core/memory.h" |
| 28 | 38 | ||
| @@ -44,46 +54,26 @@ Thread::Thread(KernelCore& kernel) : SynchronizationObject{kernel} {} | |||
| 44 | Thread::~Thread() = default; | 54 | Thread::~Thread() = default; |
| 45 | 55 | ||
| 46 | void Thread::Stop() { | 56 | void Thread::Stop() { |
| 47 | // Cancel any outstanding wakeup events for this thread | 57 | { |
| 48 | Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(), | 58 | SchedulerLock lock(kernel); |
| 49 | global_handle); | 59 | SetStatus(ThreadStatus::Dead); |
| 50 | kernel.GlobalHandleTable().Close(global_handle); | 60 | Signal(); |
| 51 | global_handle = 0; | 61 | kernel.GlobalHandleTable().Close(global_handle); |
| 52 | SetStatus(ThreadStatus::Dead); | ||
| 53 | Signal(); | ||
| 54 | |||
| 55 | // Clean up any dangling references in objects that this thread was waiting for | ||
| 56 | for (auto& wait_object : wait_objects) { | ||
| 57 | wait_object->RemoveWaitingThread(SharedFrom(this)); | ||
| 58 | } | ||
| 59 | wait_objects.clear(); | ||
| 60 | |||
| 61 | owner_process->UnregisterThread(this); | ||
| 62 | |||
| 63 | // Mark the TLS slot in the thread's page as free. | ||
| 64 | owner_process->FreeTLSRegion(tls_address); | ||
| 65 | } | ||
| 66 | |||
| 67 | void Thread::WakeAfterDelay(s64 nanoseconds) { | ||
| 68 | // Don't schedule a wakeup if the thread wants to wait forever | ||
| 69 | if (nanoseconds == -1) | ||
| 70 | return; | ||
| 71 | 62 | ||
| 72 | // This function might be called from any thread so we have to be cautious and use the | 63 | if (owner_process) { |
| 73 | // thread-safe version of ScheduleEvent. | 64 | owner_process->UnregisterThread(this); |
| 74 | const s64 cycles = Core::Timing::nsToCycles(std::chrono::nanoseconds{nanoseconds}); | ||
| 75 | Core::System::GetInstance().CoreTiming().ScheduleEvent( | ||
| 76 | cycles, kernel.ThreadWakeupCallbackEventType(), global_handle); | ||
| 77 | } | ||
| 78 | 65 | ||
| 79 | void Thread::CancelWakeupTimer() { | 66 | // Mark the TLS slot in the thread's page as free. |
| 80 | Core::System::GetInstance().CoreTiming().UnscheduleEvent(kernel.ThreadWakeupCallbackEventType(), | 67 | owner_process->FreeTLSRegion(tls_address); |
| 81 | global_handle); | 68 | } |
| 69 | arm_interface.reset(); | ||
| 70 | has_exited = true; | ||
| 71 | } | ||
| 72 | global_handle = 0; | ||
| 82 | } | 73 | } |
| 83 | 74 | ||
| 84 | void Thread::ResumeFromWait() { | 75 | void Thread::ResumeFromWait() { |
| 85 | ASSERT_MSG(wait_objects.empty(), "Thread is waking up while waiting for objects"); | 76 | SchedulerLock lock(kernel); |
| 86 | |||
| 87 | switch (status) { | 77 | switch (status) { |
| 88 | case ThreadStatus::Paused: | 78 | case ThreadStatus::Paused: |
| 89 | case ThreadStatus::WaitSynch: | 79 | case ThreadStatus::WaitSynch: |
| @@ -99,7 +89,7 @@ void Thread::ResumeFromWait() { | |||
| 99 | case ThreadStatus::Ready: | 89 | case ThreadStatus::Ready: |
| 100 | // The thread's wakeup callback must have already been cleared when the thread was first | 90 | // The thread's wakeup callback must have already been cleared when the thread was first |
| 101 | // awoken. | 91 | // awoken. |
| 102 | ASSERT(wakeup_callback == nullptr); | 92 | ASSERT(hle_callback == nullptr); |
| 103 | // If the thread is waiting on multiple wait objects, it might be awoken more than once | 93 | // If the thread is waiting on multiple wait objects, it might be awoken more than once |
| 104 | // before actually resuming. We can ignore subsequent wakeups if the thread status has | 94 | // before actually resuming. We can ignore subsequent wakeups if the thread status has |
| 105 | // already been set to ThreadStatus::Ready. | 95 | // already been set to ThreadStatus::Ready. |
| @@ -115,24 +105,31 @@ void Thread::ResumeFromWait() { | |||
| 115 | return; | 105 | return; |
| 116 | } | 106 | } |
| 117 | 107 | ||
| 118 | wakeup_callback = nullptr; | 108 | SetStatus(ThreadStatus::Ready); |
| 109 | } | ||
| 110 | |||
| 111 | void Thread::OnWakeUp() { | ||
| 112 | SchedulerLock lock(kernel); | ||
| 119 | 113 | ||
| 120 | if (activity == ThreadActivity::Paused) { | 114 | SetStatus(ThreadStatus::Ready); |
| 121 | SetStatus(ThreadStatus::Paused); | 115 | } |
| 122 | return; | ||
| 123 | } | ||
| 124 | 116 | ||
| 117 | ResultCode Thread::Start() { | ||
| 118 | SchedulerLock lock(kernel); | ||
| 125 | SetStatus(ThreadStatus::Ready); | 119 | SetStatus(ThreadStatus::Ready); |
| 120 | return RESULT_SUCCESS; | ||
| 126 | } | 121 | } |
| 127 | 122 | ||
| 128 | void Thread::CancelWait() { | 123 | void Thread::CancelWait() { |
| 129 | if (GetSchedulingStatus() != ThreadSchedStatus::Paused) { | 124 | SchedulerLock lock(kernel); |
| 125 | if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) { | ||
| 130 | is_sync_cancelled = true; | 126 | is_sync_cancelled = true; |
| 131 | return; | 127 | return; |
| 132 | } | 128 | } |
| 129 | // TODO(Blinkhawk): Implement cancel of server session | ||
| 133 | is_sync_cancelled = false; | 130 | is_sync_cancelled = false; |
| 134 | SetWaitSynchronizationResult(ERR_SYNCHRONIZATION_CANCELED); | 131 | SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED); |
| 135 | ResumeFromWait(); | 132 | SetStatus(ThreadStatus::Ready); |
| 136 | } | 133 | } |
| 137 | 134 | ||
| 138 | static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, | 135 | static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, |
| @@ -153,12 +150,29 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, | |||
| 153 | context.fpcr = 0; | 150 | context.fpcr = 0; |
| 154 | } | 151 | } |
| 155 | 152 | ||
| 156 | ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::string name, | 153 | std::shared_ptr<Common::Fiber>& Thread::GetHostContext() { |
| 157 | VAddr entry_point, u32 priority, u64 arg, | 154 | return host_context; |
| 158 | s32 processor_id, VAddr stack_top, | 155 | } |
| 159 | Process& owner_process) { | 156 | |
| 157 | ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags, | ||
| 158 | std::string name, VAddr entry_point, u32 priority, | ||
| 159 | u64 arg, s32 processor_id, VAddr stack_top, | ||
| 160 | Process* owner_process) { | ||
| 161 | std::function<void(void*)> init_func = system.GetCpuManager().GetGuestThreadStartFunc(); | ||
| 162 | void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); | ||
| 163 | return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top, | ||
| 164 | owner_process, std::move(init_func), init_func_parameter); | ||
| 165 | } | ||
| 166 | |||
| 167 | ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags, | ||
| 168 | std::string name, VAddr entry_point, u32 priority, | ||
| 169 | u64 arg, s32 processor_id, VAddr stack_top, | ||
| 170 | Process* owner_process, | ||
| 171 | std::function<void(void*)>&& thread_start_func, | ||
| 172 | void* thread_start_parameter) { | ||
| 173 | auto& kernel = system.Kernel(); | ||
| 160 | // Check if priority is in range. Lowest priority -> highest priority id. | 174 |
| 161 | if (priority > THREADPRIO_LOWEST) { | 175 | if (priority > THREADPRIO_LOWEST && ((type_flags & THREADTYPE_IDLE) == 0)) { |
| 162 | LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority); | 176 | LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority); |
| 163 | return ERR_INVALID_THREAD_PRIORITY; | 177 | return ERR_INVALID_THREAD_PRIORITY; |
| 164 | } | 178 | } |
| @@ -168,11 +182,12 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::strin | |||
| 168 | return ERR_INVALID_PROCESSOR_ID; | 182 | return ERR_INVALID_PROCESSOR_ID; |
| 169 | } | 183 | } |
| 170 | 184 | ||
| 171 | auto& system = Core::System::GetInstance(); | 185 | if (owner_process) { |
| 172 | if (!system.Memory().IsValidVirtualAddress(owner_process, entry_point)) { | 186 | if (!system.Memory().IsValidVirtualAddress(*owner_process, entry_point)) { |
| 173 | LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point); | 187 | LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point); |
| 174 | // TODO (bunnei): Find the correct error code to use here | 188 | // TODO (bunnei): Find the correct error code to use here |
| 175 | return RESULT_UNKNOWN; | 189 | return RESULT_UNKNOWN; |
| 190 | } | ||
| 176 | } | 191 | } |
| 177 | 192 | ||
| 178 | std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel); | 193 | std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel); |
| @@ -183,51 +198,82 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::strin | |||
| 183 | thread->stack_top = stack_top; | 198 | thread->stack_top = stack_top; |
| 184 | thread->tpidr_el0 = 0; | 199 | thread->tpidr_el0 = 0; |
| 185 | thread->nominal_priority = thread->current_priority = priority; | 200 | thread->nominal_priority = thread->current_priority = priority; |
| 186 | thread->last_running_ticks = system.CoreTiming().GetTicks(); | 201 | thread->last_running_ticks = 0; |
| 187 | thread->processor_id = processor_id; | 202 | thread->processor_id = processor_id; |
| 188 | thread->ideal_core = processor_id; | 203 | thread->ideal_core = processor_id; |
| 189 | thread->affinity_mask = 1ULL << processor_id; | 204 | thread->affinity_mask = 1ULL << processor_id; |
| 190 | thread->wait_objects.clear(); | 205 | thread->wait_objects = nullptr; |
| 191 | thread->mutex_wait_address = 0; | 206 | thread->mutex_wait_address = 0; |
| 192 | thread->condvar_wait_address = 0; | 207 | thread->condvar_wait_address = 0; |
| 193 | thread->wait_handle = 0; | 208 | thread->wait_handle = 0; |
| 194 | thread->name = std::move(name); | 209 | thread->name = std::move(name); |
| 195 | thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap(); | 210 | thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap(); |
| 196 | thread->owner_process = &owner_process; | 211 | thread->owner_process = owner_process; |
| 197 | auto& scheduler = kernel.GlobalScheduler(); | 212 | thread->type = type_flags; |
| 198 | scheduler.AddThread(thread); | 213 | if ((type_flags & THREADTYPE_IDLE) == 0) { |
| 199 | thread->tls_address = thread->owner_process->CreateTLSRegion(); | 214 | auto& scheduler = kernel.GlobalScheduler(); |
| 200 | 215 | scheduler.AddThread(thread); | |
| 201 | thread->owner_process->RegisterThread(thread.get()); | 216 | } |
| 217 | if (owner_process) { | ||
| 218 | thread->tls_address = thread->owner_process->CreateTLSRegion(); | ||
| 219 | thread->owner_process->RegisterThread(thread.get()); | ||
| 220 | } else { | ||
| 221 | thread->tls_address = 0; | ||
| 222 | } | ||
| 223 | // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used | ||
| 224 | // to initialize the context | ||
| 225 | thread->arm_interface.reset(); | ||
| 226 | if ((type_flags & THREADTYPE_HLE) == 0) { | ||
| 227 | #ifdef ARCHITECTURE_x86_64 | ||
| 228 | if (owner_process && !owner_process->Is64BitProcess()) { | ||
| 229 | thread->arm_interface = std::make_unique<Core::ARM_Dynarmic_32>( | ||
| 230 | system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(), | ||
| 231 | processor_id); | ||
| 232 | } else { | ||
| 233 | thread->arm_interface = std::make_unique<Core::ARM_Dynarmic_64>( | ||
| 234 | system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(), | ||
| 235 | processor_id); | ||
| 236 | } | ||
| 202 | 237 | ||
| 203 | ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top), | 238 | #else |
| 204 | static_cast<u32>(entry_point), static_cast<u32>(arg)); | 239 | if (owner_process && !owner_process->Is64BitProcess()) { |
| 205 | ResetThreadContext64(thread->context_64, stack_top, entry_point, arg); | 240 | thread->arm_interface = std::make_unique<Core::ARM_Unicorn>(
| 241 | system, kernel.Interrupts(), kernel.IsMulticore(), ARM_Unicorn::Arch::AArch32, | ||
| 242 | processor_id); | ||
| 243 | } else { | ||
| 244 | thread->arm_interface = std::make_unique<Core::ARM_Unicorn>( | ||
| 245 | system, kernel.Interrupts(), kernel.IsMulticore(), ARM_Unicorn::Arch::AArch64, | ||
| 246 | processor_id); | ||
| 247 | } | ||
| 248 | LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available"); | ||
| 249 | #endif | ||
| 250 | ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top), | ||
| 251 | static_cast<u32>(entry_point), static_cast<u32>(arg)); | ||
| 252 | ResetThreadContext64(thread->context_64, stack_top, entry_point, arg); | ||
| 253 | } | ||
| 254 | thread->host_context = | ||
| 255 | std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter); | ||
| 206 | 256 | ||
| 207 | return MakeResult<std::shared_ptr<Thread>>(std::move(thread)); | 257 | return MakeResult<std::shared_ptr<Thread>>(std::move(thread)); |
| 208 | } | 258 | } |
| 209 | 259 | ||
| 210 | void Thread::SetPriority(u32 priority) { | 260 | void Thread::SetPriority(u32 priority) { |
| 261 | SchedulerLock lock(kernel); | ||
| 211 | ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST, | 262 | ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST, |
| 212 | "Invalid priority value."); | 263 | "Invalid priority value."); |
| 213 | nominal_priority = priority; | 264 | nominal_priority = priority; |
| 214 | UpdatePriority(); | 265 | UpdatePriority(); |
| 215 | } | 266 | } |
| 216 | 267 | ||
| 217 | void Thread::SetWaitSynchronizationResult(ResultCode result) { | 268 | void Thread::SetSynchronizationResults(SynchronizationObject* object, ResultCode result) { |
| 218 | context_32.cpu_registers[0] = result.raw; | 269 | signaling_object = object; |
| 219 | context_64.cpu_registers[0] = result.raw; | 270 | signaling_result = result; |
| 220 | } | ||
| 221 | |||
| 222 | void Thread::SetWaitSynchronizationOutput(s32 output) { | ||
| 223 | context_32.cpu_registers[1] = output; | ||
| 224 | context_64.cpu_registers[1] = output; | ||
| 225 | } | 271 | } |
| 226 | 272 | ||
| 227 | s32 Thread::GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const { | 273 | s32 Thread::GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const { |
| 228 | ASSERT_MSG(!wait_objects.empty(), "Thread is not waiting for anything"); | 274 | ASSERT_MSG(!wait_objects->empty(), "Thread is not waiting for anything"); |
| 229 | const auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object); | 275 | const auto match = std::find(wait_objects->rbegin(), wait_objects->rend(), object); |
| 230 | return static_cast<s32>(std::distance(match, wait_objects.rend()) - 1); | 276 | return static_cast<s32>(std::distance(match, wait_objects->rend()) - 1); |
| 231 | } | 277 | } |
| 232 | 278 | ||
| 233 | VAddr Thread::GetCommandBufferAddress() const { | 279 | VAddr Thread::GetCommandBufferAddress() const { |
| @@ -236,6 +282,14 @@ VAddr Thread::GetCommandBufferAddress() const { | |||
| 236 | return GetTLSAddress() + command_header_offset; | 282 | return GetTLSAddress() + command_header_offset; |
| 237 | } | 283 | } |
| 238 | 284 | ||
| 285 | Core::ARM_Interface& Thread::ArmInterface() { | ||
| 286 | return *arm_interface; | ||
| 287 | } | ||
| 288 | |||
| 289 | const Core::ARM_Interface& Thread::ArmInterface() const { | ||
| 290 | return *arm_interface; | ||
| 291 | } | ||
| 292 | |||
| 239 | void Thread::SetStatus(ThreadStatus new_status) { | 293 | void Thread::SetStatus(ThreadStatus new_status) { |
| 240 | if (new_status == status) { | 294 | if (new_status == status) { |
| 241 | return; | 295 | return; |
| @@ -257,10 +311,6 @@ void Thread::SetStatus(ThreadStatus new_status) { | |||
| 257 | break; | 311 | break; |
| 258 | } | 312 | } |
| 259 | 313 | ||
| 260 | if (status == ThreadStatus::Running) { | ||
| 261 | last_running_ticks = Core::System::GetInstance().CoreTiming().GetTicks(); | ||
| 262 | } | ||
| 263 | |||
| 264 | status = new_status; | 314 | status = new_status; |
| 265 | } | 315 | } |
| 266 | 316 | ||
| @@ -341,75 +391,116 @@ void Thread::UpdatePriority() { | |||
| 341 | lock_owner->UpdatePriority(); | 391 | lock_owner->UpdatePriority(); |
| 342 | } | 392 | } |
| 343 | 393 | ||
| 344 | void Thread::ChangeCore(u32 core, u64 mask) { | ||
| 345 | SetCoreAndAffinityMask(core, mask); | ||
| 346 | } | ||
| 347 | |||
| 348 | bool Thread::AllSynchronizationObjectsReady() const { | 394 | bool Thread::AllSynchronizationObjectsReady() const { |
| 349 | return std::none_of(wait_objects.begin(), wait_objects.end(), | 395 | return std::none_of(wait_objects->begin(), wait_objects->end(), |
| 350 | [this](const std::shared_ptr<SynchronizationObject>& object) { | 396 | [this](const std::shared_ptr<SynchronizationObject>& object) { |
| 351 | return object->ShouldWait(this); | 397 | return object->ShouldWait(this); |
| 352 | }); | 398 | }); |
| 353 | } | 399 | } |
| 354 | 400 | ||
| 355 | bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread, | 401 | bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) { |
| 356 | std::shared_ptr<SynchronizationObject> object, | 402 | ASSERT(hle_callback); |
| 357 | std::size_t index) { | 403 | return hle_callback(std::move(thread)); |
| 358 | ASSERT(wakeup_callback); | ||
| 359 | return wakeup_callback(reason, std::move(thread), std::move(object), index); | ||
| 360 | } | 404 | } |
| 361 | 405 | ||
| 362 | void Thread::SetActivity(ThreadActivity value) { | 406 | ResultCode Thread::SetActivity(ThreadActivity value) { |
| 363 | activity = value; | 407 | SchedulerLock lock(kernel); |
| 408 | |||
| 409 | auto sched_status = GetSchedulingStatus(); | ||
| 410 | |||
| 411 | if (sched_status != ThreadSchedStatus::Runnable && sched_status != ThreadSchedStatus::Paused) { | ||
| 412 | return ERR_INVALID_STATE; | ||
| 413 | } | ||
| 414 | |||
| 415 | if (IsPendingTermination()) { | ||
| 416 | return RESULT_SUCCESS; | ||
| 417 | } | ||
| 364 | 418 | ||
| 365 | if (value == ThreadActivity::Paused) { | 419 | if (value == ThreadActivity::Paused) { |
| 366 | // Set status if not waiting | 420 | if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) != 0) { |
| 367 | if (status == ThreadStatus::Ready || status == ThreadStatus::Running) { | 421 | return ERR_INVALID_STATE; |
| 368 | SetStatus(ThreadStatus::Paused); | 422 | } |
| 369 | kernel.PrepareReschedule(processor_id); | 423 | AddSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag); |
| 424 | } else { | ||
| 425 | if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) == 0) { | ||
| 426 | return ERR_INVALID_STATE; | ||
| 370 | } | 427 | } |
| 371 | } else if (status == ThreadStatus::Paused) { | 428 | RemoveSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag); |
| 372 | // Ready to reschedule | ||
| 373 | ResumeFromWait(); | ||
| 374 | } | 429 | } |
| 430 | return RESULT_SUCCESS; | ||
| 375 | } | 431 | } |
| 376 | 432 | ||
| 377 | void Thread::Sleep(s64 nanoseconds) { | 433 | ResultCode Thread::Sleep(s64 nanoseconds) { |
| 378 | // Sleep current thread and check for next thread to schedule | 434 | Handle event_handle{}; |
| 379 | SetStatus(ThreadStatus::WaitSleep); | 435 | { |
| 436 | SchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds); | ||
| 437 | SetStatus(ThreadStatus::WaitSleep); | ||
| 438 | } | ||
| 380 | 439 | ||
| 381 | // Create an event to wake the thread up after the specified nanosecond delay has passed | 440 | if (event_handle != InvalidHandle) { |
| 382 | WakeAfterDelay(nanoseconds); | 441 | auto& time_manager = kernel.TimeManager(); |
| 442 | time_manager.UnscheduleTimeEvent(event_handle); | ||
| 443 | } | ||
| 444 | return RESULT_SUCCESS; | ||
| 445 | } | ||
| 446 | |||
| 447 | std::pair<ResultCode, bool> Thread::YieldSimple() { | ||
| 448 | bool is_redundant = false; | ||
| 449 | { | ||
| 450 | SchedulerLock lock(kernel); | ||
| 451 | is_redundant = kernel.GlobalScheduler().YieldThread(this); | ||
| 452 | } | ||
| 453 | return {RESULT_SUCCESS, is_redundant}; | ||
| 454 | } | ||
| 455 | |||
| 456 | std::pair<ResultCode, bool> Thread::YieldAndBalanceLoad() { | ||
| 457 | bool is_redundant = false; | ||
| 458 | { | ||
| 459 | SchedulerLock lock(kernel); | ||
| 460 | is_redundant = kernel.GlobalScheduler().YieldThreadAndBalanceLoad(this); | ||
| 461 | } | ||
| 462 | return {RESULT_SUCCESS, is_redundant}; | ||
| 383 | } | 463 | } |
| 384 | 464 | ||
| 385 | bool Thread::YieldSimple() { | 465 | std::pair<ResultCode, bool> Thread::YieldAndWaitForLoadBalancing() { |
| 386 | auto& scheduler = kernel.GlobalScheduler(); | 466 | bool is_redundant = false; |
| 387 | return scheduler.YieldThread(this); | 467 | { |
| 468 | SchedulerLock lock(kernel); | ||
| 469 | is_redundant = kernel.GlobalScheduler().YieldThreadAndWaitForLoadBalancing(this); | ||
| 470 | } | ||
| 471 | return {RESULT_SUCCESS, is_redundant}; | ||
| 388 | } | 472 | } |
| 389 | 473 | ||
| 390 | bool Thread::YieldAndBalanceLoad() { | 474 | void Thread::AddSchedulingFlag(ThreadSchedFlags flag) { |
| 391 | auto& scheduler = kernel.GlobalScheduler(); | 475 | const u32 old_state = scheduling_state; |
| 392 | return scheduler.YieldThreadAndBalanceLoad(this); | 476 | pausing_state |= static_cast<u32>(flag); |
| 477 | const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus()); | ||
| 478 | scheduling_state = base_scheduling | pausing_state; | ||
| 479 | kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state); | ||
| 393 | } | 480 | } |
| 394 | 481 | ||
| 395 | bool Thread::YieldAndWaitForLoadBalancing() { | 482 | void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) { |
| 396 | auto& scheduler = kernel.GlobalScheduler(); | 483 | const u32 old_state = scheduling_state; |
| 397 | return scheduler.YieldThreadAndWaitForLoadBalancing(this); | 484 | pausing_state &= ~static_cast<u32>(flag); |
| 485 | const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus()); | ||
| 486 | scheduling_state = base_scheduling | pausing_state; | ||
| 487 | kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state); | ||
| 398 | } | 488 | } |
| 399 | 489 | ||
| 400 | void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) { | 490 | void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) { |
| 401 | const u32 old_flags = scheduling_state; | 491 | const u32 old_state = scheduling_state; |
| 402 | scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) | | 492 | scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) | |
| 403 | static_cast<u32>(new_status); | 493 | static_cast<u32>(new_status); |
| 404 | AdjustSchedulingOnStatus(old_flags); | 494 | kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state); |
| 405 | } | 495 | } |
| 406 | 496 | ||
| 407 | void Thread::SetCurrentPriority(u32 new_priority) { | 497 | void Thread::SetCurrentPriority(u32 new_priority) { |
| 408 | const u32 old_priority = std::exchange(current_priority, new_priority); | 498 | const u32 old_priority = std::exchange(current_priority, new_priority); |
| 409 | AdjustSchedulingOnPriority(old_priority); | 499 | kernel.GlobalScheduler().AdjustSchedulingOnPriority(this, old_priority); |
| 410 | } | 500 | } |
| 411 | 501 | ||
| 412 | ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { | 502 | ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { |
| 503 | SchedulerLock lock(kernel); | ||
| 413 | const auto HighestSetCore = [](u64 mask, u32 max_cores) { | 504 | const auto HighestSetCore = [](u64 mask, u32 max_cores) { |
| 414 | for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) { | 505 | for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) { |
| 415 | if (((mask >> core) & 1) != 0) { | 506 | if (((mask >> core) & 1) != 0) { |
| @@ -443,111 +534,12 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { | |||
| 443 | processor_id = ideal_core; | 534 | processor_id = ideal_core; |
| 444 | } | 535 | } |
| 445 | } | 536 | } |
| 446 | AdjustSchedulingOnAffinity(old_affinity_mask, old_core); | 537 | kernel.GlobalScheduler().AdjustSchedulingOnAffinity(this, old_affinity_mask, old_core); |
| 447 | } | 538 | } |
| 448 | } | 539 | } |
| 449 | return RESULT_SUCCESS; | 540 | return RESULT_SUCCESS; |
| 450 | } | 541 | } |
| 451 | 542 | ||
| 452 | void Thread::AdjustSchedulingOnStatus(u32 old_flags) { | ||
| 453 | if (old_flags == scheduling_state) { | ||
| 454 | return; | ||
| 455 | } | ||
| 456 | |||
| 457 | auto& scheduler = kernel.GlobalScheduler(); | ||
| 458 | if (static_cast<ThreadSchedStatus>(old_flags & static_cast<u32>(ThreadSchedMasks::LowMask)) == | ||
| 459 | ThreadSchedStatus::Runnable) { | ||
| 460 | // In this case the thread was running; now it's pausing/exiting | ||
| 461 | if (processor_id >= 0) { | ||
| 462 | scheduler.Unschedule(current_priority, static_cast<u32>(processor_id), this); | ||
| 463 | } | ||
| 464 | |||
| 465 | for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { | ||
| 466 | if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) { | ||
| 467 | scheduler.Unsuggest(current_priority, core, this); | ||
| 468 | } | ||
| 469 | } | ||
| 470 | } else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) { | ||
| 471 | // The thread is now set to running from being stopped | ||
| 472 | if (processor_id >= 0) { | ||
| 473 | scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this); | ||
| 474 | } | ||
| 475 | |||
| 476 | for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { | ||
| 477 | if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) { | ||
| 478 | scheduler.Suggest(current_priority, core, this); | ||
| 479 | } | ||
| 480 | } | ||
| 481 | } | ||
| 482 | |||
| 483 | scheduler.SetReselectionPending(); | ||
| 484 | } | ||
| 485 | |||
| 486 | void Thread::AdjustSchedulingOnPriority(u32 old_priority) { | ||
| 487 | if (GetSchedulingStatus() != ThreadSchedStatus::Runnable) { | ||
| 488 | return; | ||
| 489 | } | ||
| 490 | auto& scheduler = kernel.GlobalScheduler(); | ||
| 491 | if (processor_id >= 0) { | ||
| 492 | scheduler.Unschedule(old_priority, static_cast<u32>(processor_id), this); | ||
| 493 | } | ||
| 494 | |||
| 495 | for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { | ||
| 496 | if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) { | ||
| 497 | scheduler.Unsuggest(old_priority, core, this); | ||
| 498 | } | ||
| 499 | } | ||
| 500 | |||
| 501 | // Add thread to the new priority queues. | ||
| 502 | Thread* current_thread = GetCurrentThread(); | ||
| 503 | |||
| 504 | if (processor_id >= 0) { | ||
| 505 | if (current_thread == this) { | ||
| 506 | scheduler.SchedulePrepend(current_priority, static_cast<u32>(processor_id), this); | ||
| 507 | } else { | ||
| 508 | scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this); | ||
| 509 | } | ||
| 510 | } | ||
| 511 | |||
| 512 | for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { | ||
| 513 | if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) { | ||
| 514 | scheduler.Suggest(current_priority, core, this); | ||
| 515 | } | ||
| 516 | } | ||
| 517 | |||
| 518 | scheduler.SetReselectionPending(); | ||
| 519 | } | ||
| 520 | |||
| 521 | void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) { | ||
| 522 | auto& scheduler = kernel.GlobalScheduler(); | ||
| 523 | if (GetSchedulingStatus() != ThreadSchedStatus::Runnable || | ||
| 524 | current_priority >= THREADPRIO_COUNT) { | ||
| 525 | return; | ||
| 526 | } | ||
| 527 | |||
| 528 | for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { | ||
| 529 | if (((old_affinity_mask >> core) & 1) != 0) { | ||
| 530 | if (core == static_cast<u32>(old_core)) { | ||
| 531 | scheduler.Unschedule(current_priority, core, this); | ||
| 532 | } else { | ||
| 533 | scheduler.Unsuggest(current_priority, core, this); | ||
| 534 | } | ||
| 535 | } | ||
| 536 | } | ||
| 537 | |||
| 538 | for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) { | ||
| 539 | if (((affinity_mask >> core) & 1) != 0) { | ||
| 540 | if (core == static_cast<u32>(processor_id)) { | ||
| 541 | scheduler.Schedule(current_priority, core, this); | ||
| 542 | } else { | ||
| 543 | scheduler.Suggest(current_priority, core, this); | ||
| 544 | } | ||
| 545 | } | ||
| 546 | } | ||
| 547 | |||
| 548 | scheduler.SetReselectionPending(); | ||
| 549 | } | ||
| 550 | |||
| 551 | //////////////////////////////////////////////////////////////////////////////////////////////////// | 543 | //////////////////////////////////////////////////////////////////////////////////////////////////// |
| 552 | 544 | ||
| 553 | /** | 545 | /** |
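Thread::Create now takes the whole Core::System, a ThreadType, and (in the second overload) an explicit host start function, so process-less kernel and HLE threads can be built through the same path as guest threads. A hypothetical call for an HLE worker, assuming the overload shown above; every literal here is a placeholder:

    // Sketch only: creating a process-less HLE thread. HLE threads skip the
    // ARM interface and guest-context setup but still get a host fiber.
    std::function<void(void*)> start = [](void*) {
        // Runs on the Common::Fiber created in Thread::Create.
    };
    auto thread = Kernel::Thread::Create(
                      system,
                      static_cast<Kernel::ThreadType>(Kernel::THREADTYPE_KERNEL |
                                                      Kernel::THREADTYPE_HLE),
                      "hle:worker", /*entry_point=*/0, Kernel::THREADPRIO_DEFAULT,
                      /*arg=*/0, /*processor_id=*/3, /*stack_top=*/0,
                      /*owner_process=*/nullptr, std::move(start),
                      /*thread_start_parameter=*/nullptr)
                      .Unwrap(); // Asserts on failure, as elsewhere in the kernel.
    thread->Start(); // Marks the thread Ready under the scheduler lock.

Passing owner_process == nullptr exercises the new branch that skips TLS allocation and process registration, which is what makes kernel-internal threads possible.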
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index 23fdef8a4..c0342c462 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h | |||
| @@ -6,26 +6,47 @@ | |||
| 6 | 6 | ||
| 7 | #include <functional> | 7 | #include <functional> |
| 8 | #include <string> | 8 | #include <string> |
| 9 | #include <utility> | ||
| 9 | #include <vector> | 10 | #include <vector> |
| 10 | 11 | ||
| 11 | #include "common/common_types.h" | 12 | #include "common/common_types.h" |
| 13 | #include "common/spin_lock.h" | ||
| 12 | #include "core/arm/arm_interface.h" | 14 | #include "core/arm/arm_interface.h" |
| 13 | #include "core/hle/kernel/object.h" | 15 | #include "core/hle/kernel/object.h" |
| 14 | #include "core/hle/kernel/synchronization_object.h" | 16 | #include "core/hle/kernel/synchronization_object.h" |
| 15 | #include "core/hle/result.h" | 17 | #include "core/hle/result.h" |
| 16 | 18 | ||
| 19 | namespace Common { | ||
| 20 | class Fiber; | ||
| 21 | } | ||
| 22 | |||
| 23 | namespace Core { | ||
| 24 | class ARM_Interface; | ||
| 25 | class System; | ||
| 26 | } // namespace Core | ||
| 27 | |||
| 17 | namespace Kernel { | 28 | namespace Kernel { |
| 18 | 29 | ||
| 30 | class GlobalScheduler; | ||
| 19 | class KernelCore; | 31 | class KernelCore; |
| 20 | class Process; | 32 | class Process; |
| 21 | class Scheduler; | 33 | class Scheduler; |
| 22 | 34 | ||
| 23 | enum ThreadPriority : u32 { | 35 | enum ThreadPriority : u32 { |
| 24 | THREADPRIO_HIGHEST = 0, ///< Highest thread priority | 36 | THREADPRIO_HIGHEST = 0, ///< Highest thread priority |
| 25 | THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps | 37 | THREADPRIO_MAX_CORE_MIGRATION = 2, ///< Highest priority for a core migration |
| 26 | THREADPRIO_DEFAULT = 44, ///< Default thread priority for userland apps | 38 | THREADPRIO_USERLAND_MAX = 24, ///< Highest thread priority for userland apps |
| 27 | THREADPRIO_LOWEST = 63, ///< Lowest thread priority | 39 | THREADPRIO_DEFAULT = 44, ///< Default thread priority for userland apps |
| 28 | THREADPRIO_COUNT = 64, ///< Total number of possible thread priorities. | 40 | THREADPRIO_LOWEST = 63, ///< Lowest thread priority |
| 41 | THREADPRIO_COUNT = 64, ///< Total number of possible thread priorities. | ||
| 42 | }; | ||
| 43 | |||
| 44 | enum ThreadType : u32 { | ||
| 45 | THREADTYPE_USER = 0x1, | ||
| 46 | THREADTYPE_KERNEL = 0x2, | ||
| 47 | THREADTYPE_HLE = 0x4, | ||
| 48 | THREADTYPE_IDLE = 0x8, | ||
| 49 | THREADTYPE_SUSPEND = 0x10, | ||
| 29 | }; | 50 | }; |
| 30 | 51 | ||
| 31 | enum ThreadProcessorId : s32 { | 52 | enum ThreadProcessorId : s32 { |
| @@ -107,26 +128,45 @@ public: | |||
| 107 | 128 | ||
| 108 | using ThreadSynchronizationObjects = std::vector<std::shared_ptr<SynchronizationObject>>; | 129 | using ThreadSynchronizationObjects = std::vector<std::shared_ptr<SynchronizationObject>>; |
| 109 | 130 | ||
| 110 | using WakeupCallback = | 131 | using HLECallback = std::function<bool(std::shared_ptr<Thread> thread)>; |
| 111 | std::function<bool(ThreadWakeupReason reason, std::shared_ptr<Thread> thread, | 132 | |
| 112 | std::shared_ptr<SynchronizationObject> object, std::size_t index)>; | 133 | /** |
| 134 | * Creates and returns a new thread. The new thread is immediately scheduled | ||
| 135 | * @param system The instance of the whole system | ||
| 136 | * @param name The friendly name desired for the thread | ||
| 137 | * @param entry_point The address at which the thread should start execution | ||
| 138 | * @param priority The thread's priority | ||
| 139 | * @param arg User data to pass to the thread | ||
| 140 | * @param processor_id The ID(s) of the processors on which the thread is desired to be run | ||
| 141 | * @param stack_top The address of the thread's stack top | ||
| 142 | * @param owner_process The parent process for the thread; if null, it's a kernel thread | ||
| 143 | * @return A shared pointer to the newly created thread | ||
| 144 | */ | ||
| 145 | static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags, | ||
| 146 | std::string name, VAddr entry_point, | ||
| 147 | u32 priority, u64 arg, s32 processor_id, | ||
| 148 | VAddr stack_top, Process* owner_process); | ||
| 113 | 149 | ||
| 114 | /** | 150 | /** |
| 115 | * Creates and returns a new thread. The new thread is immediately scheduled | 151 | * Creates and returns a new thread. The new thread is immediately scheduled |
| 116 | * @param kernel The kernel instance this thread will be created under. | 152 | * @param system The instance of the whole system |
| 117 | * @param name The friendly name desired for the thread | 153 | * @param name The friendly name desired for the thread |
| 118 | * @param entry_point The address at which the thread should start execution | 154 | * @param entry_point The address at which the thread should start execution |
| 119 | * @param priority The thread's priority | 155 | * @param priority The thread's priority |
| 120 | * @param arg User data to pass to the thread | 156 | * @param arg User data to pass to the thread |
| 121 | * @param processor_id The ID(s) of the processors on which the thread is desired to be run | 157 | * @param processor_id The ID(s) of the processors on which the thread is desired to be run |
| 122 | * @param stack_top The address of the thread's stack top | 158 | * @param stack_top The address of the thread's stack top |
| 123 | * @param owner_process The parent process for the thread | 159 | * @param owner_process The parent process for the thread; if null, it's a kernel thread |
| 160 | * @param thread_start_func The function where the host context will start. | ||
| 161 | * @param thread_start_parameter The parameter which will be passed to the host context on init | ||
| 124 | * @return A shared pointer to the newly created thread | 162 | * @return A shared pointer to the newly created thread |
| 125 | */ | 163 | */ |
| 126 | static ResultVal<std::shared_ptr<Thread>> Create(KernelCore& kernel, std::string name, | 164 | static ResultVal<std::shared_ptr<Thread>> Create(Core::System& system, ThreadType type_flags, |
| 127 | VAddr entry_point, u32 priority, u64 arg, | 165 | std::string name, VAddr entry_point, |
| 128 | s32 processor_id, VAddr stack_top, | 166 | u32 priority, u64 arg, s32 processor_id, |
| 129 | Process& owner_process); | 167 | VAddr stack_top, Process* owner_process, |
| 168 | std::function<void(void*)>&& thread_start_func, | ||
| 169 | void* thread_start_parameter); | ||
| 130 | 170 | ||
| 131 | std::string GetName() const override { | 171 | std::string GetName() const override { |
| 132 | return name; | 172 | return name; |
| @@ -181,7 +221,7 @@ public: | |||
| 181 | void UpdatePriority(); | 221 | void UpdatePriority(); |
| 182 | 222 | ||
| 183 | /// Changes the core that the thread is running or scheduled to run on. | 223 | /// Changes the core that the thread is running or scheduled to run on. |
| 184 | void ChangeCore(u32 core, u64 mask); | 224 | ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask); |
| 185 | 225 | ||
| 186 | /** | 226 | /** |
| 187 | * Gets the thread's thread ID | 227 | * Gets the thread's thread ID |
| @@ -194,6 +234,10 @@ public: | |||
| 194 | /// Resumes a thread from waiting | 234 | /// Resumes a thread from waiting |
| 195 | void ResumeFromWait(); | 235 | void ResumeFromWait(); |
| 196 | 236 | ||
| 237 | void OnWakeUp(); | ||
| 238 | |||
| 239 | ResultCode Start(); | ||
| 240 | |||
| 197 | /// Cancels a waiting operation that this thread may or may not be within. | 241 | /// Cancels a waiting operation that this thread may or may not be within. |
| 198 | /// | 242 | /// |
| 199 | /// When the thread is within a waiting state, this will set the thread's | 243 | /// When the thread is within a waiting state, this will set the thread's |
| @@ -202,26 +246,19 @@ public: | |||
| 202 | /// | 246 | /// |
| 203 | void CancelWait(); | 247 | void CancelWait(); |
| 204 | 248 | ||
| 205 | /** | 249 | void SetSynchronizationResults(SynchronizationObject* object, ResultCode result); |
| 206 | * Schedules an event to wake up the specified thread after the specified delay | ||
| 207 | * @param nanoseconds The time this thread will be allowed to sleep for | ||
| 208 | */ | ||
| 209 | void WakeAfterDelay(s64 nanoseconds); | ||
| 210 | 250 | ||
| 211 | /// Cancel any outstanding wakeup events for this thread | 251 | Core::ARM_Interface& ArmInterface(); |
| 212 | void CancelWakeupTimer(); | ||
| 213 | 252 | ||
| 214 | /** | 253 | const Core::ARM_Interface& ArmInterface() const; |
| 215 | * Sets the result after the thread awakens (from svcWaitSynchronization) | ||
| 216 | * @param result Value to set to the returned result | ||
| 217 | */ | ||
| 218 | void SetWaitSynchronizationResult(ResultCode result); | ||
| 219 | 254 | ||
| 220 | /** | 255 | SynchronizationObject* GetSignalingObject() const { |
| 221 | * Sets the output parameter value after the thread awakens (from svcWaitSynchronization) | 256 | return signaling_object; |
| 222 | * @param output Value to set to the output parameter | 257 | } |
| 223 | */ | 258 | |
| 224 | void SetWaitSynchronizationOutput(s32 output); | 259 | ResultCode GetSignalingResult() const { |
| 260 | return signaling_result; | ||
| 261 | } | ||
| 225 | 262 | ||
| 226 | /** | 263 | /** |
| 227 | * Retrieves the index that this particular object occupies in the list of objects | 264 | * Retrieves the index that this particular object occupies in the list of objects |
| @@ -269,11 +306,6 @@ public: | |||
| 269 | */ | 306 | */ |
| 270 | VAddr GetCommandBufferAddress() const; | 307 | VAddr GetCommandBufferAddress() const; |
| 271 | 308 | ||
| 272 | /// Returns whether this thread is waiting on objects from a WaitSynchronization call. | ||
| 273 | bool IsSleepingOnWait() const { | ||
| 274 | return status == ThreadStatus::WaitSynch; | ||
| 275 | } | ||
| 276 | |||
| 277 | ThreadContext32& GetContext32() { | 309 | ThreadContext32& GetContext32() { |
| 278 | return context_32; | 310 | return context_32; |
| 279 | } | 311 | } |
| @@ -290,6 +322,28 @@ public: | |||
| 290 | return context_64; | 322 | return context_64; |
| 291 | } | 323 | } |
| 292 | 324 | ||
| 325 | bool IsHLEThread() const { | ||
| 326 | return (type & THREADTYPE_HLE) != 0; | ||
| 327 | } | ||
| 328 | |||
| 329 | bool IsSuspendThread() const { | ||
| 330 | return (type & THREADTYPE_SUSPEND) != 0; | ||
| 331 | } | ||
| 332 | |||
| 333 | bool IsIdleThread() const { | ||
| 334 | return (type & THREADTYPE_IDLE) != 0; | ||
| 335 | } | ||
| 336 | |||
| 337 | bool WasRunning() const { | ||
| 338 | return was_running; | ||
| 339 | } | ||
| 340 | |||
| 341 | void SetWasRunning(bool value) { | ||
| 342 | was_running = value; | ||
| 343 | } | ||
| 344 | |||
| 345 | std::shared_ptr<Common::Fiber>& GetHostContext(); | ||
| 346 | |||
| 293 | ThreadStatus GetStatus() const { | 347 | ThreadStatus GetStatus() const { |
| 294 | return status; | 348 | return status; |
| 295 | } | 349 | } |
| @@ -325,18 +379,18 @@ public: | |||
| 325 | } | 379 | } |
| 326 | 380 | ||
| 327 | const ThreadSynchronizationObjects& GetSynchronizationObjects() const { | 381 | const ThreadSynchronizationObjects& GetSynchronizationObjects() const { |
| 328 | return wait_objects; | 382 | return *wait_objects; |
| 329 | } | 383 | } |
| 330 | 384 | ||
| 331 | void SetSynchronizationObjects(ThreadSynchronizationObjects objects) { | 385 | void SetSynchronizationObjects(ThreadSynchronizationObjects* objects) { |
| 332 | wait_objects = std::move(objects); | 386 | wait_objects = objects; |
| 333 | } | 387 | } |
| 334 | 388 | ||
| 335 | void ClearSynchronizationObjects() { | 389 | void ClearSynchronizationObjects() { |
| 336 | for (const auto& waiting_object : wait_objects) { | 390 | for (const auto& waiting_object : *wait_objects) { |
| 337 | waiting_object->RemoveWaitingThread(SharedFrom(this)); | 391 | waiting_object->RemoveWaitingThread(SharedFrom(this)); |
| 338 | } | 392 | } |
| 339 | wait_objects.clear(); | 393 | wait_objects->clear(); |
| 340 | } | 394 | } |
| 341 | 395 | ||
| 342 | /// Determines whether all the objects this thread is waiting on are ready. | 396 | /// Determines whether all the objects this thread is waiting on are ready. |
| @@ -386,26 +440,35 @@ public: | |||
| 386 | arb_wait_address = address; | 440 | arb_wait_address = address; |
| 387 | } | 441 | } |
| 388 | 442 | ||
| 389 | bool HasWakeupCallback() const { | 443 | bool HasHLECallback() const { |
| 390 | return wakeup_callback != nullptr; | 444 | return hle_callback != nullptr; |
| 391 | } | 445 | } |
| 392 | 446 | ||
| 393 | void SetWakeupCallback(WakeupCallback callback) { | 447 | void SetHLECallback(HLECallback callback) { |
| 394 | wakeup_callback = std::move(callback); | 448 | hle_callback = std::move(callback); |
| 395 | } | 449 | } |
| 396 | 450 | ||
| 397 | void InvalidateWakeupCallback() { | 451 | void SetHLETimeEvent(Handle time_event) { |
| 398 | SetWakeupCallback(nullptr); | 452 | hle_time_event = time_event; |
| 399 | } | 453 | } |
| 400 | 454 | ||
| 401 | /** | 455 | void SetHLESyncObject(SynchronizationObject* object) { |
| 402 | * Invokes the thread's wakeup callback. | 456 | hle_object = object; |
| 403 | * | 457 | } |
| 404 | * @pre A valid wakeup callback has been set. Violating this precondition | 458 | |
| 405 | * will cause an assertion to trigger. | 459 | Handle GetHLETimeEvent() const { |
| 406 | */ | 460 | return hle_time_event; |
| 407 | bool InvokeWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread, | 461 | } |
| 408 | std::shared_ptr<SynchronizationObject> object, std::size_t index); | 462 | |
| 463 | SynchronizationObject* GetHLESyncObject() const { | ||
| 464 | return hle_object; | ||
| 465 | } | ||
| 466 | |||
| 467 | void InvalidateHLECallback() { | ||
| 468 | SetHLECallback(nullptr); | ||
| 469 | } | ||
| 470 | |||
| 471 | bool InvokeHLECallback(std::shared_ptr<Thread> thread); | ||
| 409 | 472 | ||
| 410 | u32 GetIdealCore() const { | 473 | u32 GetIdealCore() const { |
| 411 | return ideal_core; | 474 | return ideal_core; |
| @@ -415,23 +478,19 @@ public: | |||
| 415 | return affinity_mask; | 478 | return affinity_mask; |
| 416 | } | 479 | } |
| 417 | 480 | ||
| 418 | ThreadActivity GetActivity() const { | 481 | ResultCode SetActivity(ThreadActivity value); |
| 419 | return activity; | ||
| 420 | } | ||
| 421 | |||
| 422 | void SetActivity(ThreadActivity value); | ||
| 423 | 482 | ||
| 424 | /// Sleeps this thread for the given amount of nanoseconds. | 483 | /// Sleeps this thread for the given amount of nanoseconds. |
| 425 | void Sleep(s64 nanoseconds); | 484 | ResultCode Sleep(s64 nanoseconds); |
| 426 | 485 | ||
| 427 | /// Yields this thread without rebalancing loads. | 486 | /// Yields this thread without rebalancing loads. |
| 428 | bool YieldSimple(); | 487 | std::pair<ResultCode, bool> YieldSimple(); |
| 429 | 488 | ||
| 430 | /// Yields this thread and does a load rebalancing. | 489 | /// Yields this thread and does a load rebalancing. |
| 431 | bool YieldAndBalanceLoad(); | 490 | std::pair<ResultCode, bool> YieldAndBalanceLoad(); |
| 432 | 491 | ||
| 433 | /// Yields this thread and if the core is left idle, loads are rebalanced | 492 | /// Yields this thread and if the core is left idle, loads are rebalanced |
| 434 | bool YieldAndWaitForLoadBalancing(); | 493 | std::pair<ResultCode, bool> YieldAndWaitForLoadBalancing(); |
| 435 | 494 | ||
| 436 | void IncrementYieldCount() { | 495 | void IncrementYieldCount() { |
| 437 | yield_count++; | 496 | yield_count++; |
| @@ -446,6 +505,10 @@ public: | |||
| 446 | static_cast<u32>(ThreadSchedMasks::LowMask)); | 505 | static_cast<u32>(ThreadSchedMasks::LowMask)); |
| 447 | } | 506 | } |
| 448 | 507 | ||
| 508 | bool IsRunnable() const { | ||
| 509 | return scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable); | ||
| 510 | } | ||
| 511 | |||
| 449 | bool IsRunning() const { | 512 | bool IsRunning() const { |
| 450 | return is_running; | 513 | return is_running; |
| 451 | } | 514 | } |
| @@ -466,17 +529,67 @@ public: | |||
| 466 | return global_handle; | 529 | return global_handle; |
| 467 | } | 530 | } |
| 468 | 531 | ||
| 532 | bool IsWaitingForArbitration() const { | ||
| 533 | return waiting_for_arbitration; | ||
| 534 | } | ||
| 535 | |||
| 536 | void WaitForArbitration(bool set) { | ||
| 537 | waiting_for_arbitration = set; | ||
| 538 | } | ||
| 539 | |||
| 540 | bool IsWaitingSync() const { | ||
| 541 | return is_waiting_on_sync; | ||
| 542 | } | ||
| 543 | |||
| 544 | void SetWaitingSync(bool is_waiting) { | ||
| 545 | is_waiting_on_sync = is_waiting; | ||
| 546 | } | ||
| 547 | |||
| 548 | bool IsPendingTermination() const { | ||
| 549 | return will_be_terminated || GetSchedulingStatus() == ThreadSchedStatus::Exited; | ||
| 550 | } | ||
| 551 | |||
| 552 | bool IsPaused() const { | ||
| 553 | return pausing_state != 0; | ||
| 554 | } | ||
| 555 | |||
| 556 | bool IsContinuousOnSVC() const { | ||
| 557 | return is_continuous_on_svc; | ||
| 558 | } | ||
| 559 | |||
| 560 | void SetContinuousOnSVC(bool is_continuous) { | ||
| 561 | is_continuous_on_svc = is_continuous; | ||
| 562 | } | ||
| 563 | |||
| 564 | bool IsPhantomMode() const { | ||
| 565 | return is_phantom_mode; | ||
| 566 | } | ||
| 567 | |||
| 568 | void SetPhantomMode(bool phantom) { | ||
| 569 | is_phantom_mode = phantom; | ||
| 570 | } | ||
| 571 | |||
| 572 | bool HasExited() const { | ||
| 573 | return has_exited; | ||
| 574 | } | ||
| 575 | |||
| 469 | private: | 576 | private: |
| 577 | friend class GlobalScheduler; | ||
| 578 | friend class Scheduler; | ||
| 579 | |||
| 470 | void SetSchedulingStatus(ThreadSchedStatus new_status); | 580 | void SetSchedulingStatus(ThreadSchedStatus new_status); |
| 581 | void AddSchedulingFlag(ThreadSchedFlags flag); | ||
| 582 | void RemoveSchedulingFlag(ThreadSchedFlags flag); | ||
| 583 | |||
| 471 | void SetCurrentPriority(u32 new_priority); | 584 | void SetCurrentPriority(u32 new_priority); |
| 472 | ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask); | ||
| 473 | 585 | ||
| 474 | void AdjustSchedulingOnStatus(u32 old_flags); | ||
| 475 | void AdjustSchedulingOnPriority(u32 old_priority); | ||
| 476 | void AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core); | 586 | void AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core); |
| 477 | 587 | ||
| 588 | Common::SpinLock context_guard{}; | ||
| 478 | ThreadContext32 context_32{}; | 589 | ThreadContext32 context_32{}; |
| 479 | ThreadContext64 context_64{}; | 590 | ThreadContext64 context_64{}; |
| 591 | std::unique_ptr<Core::ARM_Interface> arm_interface{}; | ||
| 592 | std::shared_ptr<Common::Fiber> host_context{}; | ||
| 480 | 593 | ||
| 481 | u64 thread_id = 0; | 594 | u64 thread_id = 0; |
| 482 | 595 | ||
| @@ -485,6 +598,8 @@ private: | |||
| 485 | VAddr entry_point = 0; | 598 | VAddr entry_point = 0; |
| 486 | VAddr stack_top = 0; | 599 | VAddr stack_top = 0; |
| 487 | 600 | ||
| 601 | ThreadType type; | ||
| 602 | |||
| 488 | /// Nominal thread priority, as set by the emulated application. | 603 | /// Nominal thread priority, as set by the emulated application. |
| 489 | /// The nominal priority is the thread priority without priority | 604 | /// The nominal priority is the thread priority without priority |
| 490 | /// inheritance taken into account. | 605 | /// inheritance taken into account. |
| @@ -509,7 +624,10 @@ private: | |||
| 509 | 624 | ||
| 510 | /// Objects that the thread is waiting on, in the same order as they were | 625 | /// Objects that the thread is waiting on, in the same order as they were |
| 511 | /// passed to WaitSynchronization. | 626 | /// passed to WaitSynchronization. |
| 512 | ThreadSynchronizationObjects wait_objects; | 627 | ThreadSynchronizationObjects* wait_objects; |
| 628 | |||
| 629 | SynchronizationObject* signaling_object; | ||
| 630 | ResultCode signaling_result{RESULT_SUCCESS}; | ||
| 513 | 631 | ||
| 514 | /// List of threads that are waiting for a mutex that is held by this thread. | 632 | /// List of threads that are waiting for a mutex that is held by this thread. |
| 515 | MutexWaitingThreads wait_mutex_threads; | 633 | MutexWaitingThreads wait_mutex_threads; |
| @@ -526,30 +644,39 @@ private: | |||
| 526 | 644 | ||
| 527 | /// If waiting for an AddressArbiter, this is the address being waited on. | 645 | /// If waiting for an AddressArbiter, this is the address being waited on. |
| 528 | VAddr arb_wait_address{0}; | 646 | VAddr arb_wait_address{0}; |
| 647 | bool waiting_for_arbitration{}; | ||
| 529 | 648 | ||
| 530 | /// Handle used as userdata to reference this object when inserting into the CoreTiming queue. | 649 | /// Handle used as userdata to reference this object when inserting into the CoreTiming queue. |
| 531 | Handle global_handle = 0; | 650 | Handle global_handle = 0; |
| 532 | 651 | ||
| 533 | /// Callback that will be invoked when the thread is resumed from a waiting state. If the thread | 652 | /// Callback for HLE Events |
| 534 | /// was waiting via WaitSynchronization then the object will be the last object that became | 653 | HLECallback hle_callback; |
| 535 | /// available. In case of a timeout, the object will be nullptr. | 654 | Handle hle_time_event; |
| 536 | WakeupCallback wakeup_callback; | 655 | SynchronizationObject* hle_object; |
| 537 | 656 | ||
| 538 | Scheduler* scheduler = nullptr; | 657 | Scheduler* scheduler = nullptr; |
| 539 | 658 | ||
| 540 | u32 ideal_core{0xFFFFFFFF}; | 659 | u32 ideal_core{0xFFFFFFFF}; |
| 541 | u64 affinity_mask{0x1}; | 660 | u64 affinity_mask{0x1}; |
| 542 | 661 | ||
| 543 | ThreadActivity activity = ThreadActivity::Normal; | ||
| 544 | |||
| 545 | s32 ideal_core_override = -1; | 662 | s32 ideal_core_override = -1; |
| 546 | u64 affinity_mask_override = 0x1; | 663 | u64 affinity_mask_override = 0x1; |
| 547 | u32 affinity_override_count = 0; | 664 | u32 affinity_override_count = 0; |
| 548 | 665 | ||
| 549 | u32 scheduling_state = 0; | 666 | u32 scheduling_state = 0; |
| 667 | u32 pausing_state = 0; | ||
| 550 | bool is_running = false; | 668 | bool is_running = false; |
| 669 | bool is_waiting_on_sync = false; | ||
| 551 | bool is_sync_cancelled = false; | 670 | bool is_sync_cancelled = false; |
| 552 | 671 | ||
| 672 | bool is_continuous_on_svc = false; | ||
| 673 | |||
| 674 | bool will_be_terminated = false; | ||
| 675 | bool is_phantom_mode = false; | ||
| 676 | bool has_exited = false; | ||
| 677 | |||
| 678 | bool was_running = false; | ||
| 679 | |||
| 553 | std::string name; | 680 | std::string name; |
| 554 | }; | 681 | }; |
| 555 | 682 | ||
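The yield helpers above now return a std::pair<ResultCode, bool> instead of a bare bool, so a caller can distinguish a failed yield from one that merely had no effect. A minimal sketch of a consumer under that assumption; the wrapper name and the interpretation of the bool as "redundant yield" are illustrative, not the actual yuzu call site:

```cpp
// Hypothetical consumer of the pair-returning yield API declared in
// thread.h. Only Thread::YieldSimple() comes from the header; the function
// name and the meaning assigned to the bool are assumptions.
ResultCode HandleYieldSimple(Kernel::Thread& current_thread) {
    const auto [result, is_redundant] = current_thread.YieldSimple();
    if (result.IsError()) {
        // Propagate the failure; the old bool-returning interface had no
        // way to express an error at all.
        return result;
    }
    // 'is_redundant' is assumed to report that no other thread could take
    // the core, letting a caller skip follow-up rescheduling work.
    return RESULT_SUCCESS;
}
```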
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp index 21b290468..941305e8e 100644 --- a/src/core/hle/kernel/time_manager.cpp +++ b/src/core/hle/kernel/time_manager.cpp | |||
| @@ -8,30 +8,37 @@ | |||
| 8 | #include "core/core_timing_util.h" | 8 | #include "core/core_timing_util.h" |
| 9 | #include "core/hle/kernel/handle_table.h" | 9 | #include "core/hle/kernel/handle_table.h" |
| 10 | #include "core/hle/kernel/kernel.h" | 10 | #include "core/hle/kernel/kernel.h" |
| 11 | #include "core/hle/kernel/scheduler.h" | ||
| 11 | #include "core/hle/kernel/thread.h" | 12 | #include "core/hle/kernel/thread.h" |
| 12 | #include "core/hle/kernel/time_manager.h" | 13 | #include "core/hle/kernel/time_manager.h" |
| 13 | 14 | ||
| 14 | namespace Kernel { | 15 | namespace Kernel { |
| 15 | 16 | ||
| 16 | TimeManager::TimeManager(Core::System& system) : system{system} { | 17 | TimeManager::TimeManager(Core::System& system_) : system{system_} { |
| 17 | time_manager_event_type = Core::Timing::CreateEvent( | 18 | time_manager_event_type = Core::Timing::CreateEvent( |
| 18 | "Kernel::TimeManagerCallback", [this](u64 thread_handle, [[maybe_unused]] s64 cycles_late) { | 19 | "Kernel::TimeManagerCallback", [this](u64 thread_handle, [[maybe_unused]] s64 cycles_late) { |
| 20 | SchedulerLock lock(system.Kernel()); | ||
| 19 | Handle proper_handle = static_cast<Handle>(thread_handle); | 21 | Handle proper_handle = static_cast<Handle>(thread_handle); |
| 22 | if (cancelled_events[proper_handle]) { | ||
| 23 | return; | ||
| 24 | } | ||
| 20 | std::shared_ptr<Thread> thread = | 25 | std::shared_ptr<Thread> thread = |
| 21 | this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle); | 26 | this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle); |
| 22 | thread->ResumeFromWait(); | 27 | thread->OnWakeUp(); |
| 23 | }); | 28 | }); |
| 24 | } | 29 | } |
| 25 | 30 | ||
| 26 | void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds) { | 31 | void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds) { |
| 32 | event_handle = timetask->GetGlobalHandle(); | ||
| 27 | if (nanoseconds > 0) { | 33 | if (nanoseconds > 0) { |
| 28 | ASSERT(timetask); | 34 | ASSERT(timetask); |
| 29 | event_handle = timetask->GetGlobalHandle(); | 35 | ASSERT(timetask->GetStatus() != ThreadStatus::Ready); |
| 30 | const s64 cycles = Core::Timing::nsToCycles(std::chrono::nanoseconds{nanoseconds}); | 36 | ASSERT(timetask->GetStatus() != ThreadStatus::WaitMutex); |
| 31 | system.CoreTiming().ScheduleEvent(cycles, time_manager_event_type, event_handle); | 37 | system.CoreTiming().ScheduleEvent(nanoseconds, time_manager_event_type, event_handle); |
| 32 | } else { | 38 | } else { |
| 33 | event_handle = InvalidHandle; | 39 | event_handle = InvalidHandle; |
| 34 | } | 40 | } |
| 41 | cancelled_events[event_handle] = false; | ||
| 35 | } | 42 | } |
| 36 | 43 | ||
| 37 | void TimeManager::UnscheduleTimeEvent(Handle event_handle) { | 44 | void TimeManager::UnscheduleTimeEvent(Handle event_handle) { |
| @@ -39,6 +46,12 @@ void TimeManager::UnscheduleTimeEvent(Handle event_handle) { | |||
| 39 | return; | 46 | return; |
| 40 | } | 47 | } |
| 41 | system.CoreTiming().UnscheduleEvent(time_manager_event_type, event_handle); | 48 | system.CoreTiming().UnscheduleEvent(time_manager_event_type, event_handle); |
| 49 | cancelled_events[event_handle] = true; | ||
| 50 | } | ||
| 51 | |||
| 52 | void TimeManager::CancelTimeEvent(Thread* time_task) { | ||
| 53 | Handle event_handle = time_task->GetGlobalHandle(); | ||
| 54 | UnscheduleTimeEvent(event_handle); | ||
| 42 | } | 55 | } |
| 43 | 56 | ||
| 44 | } // namespace Kernel | 57 | } // namespace Kernel |
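The new cancelled_events map acts as a tombstone: UnscheduleTimeEvent both removes the event from the CoreTiming queue and marks the handle cancelled, and the callback re-checks that flag under the SchedulerLock in case the event was already in flight when it was cancelled. A self-contained sketch of the same pattern with hypothetical names (not yuzu APIs):

```cpp
#include <functional>
#include <unordered_map>

// Standalone illustration of the tombstone pattern TimeManager uses above:
// cancellation flips a per-handle flag, and the timer callback treats a
// cancelled handle as a no-op. All names here are hypothetical.
using Handle = unsigned int;

class TimeoutRegistry {
public:
    void Schedule(Handle handle) {
        cancelled_events[handle] = false; // (re)arm: clear any old tombstone
    }

    void Cancel(Handle handle) {
        cancelled_events[handle] = true; // a pending callback will see this
    }

    // Invoked by the timing backend when the deadline expires.
    void OnExpire(Handle handle, const std::function<void()>& wake_thread) {
        if (cancelled_events[handle]) {
            return; // cancelled between being queued and firing
        }
        wake_thread();
    }

private:
    std::unordered_map<Handle, bool> cancelled_events;
};
```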
diff --git a/src/core/hle/kernel/time_manager.h b/src/core/hle/kernel/time_manager.h index eaec486d1..307a18765 100644 --- a/src/core/hle/kernel/time_manager.h +++ b/src/core/hle/kernel/time_manager.h | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <memory> | 7 | #include <memory> |
| 8 | #include <unordered_map> | ||
| 8 | 9 | ||
| 9 | #include "core/hle/kernel/object.h" | 10 | #include "core/hle/kernel/object.h" |
| 10 | 11 | ||
| @@ -35,9 +36,12 @@ public: | |||
| 35 | /// Unschedule an existing time event | 36 | /// Unschedule an existing time event |
| 36 | void UnscheduleTimeEvent(Handle event_handle); | 37 | void UnscheduleTimeEvent(Handle event_handle); |
| 37 | 38 | ||
| 39 | void CancelTimeEvent(Thread* time_task); | ||
| 40 | |||
| 38 | private: | 41 | private: |
| 39 | Core::System& system; | 42 | Core::System& system; |
| 40 | std::shared_ptr<Core::Timing::EventType> time_manager_event_type; | 43 | std::shared_ptr<Core::Timing::EventType> time_manager_event_type; |
| 44 | std::unordered_map<Handle, bool> cancelled_events; | ||
| 41 | }; | 45 | }; |
| 42 | 46 | ||
| 43 | } // namespace Kernel | 47 | } // namespace Kernel |
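CancelTimeEvent is a convenience over UnscheduleTimeEvent that derives the event handle from the thread's global handle. A hedged sketch of how a timed wait might pair these calls; the helper names, the blocking step, and the surrounding locking are assumptions, not the actual svc implementation:

```cpp
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"

// Assumed helper that parks the thread until it is signalled or times out;
// not a yuzu API, declared here only so the sketch is self-contained.
void BlockCurrentThread(Kernel::Thread* thread);

// Hypothetical timed-wait flow built on the TimeManager interface above.
ResultCode WaitWithTimeout(Kernel::TimeManager& time_manager,
                           Kernel::Thread* thread, s64 timeout_ns) {
    Handle timeout_event{};
    // Arms the CoreTiming event; with timeout_ns <= 0 the handle is set to
    // InvalidHandle and nothing is queued.
    time_manager.ScheduleTimeEvent(timeout_event, thread, timeout_ns);

    BlockCurrentThread(thread);

    // If the thread was signalled before the deadline, tombstone the event
    // so the pending callback does not wake it a second time.
    time_manager.CancelTimeEvent(thread);
    return RESULT_SUCCESS;
}
```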