| author | 2016-12-16 00:41:22 -0500 |
|---|---|
| committer | 2016-12-16 00:41:22 -0500 |
| commit | cda7210fade53a96fcba5fe5cd6dfd7b604f8277 (patch) |
| tree | c4090e3871e717ee4d0a2edd837feffc2c877cb0 /src |
| parent | Merge pull request #2316 from endrift/macos-gcc (diff) |
| parent | Fixed the codestyle to match our clang-format rules. (diff) |
Merge pull request #2260 from Subv/scheduling
Threading: Reworked the way our scheduler works.
Diffstat (limited to 'src')
| -rw-r--r-- | src/citra_qt/debugger/wait_tree.cpp | 3 |
| -rw-r--r-- | src/core/hle/kernel/address_arbiter.cpp | 2 |
| -rw-r--r-- | src/core/hle/kernel/kernel.cpp | 58 |
| -rw-r--r-- | src/core/hle/kernel/kernel.h | 8 |
| -rw-r--r-- | src/core/hle/kernel/thread.cpp | 101 |
| -rw-r--r-- | src/core/hle/kernel/thread.h | 44 |
| -rw-r--r-- | src/core/hle/kernel/timer.cpp | 4 |
| -rw-r--r-- | src/core/hle/svc.cpp | 187 |
8 files changed, 211 insertions, 196 deletions
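Before the per-file diffs, a rough sketch of the central behavioural change may help: instead of rewinding the PC and re-running the wait SVC when a blocked thread is switched back in, the reworked scheduler latches a Timeout result at the moment the thread suspends and lets the signalling wait object overwrite that result (and the output index) when the thread is actually woken. The snippet below is an editor's illustration only; the types and function names are simplified stand-ins, not the real Citra kernel API.

```cpp
// Editor's illustration only: simplified stand-ins, not the real kernel types.
#include <cstdint>
#include <iostream>

enum class Result { Success, Timeout };

struct Thread {
    Result wait_result = Result::Timeout; // default result if nothing wakes us
    int32_t wait_output = -1;             // default output index on timeout
    bool asleep = false;
};

// What the reworked wait SVCs do when the object is not ready: suspend the
// thread with the Timeout result already latched in.
void SuspendOnWait(Thread& t) {
    t.wait_result = Result::Timeout;
    t.wait_output = -1;
    t.asleep = true;
}

// What WakeupAllWaitingThreads does when an object becomes ready: overwrite
// the latched result and output index, then resume, instead of re-running
// the SVC on the next context switch.
void WakeFromSignal(Thread& t, int32_t signalled_index) {
    t.wait_result = Result::Success;
    t.wait_output = signalled_index;
    t.asleep = false;
}

int main() {
    Thread t;
    SuspendOnWait(t);
    WakeFromSignal(t, 2); // pretend the object at index 2 signalled
    std::cout << (t.wait_result == Result::Success) << " " << t.wait_output << "\n";
    return 0;
}
```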
diff --git a/src/citra_qt/debugger/wait_tree.cpp b/src/citra_qt/debugger/wait_tree.cpp index 51e70fae3..5a308bf7f 100644 --- a/src/citra_qt/debugger/wait_tree.cpp +++ b/src/citra_qt/debugger/wait_tree.cpp | |||
| @@ -229,7 +229,8 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const { | |||
| 229 | list.push_back(std::make_unique<WaitTreeMutexList>(thread.held_mutexes)); | 229 | list.push_back(std::make_unique<WaitTreeMutexList>(thread.held_mutexes)); |
| 230 | } | 230 | } |
| 231 | if (thread.status == THREADSTATUS_WAIT_SYNCH) { | 231 | if (thread.status == THREADSTATUS_WAIT_SYNCH) { |
| 232 | list.push_back(std::make_unique<WaitTreeObjectList>(thread.wait_objects, thread.wait_all)); | 232 | list.push_back(std::make_unique<WaitTreeObjectList>(thread.wait_objects, |
| 233 | thread.IsSleepingOnWaitAll())); | ||
| 233 | } | 234 | } |
| 234 | 235 | ||
| 235 | return list; | 236 | return list; |
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index 37eec4c84..b5a0cc3a3 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp | |||
| @@ -79,8 +79,6 @@ ResultCode AddressArbiter::ArbitrateAddress(ArbitrationType type, VAddr address, | |||
| 79 | ErrorSummary::WrongArgument, ErrorLevel::Usage); | 79 | ErrorSummary::WrongArgument, ErrorLevel::Usage); |
| 80 | } | 80 | } |
| 81 | 81 | ||
| 82 | HLE::Reschedule(__func__); | ||
| 83 | |||
| 84 | // The calls that use a timeout seem to always return a Timeout error even if they did not put | 82 | // The calls that use a timeout seem to always return a Timeout error even if they did not put |
| 85 | // the thread to sleep | 83 | // the thread to sleep |
| 86 | if (type == ArbitrationType::WaitIfLessThanWithTimeout || | 84 | if (type == ArbitrationType::WaitIfLessThanWithTimeout || |
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 0c8752670..209d35270 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include <algorithm> | 5 | #include <algorithm> |
| 6 | #include <boost/range/algorithm_ext/erase.hpp> | ||
| 6 | #include "common/assert.h" | 7 | #include "common/assert.h" |
| 7 | #include "common/logging/log.h" | 8 | #include "common/logging/log.h" |
| 8 | #include "core/hle/config_mem.h" | 9 | #include "core/hle/config_mem.h" |
| @@ -31,13 +32,60 @@ void WaitObject::RemoveWaitingThread(Thread* thread) { | |||
| 31 | waiting_threads.erase(itr); | 32 | waiting_threads.erase(itr); |
| 32 | } | 33 | } |
| 33 | 34 | ||
| 34 | void WaitObject::WakeupAllWaitingThreads() { | 35 | SharedPtr<Thread> WaitObject::GetHighestPriorityReadyThread() { |
| 35 | for (auto thread : waiting_threads) | 36 | // Remove the threads that are ready or already running from our waitlist |
| 36 | thread->ResumeFromWait(); | 37 | boost::range::remove_erase_if(waiting_threads, [](const SharedPtr<Thread>& thread) { |
| 38 | return thread->status == THREADSTATUS_RUNNING || thread->status == THREADSTATUS_READY; | ||
| 39 | }); | ||
| 40 | |||
| 41 | // TODO(Subv): This call should be performed inside the loop below to check if an object can be | ||
| 42 | // acquired by a particular thread. This is useful for things like recursive locking of Mutexes. | ||
| 43 | if (ShouldWait()) | ||
| 44 | return nullptr; | ||
| 45 | |||
| 46 | Thread* candidate = nullptr; | ||
| 47 | s32 candidate_priority = THREADPRIO_LOWEST + 1; | ||
| 48 | |||
| 49 | for (const auto& thread : waiting_threads) { | ||
| 50 | if (thread->current_priority >= candidate_priority) | ||
| 51 | continue; | ||
| 37 | 52 | ||
| 38 | waiting_threads.clear(); | 53 | bool ready_to_run = |
| 54 | std::none_of(thread->wait_objects.begin(), thread->wait_objects.end(), | ||
| 55 | [](const SharedPtr<WaitObject>& object) { return object->ShouldWait(); }); | ||
| 56 | if (ready_to_run) { | ||
| 57 | candidate = thread.get(); | ||
| 58 | candidate_priority = thread->current_priority; | ||
| 59 | } | ||
| 60 | } | ||
| 61 | |||
| 62 | return candidate; | ||
| 63 | } | ||
| 39 | 64 | ||
| 40 | HLE::Reschedule(__func__); | 65 | void WaitObject::WakeupAllWaitingThreads() { |
| 66 | while (auto thread = GetHighestPriorityReadyThread()) { | ||
| 67 | if (!thread->IsSleepingOnWaitAll()) { | ||
| 68 | Acquire(); | ||
| 69 | // Set the output index of the WaitSynchronizationN call to the index of this object. | ||
| 70 | if (thread->wait_set_output) { | ||
| 71 | thread->SetWaitSynchronizationOutput(thread->GetWaitObjectIndex(this)); | ||
| 72 | thread->wait_set_output = false; | ||
| 73 | } | ||
| 74 | } else { | ||
| 75 | for (auto& object : thread->wait_objects) { | ||
| 76 | object->Acquire(); | ||
| 77 | object->RemoveWaitingThread(thread.get()); | ||
| 78 | } | ||
| 79 | // Note: This case doesn't update the output index of WaitSynchronizationN. | ||
| 80 | // Clear the thread's waitlist | ||
| 81 | thread->wait_objects.clear(); | ||
| 82 | } | ||
| 83 | |||
| 84 | thread->SetWaitSynchronizationResult(RESULT_SUCCESS); | ||
| 85 | thread->ResumeFromWait(); | ||
| 86 | // Note: Removing the thread from the object's waitlist will be | ||
| 87 | // done by GetHighestPriorityReadyThread. | ||
| 88 | } | ||
| 41 | } | 89 | } |
| 42 | 90 | ||
| 43 | const std::vector<SharedPtr<Thread>>& WaitObject::GetWaitingThreads() const { | 91 | const std::vector<SharedPtr<Thread>>& WaitObject::GetWaitingThreads() const { |
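The heart of the kernel.cpp change is the candidate-selection rule in GetHighestPriorityReadyThread: walk the waitlist, skip anything that cannot beat the best priority seen so far, and only accept a thread whose entire wait set is ready. A minimal, self-contained sketch of that rule follows; FakeObject, FakeThread and PickCandidate are stand-ins for the real SharedPtr-based types, and lower numeric priority means higher scheduling priority, as in the diff above.

```cpp
// Editor's sketch: pick the highest-priority waiting thread whose entire wait
// set is ready. Lower numeric value = higher priority; THREADPRIO_LOWEST + 1
// acts as the "no candidate yet" sentinel, as in the diff above.
#include <algorithm>
#include <memory>
#include <vector>

constexpr int THREADPRIO_LOWEST = 63;

struct FakeObject {
    bool signaled = false;
    bool ShouldWait() const { return !signaled; }
};

struct FakeThread {
    int current_priority = 32;
    std::vector<std::shared_ptr<FakeObject>> wait_objects;
};

FakeThread* PickCandidate(const std::vector<std::shared_ptr<FakeThread>>& waiting_threads) {
    FakeThread* candidate = nullptr;
    int candidate_priority = THREADPRIO_LOWEST + 1;

    for (const auto& thread : waiting_threads) {
        // Skip threads that cannot beat the best candidate found so far.
        if (thread->current_priority >= candidate_priority)
            continue;

        // A thread is only eligible if none of its wait objects would still
        // make it wait.
        bool ready_to_run = std::none_of(
            thread->wait_objects.begin(), thread->wait_objects.end(),
            [](const std::shared_ptr<FakeObject>& object) { return object->ShouldWait(); });

        if (ready_to_run) {
            candidate = thread.get();
            candidate_priority = thread->current_priority;
        }
    }
    return candidate;
}

int main() {
    auto obj = std::make_shared<FakeObject>();
    obj->signaled = true;

    auto high = std::make_shared<FakeThread>();
    high->current_priority = 10;
    high->wait_objects = {obj};

    auto low = std::make_shared<FakeThread>();
    low->current_priority = 40;
    low->wait_objects = {obj};

    // Both threads are ready, but the priority-10 thread outranks the other.
    return PickCandidate({low, high}) == high.get() ? 0 : 1;
}
```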
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 0b811c5a7..1adcf6c71 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h | |||
| @@ -151,9 +151,15 @@ public: | |||
| 151 | */ | 151 | */ |
| 152 | void RemoveWaitingThread(Thread* thread); | 152 | void RemoveWaitingThread(Thread* thread); |
| 153 | 153 | ||
| 154 | /// Wake up all threads waiting on this object | 154 | /** |
| 155 | * Wake up all threads waiting on this object that can be awoken, in priority order, | ||
| 156 | * and set the synchronization result and output of the thread. | ||
| 157 | */ | ||
| 155 | void WakeupAllWaitingThreads(); | 158 | void WakeupAllWaitingThreads(); |
| 156 | 159 | ||
| 160 | /// Obtains the highest priority thread that is ready to run from this object's waiting list. | ||
| 161 | SharedPtr<Thread> GetHighestPriorityReadyThread(); | ||
| 162 | |||
| 157 | /// Get a const reference to the waiting threads list for debug use | 163 | /// Get a const reference to the waiting threads list for debug use |
| 158 | const std::vector<SharedPtr<Thread>>& GetWaitingThreads() const; | 164 | const std::vector<SharedPtr<Thread>>& GetWaitingThreads() const; |
| 159 | 165 | ||
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 84d6d24c6..4bbc08516 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp | |||
| @@ -120,8 +120,6 @@ void Thread::Stop() { | |||
| 120 | u32 tls_slot = | 120 | u32 tls_slot = |
| 121 | ((tls_address - Memory::TLS_AREA_VADDR) % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE; | 121 | ((tls_address - Memory::TLS_AREA_VADDR) % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE; |
| 122 | Kernel::g_current_process->tls_slots[tls_page].reset(tls_slot); | 122 | Kernel::g_current_process->tls_slots[tls_page].reset(tls_slot); |
| 123 | |||
| 124 | HLE::Reschedule(__func__); | ||
| 125 | } | 123 | } |
| 126 | 124 | ||
| 127 | Thread* ArbitrateHighestPriorityThread(u32 address) { | 125 | Thread* ArbitrateHighestPriorityThread(u32 address) { |
| @@ -181,50 +179,6 @@ static void PriorityBoostStarvedThreads() { | |||
| 181 | } | 179 | } |
| 182 | 180 | ||
| 183 | /** | 181 | /** |
| 184 | * Gets the registers for timeout parameter of the next WaitSynchronization call. | ||
| 185 | * @param thread a pointer to the thread that is ready to call WaitSynchronization | ||
| 186 | * @returns a tuple of two register pointers to low and high part of the timeout parameter | ||
| 187 | */ | ||
| 188 | static std::tuple<u32*, u32*> GetWaitSynchTimeoutParameterRegister(Thread* thread) { | ||
| 189 | bool thumb_mode = (thread->context.cpsr & TBIT) != 0; | ||
| 190 | u16 thumb_inst = Memory::Read16(thread->context.pc & 0xFFFFFFFE); | ||
| 191 | u32 inst = Memory::Read32(thread->context.pc & 0xFFFFFFFC) & 0x0FFFFFFF; | ||
| 192 | |||
| 193 | if ((thumb_mode && thumb_inst == 0xDF24) || (!thumb_mode && inst == 0x0F000024)) { | ||
| 194 | // svc #0x24 (WaitSynchronization1) | ||
| 195 | return std::make_tuple(&thread->context.cpu_registers[2], | ||
| 196 | &thread->context.cpu_registers[3]); | ||
| 197 | } else if ((thumb_mode && thumb_inst == 0xDF25) || (!thumb_mode && inst == 0x0F000025)) { | ||
| 198 | // svc #0x25 (WaitSynchronizationN) | ||
| 199 | return std::make_tuple(&thread->context.cpu_registers[0], | ||
| 200 | &thread->context.cpu_registers[4]); | ||
| 201 | } | ||
| 202 | |||
| 203 | UNREACHABLE(); | ||
| 204 | } | ||
| 205 | |||
| 206 | /** | ||
| 207 | * Updates the WaitSynchronization timeout parameter according to the difference | ||
| 208 | * between ticks of the last WaitSynchronization call and the incoming one. | ||
| 209 | * @param timeout_low a pointer to the register for the low part of the timeout parameter | ||
| 210 | * @param timeout_high a pointer to the register for the high part of the timeout parameter | ||
| 211 | * @param last_tick tick of the last WaitSynchronization call | ||
| 212 | */ | ||
| 213 | static void UpdateTimeoutParameter(u32* timeout_low, u32* timeout_high, u64 last_tick) { | ||
| 214 | s64 timeout = ((s64)*timeout_high << 32) | *timeout_low; | ||
| 215 | |||
| 216 | if (timeout != -1) { | ||
| 217 | timeout -= cyclesToUs(CoreTiming::GetTicks() - last_tick) * 1000; // in nanoseconds | ||
| 218 | |||
| 219 | if (timeout < 0) | ||
| 220 | timeout = 0; | ||
| 221 | |||
| 222 | *timeout_low = timeout & 0xFFFFFFFF; | ||
| 223 | *timeout_high = timeout >> 32; | ||
| 224 | } | ||
| 225 | } | ||
| 226 | |||
| 227 | /** | ||
| 228 | * Switches the CPU's active thread context to that of the specified thread | 182 | * Switches the CPU's active thread context to that of the specified thread |
| 229 | * @param new_thread The thread to switch to | 183 | * @param new_thread The thread to switch to |
| 230 | */ | 184 | */ |
| @@ -254,32 +208,6 @@ static void SwitchContext(Thread* new_thread) { | |||
| 254 | 208 | ||
| 255 | current_thread = new_thread; | 209 | current_thread = new_thread; |
| 256 | 210 | ||
| 257 | // If the thread was waited by a svcWaitSynch call, step back PC by one instruction to rerun | ||
| 258 | // the SVC when the thread wakes up. This is necessary to ensure that the thread can acquire | ||
| 259 | // the requested wait object(s) before continuing. | ||
| 260 | if (new_thread->waitsynch_waited) { | ||
| 261 | // CPSR flag indicates CPU mode | ||
| 262 | bool thumb_mode = (new_thread->context.cpsr & TBIT) != 0; | ||
| 263 | |||
| 264 | // SVC instruction is 2 bytes for THUMB, 4 bytes for ARM | ||
| 265 | new_thread->context.pc -= thumb_mode ? 2 : 4; | ||
| 266 | |||
| 267 | // Get the register for timeout parameter | ||
| 268 | u32 *timeout_low, *timeout_high; | ||
| 269 | std::tie(timeout_low, timeout_high) = GetWaitSynchTimeoutParameterRegister(new_thread); | ||
| 270 | |||
| 271 | // Update the timeout parameter | ||
| 272 | UpdateTimeoutParameter(timeout_low, timeout_high, new_thread->last_running_ticks); | ||
| 273 | } | ||
| 274 | |||
| 275 | // Clean up the thread's wait_objects, they'll be restored if needed during | ||
| 276 | // the svcWaitSynchronization call | ||
| 277 | for (size_t i = 0; i < new_thread->wait_objects.size(); ++i) { | ||
| 278 | SharedPtr<WaitObject> object = new_thread->wait_objects[i]; | ||
| 279 | object->RemoveWaitingThread(new_thread); | ||
| 280 | } | ||
| 281 | new_thread->wait_objects.clear(); | ||
| 282 | |||
| 283 | ready_queue.remove(new_thread->current_priority, new_thread); | 211 | ready_queue.remove(new_thread->current_priority, new_thread); |
| 284 | new_thread->status = THREADSTATUS_RUNNING; | 212 | new_thread->status = THREADSTATUS_RUNNING; |
| 285 | 213 | ||
| @@ -319,17 +247,13 @@ static Thread* PopNextReadyThread() { | |||
| 319 | void WaitCurrentThread_Sleep() { | 247 | void WaitCurrentThread_Sleep() { |
| 320 | Thread* thread = GetCurrentThread(); | 248 | Thread* thread = GetCurrentThread(); |
| 321 | thread->status = THREADSTATUS_WAIT_SLEEP; | 249 | thread->status = THREADSTATUS_WAIT_SLEEP; |
| 322 | |||
| 323 | HLE::Reschedule(__func__); | ||
| 324 | } | 250 | } |
| 325 | 251 | ||
| 326 | void WaitCurrentThread_WaitSynchronization(std::vector<SharedPtr<WaitObject>> wait_objects, | 252 | void WaitCurrentThread_WaitSynchronization(std::vector<SharedPtr<WaitObject>> wait_objects, |
| 327 | bool wait_set_output, bool wait_all) { | 253 | bool wait_set_output) { |
| 328 | Thread* thread = GetCurrentThread(); | 254 | Thread* thread = GetCurrentThread(); |
| 329 | thread->wait_set_output = wait_set_output; | 255 | thread->wait_set_output = wait_set_output; |
| 330 | thread->wait_all = wait_all; | ||
| 331 | thread->wait_objects = std::move(wait_objects); | 256 | thread->wait_objects = std::move(wait_objects); |
| 332 | thread->waitsynch_waited = true; | ||
| 333 | thread->status = THREADSTATUS_WAIT_SYNCH; | 257 | thread->status = THREADSTATUS_WAIT_SYNCH; |
| 334 | } | 258 | } |
| 335 | 259 | ||
| @@ -351,15 +275,15 @@ static void ThreadWakeupCallback(u64 thread_handle, int cycles_late) { | |||
| 351 | return; | 275 | return; |
| 352 | } | 276 | } |
| 353 | 277 | ||
| 354 | thread->waitsynch_waited = false; | ||
| 355 | |||
| 356 | if (thread->status == THREADSTATUS_WAIT_SYNCH || thread->status == THREADSTATUS_WAIT_ARB) { | 278 | if (thread->status == THREADSTATUS_WAIT_SYNCH || thread->status == THREADSTATUS_WAIT_ARB) { |
| 279 | thread->wait_set_output = false; | ||
| 280 | // Remove the thread from each of its waiting objects' waitlists | ||
| 281 | for (auto& object : thread->wait_objects) | ||
| 282 | object->RemoveWaitingThread(thread.get()); | ||
| 283 | thread->wait_objects.clear(); | ||
| 357 | thread->SetWaitSynchronizationResult(ResultCode(ErrorDescription::Timeout, ErrorModule::OS, | 284 | thread->SetWaitSynchronizationResult(ResultCode(ErrorDescription::Timeout, ErrorModule::OS, |
| 358 | ErrorSummary::StatusChanged, | 285 | ErrorSummary::StatusChanged, |
| 359 | ErrorLevel::Info)); | 286 | ErrorLevel::Info)); |
| 360 | |||
| 361 | if (thread->wait_set_output) | ||
| 362 | thread->SetWaitSynchronizationOutput(-1); | ||
| 363 | } | 287 | } |
| 364 | 288 | ||
| 365 | thread->ResumeFromWait(); | 289 | thread->ResumeFromWait(); |
| @@ -399,6 +323,7 @@ void Thread::ResumeFromWait() { | |||
| 399 | 323 | ||
| 400 | ready_queue.push_back(current_priority, this); | 324 | ready_queue.push_back(current_priority, this); |
| 401 | status = THREADSTATUS_READY; | 325 | status = THREADSTATUS_READY; |
| 326 | HLE::Reschedule(__func__); | ||
| 402 | } | 327 | } |
| 403 | 328 | ||
| 404 | /** | 329 | /** |
| @@ -494,13 +419,11 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point, | |||
| 494 | thread->last_running_ticks = CoreTiming::GetTicks(); | 419 | thread->last_running_ticks = CoreTiming::GetTicks(); |
| 495 | thread->processor_id = processor_id; | 420 | thread->processor_id = processor_id; |
| 496 | thread->wait_set_output = false; | 421 | thread->wait_set_output = false; |
| 497 | thread->wait_all = false; | ||
| 498 | thread->wait_objects.clear(); | 422 | thread->wait_objects.clear(); |
| 499 | thread->wait_address = 0; | 423 | thread->wait_address = 0; |
| 500 | thread->name = std::move(name); | 424 | thread->name = std::move(name); |
| 501 | thread->callback_handle = wakeup_callback_handle_table.Create(thread).MoveFrom(); | 425 | thread->callback_handle = wakeup_callback_handle_table.Create(thread).MoveFrom(); |
| 502 | thread->owner_process = g_current_process; | 426 | thread->owner_process = g_current_process; |
| 503 | thread->waitsynch_waited = false; | ||
| 504 | 427 | ||
| 505 | // Find the next available TLS index, and mark it as used | 428 | // Find the next available TLS index, and mark it as used |
| 506 | auto& tls_slots = Kernel::g_current_process->tls_slots; | 429 | auto& tls_slots = Kernel::g_current_process->tls_slots; |
| @@ -555,8 +478,6 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point, | |||
| 555 | ready_queue.push_back(thread->current_priority, thread.get()); | 478 | ready_queue.push_back(thread->current_priority, thread.get()); |
| 556 | thread->status = THREADSTATUS_READY; | 479 | thread->status = THREADSTATUS_READY; |
| 557 | 480 | ||
| 558 | HLE::Reschedule(__func__); | ||
| 559 | |||
| 560 | return MakeResult<SharedPtr<Thread>>(std::move(thread)); | 481 | return MakeResult<SharedPtr<Thread>>(std::move(thread)); |
| 561 | } | 482 | } |
| 562 | 483 | ||
| @@ -619,14 +540,6 @@ void Reschedule() { | |||
| 619 | 540 | ||
| 620 | HLE::DoneRescheduling(); | 541 | HLE::DoneRescheduling(); |
| 621 | 542 | ||
| 622 | // Don't bother switching to the same thread. | ||
| 623 | // But if the thread was waiting on objects, we still need to switch it | ||
| 624 | // to perform PC modification, change state to RUNNING, etc. | ||
| 625 | // This occurs in the case when an object the thread is waiting on immediately wakes up | ||
| 626 | // the current thread before Reschedule() is called. | ||
| 627 | if (next == cur && (next == nullptr || next->waitsynch_waited == false)) | ||
| 628 | return; | ||
| 629 | |||
| 630 | if (cur && next) { | 543 | if (cur && next) { |
| 631 | LOG_TRACE(Kernel, "context switch %u -> %u", cur->GetObjectId(), next->GetObjectId()); | 544 | LOG_TRACE(Kernel, "context switch %u -> %u", cur->GetObjectId(), next->GetObjectId()); |
| 632 | } else if (cur) { | 545 | } else if (cur) { |
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index e0ffcea8a..238359fc5 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h | |||
| @@ -5,7 +5,9 @@ | |||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <string> | 7 | #include <string> |
| 8 | #include <unordered_map> | ||
| 8 | #include <vector> | 9 | #include <vector> |
| 10 | #include <boost/container/flat_map.hpp> | ||
| 9 | #include <boost/container/flat_set.hpp> | 11 | #include <boost/container/flat_set.hpp> |
| 10 | #include "common/common_types.h" | 12 | #include "common/common_types.h" |
| 11 | #include "core/core.h" | 13 | #include "core/core.h" |
| @@ -125,6 +127,16 @@ public: | |||
| 125 | void SetWaitSynchronizationOutput(s32 output); | 127 | void SetWaitSynchronizationOutput(s32 output); |
| 126 | 128 | ||
| 127 | /** | 129 | /** |
| 130 | * Retrieves the index that this particular object occupies in the list of objects | ||
| 131 | * that the thread passed to WaitSynchronizationN. | ||
| 132 | * It is used to set the output value of WaitSynchronizationN when the thread is awakened. | ||
| 133 | * @param object Object to query the index of. | ||
| 134 | */ | ||
| 135 | s32 GetWaitObjectIndex(const WaitObject* object) const { | ||
| 136 | return wait_objects_index.at(object->GetObjectId()); | ||
| 137 | } | ||
| 138 | |||
| 139 | /** | ||
| 128 | * Stops a thread, invalidating it from further use | 140 | * Stops a thread, invalidating it from further use |
| 129 | */ | 141 | */ |
| 130 | void Stop(); | 142 | void Stop(); |
| @@ -137,6 +149,15 @@ public: | |||
| 137 | return tls_address; | 149 | return tls_address; |
| 138 | } | 150 | } |
| 139 | 151 | ||
| 152 | /** | ||
| 153 | * Returns whether this thread is waiting for all the objects in | ||
| 154 | * its wait list to become ready, as a result of a WaitSynchronizationN call | ||
| 155 | * with wait_all = true, or a ReplyAndReceive call. | ||
| 156 | */ | ||
| 157 | bool IsSleepingOnWaitAll() const { | ||
| 158 | return !wait_objects.empty(); | ||
| 159 | } | ||
| 160 | |||
| 140 | Core::ThreadContext context; | 161 | Core::ThreadContext context; |
| 141 | 162 | ||
| 142 | u32 thread_id; | 163 | u32 thread_id; |
| @@ -154,16 +175,22 @@ public: | |||
| 154 | 175 | ||
| 155 | VAddr tls_address; ///< Virtual address of the Thread Local Storage of the thread | 176 | VAddr tls_address; ///< Virtual address of the Thread Local Storage of the thread |
| 156 | 177 | ||
| 157 | bool waitsynch_waited; ///< Set to true if the last svcWaitSynch call caused the thread to wait | ||
| 158 | |||
| 159 | /// Mutexes currently held by this thread, which will be released when it exits. | 178 | /// Mutexes currently held by this thread, which will be released when it exits. |
| 160 | boost::container::flat_set<SharedPtr<Mutex>> held_mutexes; | 179 | boost::container::flat_set<SharedPtr<Mutex>> held_mutexes; |
| 161 | 180 | ||
| 162 | SharedPtr<Process> owner_process; ///< Process that owns this thread | 181 | SharedPtr<Process> owner_process; ///< Process that owns this thread |
| 163 | std::vector<SharedPtr<WaitObject>> wait_objects; ///< Objects that the thread is waiting on | 182 | |
| 164 | VAddr wait_address; ///< If waiting on an AddressArbiter, this is the arbitration address | 183 | /// Objects that the thread is waiting on. |
| 165 | bool wait_all; ///< True if the thread is waiting on all objects before resuming | 184 | /// This is only populated when the thread should wait for all the objects to become ready. |
| 166 | bool wait_set_output; ///< True if the output parameter should be set on thread wakeup | 185 | std::vector<SharedPtr<WaitObject>> wait_objects; |
| 186 | |||
| 187 | /// Mapping of Object ids to their position in the last waitlist that this object waited on. | ||
| 188 | boost::container::flat_map<int, s32> wait_objects_index; | ||
| 189 | |||
| 190 | VAddr wait_address; ///< If waiting on an AddressArbiter, this is the arbitration address | ||
| 191 | |||
| 192 | /// True if the WaitSynchronizationN output parameter should be set on thread wakeup. | ||
| 193 | bool wait_set_output; | ||
| 167 | 194 | ||
| 168 | std::string name; | 195 | std::string name; |
| 169 | 196 | ||
| @@ -215,10 +242,9 @@ void WaitCurrentThread_Sleep(); | |||
| 215 | * @param wait_objects Kernel objects that we are waiting on | 242 | * @param wait_objects Kernel objects that we are waiting on |
| 216 | * @param wait_set_output If true, set the output parameter on thread wakeup (for | 243 | * @param wait_set_output If true, set the output parameter on thread wakeup (for |
| 217 | * WaitSynchronizationN only) | 244 | * WaitSynchronizationN only) |
| 218 | * @param wait_all If true, wait on all objects before resuming (for WaitSynchronizationN only) | ||
| 219 | */ | 245 | */ |
| 220 | void WaitCurrentThread_WaitSynchronization(std::vector<SharedPtr<WaitObject>> wait_objects, | 246 | void WaitCurrentThread_WaitSynchronization(std::vector<SharedPtr<WaitObject>> wait_objects, |
| 221 | bool wait_set_output, bool wait_all); | 247 | bool wait_set_output); |
| 222 | 248 | ||
| 223 | /** | 249 | /** |
| 224 | * Waits the current thread from an ArbitrateAddress call | 250 | * Waits the current thread from an ArbitrateAddress call |
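The bookkeeping added to thread.h is easy to miss: for wait-any calls the thread records, per object id, the position each object had in the handle array passed to WaitSynchronizationN (so the output index can be reproduced at wakeup), while for wait-all calls it keeps the objects themselves in wait_objects, which is exactly the condition IsSleepingOnWaitAll tests. A hedged sketch using plain std containers in place of the boost flat containers from the real header:

```cpp
// Editor's sketch: std::map and std::vector stand in for the boost flat
// containers used by the real Thread class.
#include <cstdint>
#include <map>
#include <memory>
#include <vector>

struct FakeObject {
    int object_id;
};

struct FakeThread {
    // Populated only when waiting for *all* objects (wait_all / ReplyAndReceive).
    std::vector<std::shared_ptr<FakeObject>> wait_objects;
    // Object id -> index in the handle list of the last wait-any call.
    std::map<int, int32_t> wait_objects_index;

    bool IsSleepingOnWaitAll() const {
        return !wait_objects.empty();
    }

    int32_t GetWaitObjectIndex(const FakeObject* object) const {
        return wait_objects_index.at(object->object_id);
    }
};

int main() {
    FakeThread thread;
    auto a = std::make_shared<FakeObject>(FakeObject{7});
    auto b = std::make_shared<FakeObject>(FakeObject{9});

    // Wait-any: remember each object's position, leave wait_objects empty.
    thread.wait_objects_index[a->object_id] = 0;
    thread.wait_objects_index[b->object_id] = 1;

    // At wakeup, the signalled object's original index becomes the SVC output.
    return (!thread.IsSleepingOnWaitAll() && thread.GetWaitObjectIndex(b.get()) == 1) ? 0 : 1;
}
```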
diff --git a/src/core/hle/kernel/timer.cpp b/src/core/hle/kernel/timer.cpp index eac181f4e..b50cf520d 100644 --- a/src/core/hle/kernel/timer.cpp +++ b/src/core/hle/kernel/timer.cpp | |||
| @@ -60,14 +60,10 @@ void Timer::Set(s64 initial, s64 interval) { | |||
| 60 | u64 initial_microseconds = initial / 1000; | 60 | u64 initial_microseconds = initial / 1000; |
| 61 | CoreTiming::ScheduleEvent(usToCycles(initial_microseconds), timer_callback_event_type, | 61 | CoreTiming::ScheduleEvent(usToCycles(initial_microseconds), timer_callback_event_type, |
| 62 | callback_handle); | 62 | callback_handle); |
| 63 | |||
| 64 | HLE::Reschedule(__func__); | ||
| 65 | } | 63 | } |
| 66 | 64 | ||
| 67 | void Timer::Cancel() { | 65 | void Timer::Cancel() { |
| 68 | CoreTiming::UnscheduleEvent(timer_callback_event_type, callback_handle); | 66 | CoreTiming::UnscheduleEvent(timer_callback_event_type, callback_handle); |
| 69 | |||
| 70 | HLE::Reschedule(__func__); | ||
| 71 | } | 67 | } |
| 72 | 68 | ||
| 73 | void Timer::Clear() { | 69 | void Timer::Clear() { |
diff --git a/src/core/hle/svc.cpp b/src/core/hle/svc.cpp index e5ba9a484..ef25acc4a 100644 --- a/src/core/hle/svc.cpp +++ b/src/core/hle/svc.cpp | |||
| @@ -43,6 +43,9 @@ const ResultCode ERR_PORT_NAME_TOO_LONG(ErrorDescription(30), ErrorModule::OS, | |||
| 43 | ErrorSummary::InvalidArgument, | 43 | ErrorSummary::InvalidArgument, |
| 44 | ErrorLevel::Usage); // 0xE0E0181E | 44 | ErrorLevel::Usage); // 0xE0E0181E |
| 45 | 45 | ||
| 46 | const ResultCode ERR_SYNC_TIMEOUT(ErrorDescription::Timeout, ErrorModule::OS, | ||
| 47 | ErrorSummary::StatusChanged, ErrorLevel::Info); | ||
| 48 | |||
| 46 | const ResultCode ERR_MISALIGNED_ADDRESS{// 0xE0E01BF1 | 49 | const ResultCode ERR_MISALIGNED_ADDRESS{// 0xE0E01BF1 |
| 47 | ErrorDescription::MisalignedAddress, ErrorModule::OS, | 50 | ErrorDescription::MisalignedAddress, ErrorModule::OS, |
| 48 | ErrorSummary::InvalidArgument, ErrorLevel::Usage}; | 51 | ErrorSummary::InvalidArgument, ErrorLevel::Usage}; |
| @@ -260,27 +263,30 @@ static ResultCode WaitSynchronization1(Handle handle, s64 nano_seconds) { | |||
| 260 | auto object = Kernel::g_handle_table.GetWaitObject(handle); | 263 | auto object = Kernel::g_handle_table.GetWaitObject(handle); |
| 261 | Kernel::Thread* thread = Kernel::GetCurrentThread(); | 264 | Kernel::Thread* thread = Kernel::GetCurrentThread(); |
| 262 | 265 | ||
| 263 | thread->waitsynch_waited = false; | ||
| 264 | |||
| 265 | if (object == nullptr) | 266 | if (object == nullptr) |
| 266 | return ERR_INVALID_HANDLE; | 267 | return ERR_INVALID_HANDLE; |
| 267 | 268 | ||
| 268 | LOG_TRACE(Kernel_SVC, "called handle=0x%08X(%s:%s), nanoseconds=%lld", handle, | 269 | LOG_TRACE(Kernel_SVC, "called handle=0x%08X(%s:%s), nanoseconds=%lld", handle, |
| 269 | object->GetTypeName().c_str(), object->GetName().c_str(), nano_seconds); | 270 | object->GetTypeName().c_str(), object->GetName().c_str(), nano_seconds); |
| 270 | 271 | ||
| 271 | HLE::Reschedule(__func__); | ||
| 272 | |||
| 273 | // Check for next thread to schedule | ||
| 274 | if (object->ShouldWait()) { | 272 | if (object->ShouldWait()) { |
| 275 | 273 | ||
| 274 | if (nano_seconds == 0) | ||
| 275 | return ERR_SYNC_TIMEOUT; | ||
| 276 | |||
| 276 | object->AddWaitingThread(thread); | 277 | object->AddWaitingThread(thread); |
| 277 | Kernel::WaitCurrentThread_WaitSynchronization({object}, false, false); | 278 | // TODO(Subv): Perform things like update the mutex lock owner's priority to |
| 279 | // prevent priority inversion. Currently this is done in Mutex::ShouldWait, | ||
| 280 | // but it should be moved to a function that is called from here. | ||
| 281 | thread->status = THREADSTATUS_WAIT_SYNCH; | ||
| 278 | 282 | ||
| 279 | // Create an event to wake the thread up after the specified nanosecond delay has passed | 283 | // Create an event to wake the thread up after the specified nanosecond delay has passed |
| 280 | thread->WakeAfterDelay(nano_seconds); | 284 | thread->WakeAfterDelay(nano_seconds); |
| 281 | 285 | ||
| 282 | // NOTE: output of this SVC will be set later depending on how the thread resumes | 286 | // Note: The output of this SVC will be set to RESULT_SUCCESS if the thread |
| 283 | return HLE::RESULT_INVALID; | 287 | // resumes due to a signal in its wait objects. |
| 288 | // Otherwise we retain the default value of timeout. | ||
| 289 | return ERR_SYNC_TIMEOUT; | ||
| 284 | } | 290 | } |
| 285 | 291 | ||
| 286 | object->Acquire(); | 292 | object->Acquire(); |
| @@ -291,11 +297,7 @@ static ResultCode WaitSynchronization1(Handle handle, s64 nano_seconds) { | |||
| 291 | /// Wait for the given handles to synchronize, timeout after the specified nanoseconds | 297 | /// Wait for the given handles to synchronize, timeout after the specified nanoseconds |
| 292 | static ResultCode WaitSynchronizationN(s32* out, Handle* handles, s32 handle_count, bool wait_all, | 298 | static ResultCode WaitSynchronizationN(s32* out, Handle* handles, s32 handle_count, bool wait_all, |
| 293 | s64 nano_seconds) { | 299 | s64 nano_seconds) { |
| 294 | bool wait_thread = !wait_all; | ||
| 295 | int handle_index = 0; | ||
| 296 | Kernel::Thread* thread = Kernel::GetCurrentThread(); | 300 | Kernel::Thread* thread = Kernel::GetCurrentThread(); |
| 297 | bool was_waiting = thread->waitsynch_waited; | ||
| 298 | thread->waitsynch_waited = false; | ||
| 299 | 301 | ||
| 300 | // Check if 'handles' is invalid | 302 | // Check if 'handles' is invalid |
| 301 | if (handles == nullptr) | 303 | if (handles == nullptr) |
| @@ -311,90 +313,113 @@ static ResultCode WaitSynchronizationN(s32* out, Handle* handles, s32 handle_cou | |||
| 311 | return ResultCode(ErrorDescription::OutOfRange, ErrorModule::OS, | 313 | return ResultCode(ErrorDescription::OutOfRange, ErrorModule::OS, |
| 312 | ErrorSummary::InvalidArgument, ErrorLevel::Usage); | 314 | ErrorSummary::InvalidArgument, ErrorLevel::Usage); |
| 313 | 315 | ||
| 314 | // If 'handle_count' is non-zero, iterate through each handle and wait the current thread if | 316 | using ObjectPtr = Kernel::SharedPtr<Kernel::WaitObject>; |
| 315 | // necessary | 317 | std::vector<ObjectPtr> objects(handle_count); |
| 316 | if (handle_count != 0) { | 318 | |
| 317 | bool selected = false; // True once an object has been selected | 319 | for (int i = 0; i < handle_count; ++i) { |
| 318 | 320 | auto object = Kernel::g_handle_table.GetWaitObject(handles[i]); | |
| 319 | Kernel::SharedPtr<Kernel::WaitObject> wait_object; | 321 | if (object == nullptr) |
| 320 | 322 | return ERR_INVALID_HANDLE; | |
| 321 | for (int i = 0; i < handle_count; ++i) { | 323 | objects[i] = object; |
| 322 | auto object = Kernel::g_handle_table.GetWaitObject(handles[i]); | ||
| 323 | if (object == nullptr) | ||
| 324 | return ERR_INVALID_HANDLE; | ||
| 325 | |||
| 326 | // Check if the current thread should wait on this object... | ||
| 327 | if (object->ShouldWait()) { | ||
| 328 | |||
| 329 | // Check we are waiting on all objects... | ||
| 330 | if (wait_all) | ||
| 331 | // Wait the thread | ||
| 332 | wait_thread = true; | ||
| 333 | } else { | ||
| 334 | // Do not wait on this object, check if this object should be selected... | ||
| 335 | if (!wait_all && (!selected || (wait_object == object && was_waiting))) { | ||
| 336 | // Do not wait the thread | ||
| 337 | wait_thread = false; | ||
| 338 | handle_index = i; | ||
| 339 | wait_object = object; | ||
| 340 | selected = true; | ||
| 341 | } | ||
| 342 | } | ||
| 343 | } | ||
| 344 | } else { | ||
| 345 | // If no handles were passed in, put the thread to sleep only when 'wait_all' is false | ||
| 346 | // NOTE: This should deadlock the current thread if no timeout was specified | ||
| 347 | if (!wait_all) { | ||
| 348 | wait_thread = true; | ||
| 349 | } | ||
| 350 | } | 324 | } |
| 351 | 325 | ||
| 352 | SCOPE_EXIT({ | 326 | // Clear the mapping of wait object indices. |
| 353 | HLE::Reschedule("WaitSynchronizationN"); | 327 | // We don't want any lingering state in this map. |
| 354 | }); // Reschedule after putting the threads to sleep. | 328 | // It will be repopulated later in the wait_all = false case. |
| 329 | thread->wait_objects_index.clear(); | ||
| 330 | |||
| 331 | if (wait_all) { | ||
| 332 | bool all_available = | ||
| 333 | std::all_of(objects.begin(), objects.end(), | ||
| 334 | [](const ObjectPtr& object) { return !object->ShouldWait(); }); | ||
| 335 | if (all_available) { | ||
| 336 | // We can acquire all objects right now, do so. | ||
| 337 | for (auto& object : objects) | ||
| 338 | object->Acquire(); | ||
| 339 | // Note: In this case, the `out` parameter is not set, | ||
| 340 | // and retains whatever value it had before. | ||
| 341 | return RESULT_SUCCESS; | ||
| 342 | } | ||
| 343 | |||
| 344 | // Not all objects were available right now, prepare to suspend the thread. | ||
| 355 | 345 | ||
| 356 | // If thread should wait, then set its state to waiting | 346 | // If a timeout value of 0 was provided, just return the Timeout error code instead of |
| 357 | if (wait_thread) { | 347 | // suspending the thread. |
| 348 | if (nano_seconds == 0) | ||
| 349 | return ERR_SYNC_TIMEOUT; | ||
| 358 | 350 | ||
| 359 | // Actually wait the current thread on each object if we decided to wait... | 351 | // Put the thread to sleep |
| 360 | std::vector<SharedPtr<Kernel::WaitObject>> wait_objects; | 352 | thread->status = THREADSTATUS_WAIT_SYNCH; |
| 361 | wait_objects.reserve(handle_count); | ||
| 362 | 353 | ||
| 363 | for (int i = 0; i < handle_count; ++i) { | 354 | // Add the thread to each of the objects' waiting threads. |
| 364 | auto object = Kernel::g_handle_table.GetWaitObject(handles[i]); | 355 | for (auto& object : objects) { |
| 365 | object->AddWaitingThread(Kernel::GetCurrentThread()); | 356 | object->AddWaitingThread(thread); |
| 366 | wait_objects.push_back(object); | 357 | // TODO(Subv): Perform things like update the mutex lock owner's priority to |
| 358 | // prevent priority inversion. Currently this is done in Mutex::ShouldWait, | ||
| 359 | // but it should be moved to a function that is called from here. | ||
| 367 | } | 360 | } |
| 368 | 361 | ||
| 369 | Kernel::WaitCurrentThread_WaitSynchronization(std::move(wait_objects), true, wait_all); | 362 | // Set the thread's waitlist to the list of objects passed to WaitSynchronizationN |
| 363 | thread->wait_objects = std::move(objects); | ||
| 370 | 364 | ||
| 371 | // Create an event to wake the thread up after the specified nanosecond delay has passed | 365 | // Create an event to wake the thread up after the specified nanosecond delay has passed |
| 372 | Kernel::GetCurrentThread()->WakeAfterDelay(nano_seconds); | 366 | thread->WakeAfterDelay(nano_seconds); |
| 373 | |||
| 374 | // NOTE: output of this SVC will be set later depending on how the thread resumes | ||
| 375 | return HLE::RESULT_INVALID; | ||
| 376 | } | ||
| 377 | 367 | ||
| 378 | // Acquire objects if we did not wait... | 368 | // This value gets set to -1 by default in this case, it is not modified after this. |
| 379 | for (int i = 0; i < handle_count; ++i) { | 369 | *out = -1; |
| 380 | auto object = Kernel::g_handle_table.GetWaitObject(handles[i]); | 370 | // Note: The output of this SVC will be set to RESULT_SUCCESS if the thread resumes due to |
| 371 | // a signal in one of its wait objects. | ||
| 372 | return ERR_SYNC_TIMEOUT; | ||
| 373 | } else { | ||
| 374 | // Find the first object that is acquirable in the provided list of objects | ||
| 375 | auto itr = std::find_if(objects.begin(), objects.end(), | ||
| 376 | [](const ObjectPtr& object) { return !object->ShouldWait(); }); | ||
| 381 | 377 | ||
| 382 | // Acquire the object if it is not waiting... | 378 | if (itr != objects.end()) { |
| 383 | if (!object->ShouldWait()) { | 379 | // We found a ready object, acquire it and set the result value |
| 380 | Kernel::WaitObject* object = itr->get(); | ||
| 384 | object->Acquire(); | 381 | object->Acquire(); |
| 382 | *out = std::distance(objects.begin(), itr); | ||
| 383 | return RESULT_SUCCESS; | ||
| 384 | } | ||
| 385 | |||
| 386 | // No objects were ready to be acquired, prepare to suspend the thread. | ||
| 387 | |||
| 388 | // If a timeout value of 0 was provided, just return the Timeout error code instead of | ||
| 389 | // suspending the thread. | ||
| 390 | if (nano_seconds == 0) | ||
| 391 | return ERR_SYNC_TIMEOUT; | ||
| 392 | |||
| 393 | // Put the thread to sleep | ||
| 394 | thread->status = THREADSTATUS_WAIT_SYNCH; | ||
| 385 | 395 | ||
| 386 | // If this was the first non-waiting object and 'wait_all' is false, don't acquire | 396 | // Clear the thread's waitlist, we won't use it for wait_all = false |
| 387 | // any other objects | 397 | thread->wait_objects.clear(); |
| 388 | if (!wait_all) | 398 | |
| 389 | break; | 399 | // Add the thread to each of the objects' waiting threads. |
| 400 | for (size_t i = 0; i < objects.size(); ++i) { | ||
| 401 | Kernel::WaitObject* object = objects[i].get(); | ||
| 402 | // Set the index of this object in the mapping of Objects -> index for this thread. | ||
| 403 | thread->wait_objects_index[object->GetObjectId()] = static_cast<int>(i); | ||
| 404 | object->AddWaitingThread(thread); | ||
| 405 | // TODO(Subv): Perform things like update the mutex lock owner's priority to | ||
| 406 | // prevent priority inversion. Currently this is done in Mutex::ShouldWait, | ||
| 407 | // but it should be moved to a function that is called from here. | ||
| 390 | } | 408 | } |
| 391 | } | ||
| 392 | 409 | ||
| 393 | // TODO(bunnei): If 'wait_all' is true, this is probably wrong. However, real hardware does | 410 | // Note: If no handles and no timeout were given, then the thread will deadlock, this is |
| 394 | // not seem to set it to any meaningful value. | 411 | // consistent with hardware behavior. |
| 395 | *out = handle_count != 0 ? (wait_all ? -1 : handle_index) : 0; | ||
| 396 | 412 | ||
| 397 | return RESULT_SUCCESS; | 413 | // Create an event to wake the thread up after the specified nanosecond delay has passed |
| 414 | thread->WakeAfterDelay(nano_seconds); | ||
| 415 | |||
| 416 | // Note: The output of this SVC will be set to RESULT_SUCCESS if the thread resumes due to a | ||
| 417 | // signal in one of its wait objects. | ||
| 418 | // Otherwise we retain the default value of timeout, and -1 in the out parameter | ||
| 419 | thread->wait_set_output = true; | ||
| 420 | *out = -1; | ||
| 421 | return ERR_SYNC_TIMEOUT; | ||
| 422 | } | ||
| 398 | } | 423 | } |
| 399 | 424 | ||
| 400 | /// Create an address arbiter (to allocate access to shared resources) | 425 | /// Create an address arbiter (to allocate access to shared resources) |
| @@ -1159,6 +1184,8 @@ void CallSVC(u32 immediate) { | |||
| 1159 | if (info) { | 1184 | if (info) { |
| 1160 | if (info->func) { | 1185 | if (info->func) { |
| 1161 | info->func(); | 1186 | info->func(); |
| 1187 | // TODO(Subv): Not all service functions should cause a reschedule in all cases. | ||
| 1188 | HLE::Reschedule(__func__); | ||
| 1162 | } else { | 1189 | } else { |
| 1163 | LOG_ERROR(Kernel_SVC, "unimplemented SVC function %s(..)", info->name); | 1190 | LOG_ERROR(Kernel_SVC, "unimplemented SVC function %s(..)", info->name); |
| 1164 | } | 1191 | } |
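As a closing illustration, here is the control flow of the reworked WaitSynchronizationN condensed into a compilable sketch. Error codes, the handle table and the suspend/wake plumbing are reduced to stand-ins (Result, FakeObject, plain vectors), so only the shape of the wait_all and wait-any branches from the svc.cpp diff is preserved.

```cpp
// Editor's sketch of the reworked WaitSynchronizationN control flow.
// Real error codes, handle tables and scheduling are replaced with stand-ins.
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <memory>
#include <vector>

struct FakeObject {
    bool signaled = false;
    bool ShouldWait() const { return !signaled; }
    void Acquire() { /* consume the signal, take the mutex, etc. */ }
};

enum class Result { Success, Timeout };

// Returns the SVC result; on Success with wait_all == false, *out holds the
// index of the object that was acquired.
Result WaitSynchronizationN(int32_t* out,
                            std::vector<std::shared_ptr<FakeObject>>& objects,
                            bool wait_all, int64_t nano_seconds) {
    if (wait_all) {
        bool all_available = std::all_of(
            objects.begin(), objects.end(),
            [](const std::shared_ptr<FakeObject>& o) { return !o->ShouldWait(); });
        if (all_available) {
            for (auto& o : objects)
                o->Acquire();
            return Result::Success; // *out is left untouched in this branch
        }
        if (nano_seconds == 0)
            return Result::Timeout; // never suspend on a zero timeout
        // ...suspend the thread; a later signal overwrites the result...
        return Result::Timeout;
    }

    // Wait-any: take the first object that is already acquirable.
    auto itr = std::find_if(objects.begin(), objects.end(),
                            [](const std::shared_ptr<FakeObject>& o) { return !o->ShouldWait(); });
    if (itr != objects.end()) {
        (*itr)->Acquire();
        *out = static_cast<int32_t>(std::distance(objects.begin(), itr));
        return Result::Success;
    }

    if (nano_seconds == 0)
        return Result::Timeout;
    // ...suspend; on wakeup the signalling object writes its index into *out...
    *out = -1;
    return Result::Timeout;
}

int main() {
    auto a = std::make_shared<FakeObject>();
    auto b = std::make_shared<FakeObject>();
    b->signaled = true;
    std::vector<std::shared_ptr<FakeObject>> objects{a, b};
    int32_t out = -1;
    Result r = WaitSynchronizationN(&out, objects, /*wait_all=*/false, /*nano_seconds=*/0);
    return (r == Result::Success && out == 1) ? 0 : 1;
}
```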