Diffstat (limited to 'src')
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.cpp  22
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.h      8
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp                26
-rw-r--r--  src/core/hle/kernel/k_thread.cpp                   38
-rw-r--r--  src/core/hle/kernel/k_thread.h                      8
5 files changed, 76 insertions, 26 deletions
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index 65576b8c4..fd911a3a5 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -49,4 +49,26 @@ bool GlobalSchedulerContext::IsLocked() const {
     return scheduler_lock.IsLockedByCurrentThread();
 }
 
+void GlobalSchedulerContext::RegisterDummyThreadForWakeup(KThread* thread) {
+    ASSERT(IsLocked());
+
+    woken_dummy_threads.insert(thread);
+}
+
+void GlobalSchedulerContext::UnregisterDummyThreadForWakeup(KThread* thread) {
+    ASSERT(IsLocked());
+
+    woken_dummy_threads.erase(thread);
+}
+
+void GlobalSchedulerContext::WakeupWaitingDummyThreads() {
+    ASSERT(IsLocked());
+
+    for (auto* thread : woken_dummy_threads) {
+        thread->DummyThreadEndWait();
+    }
+
+    woken_dummy_threads.clear();
+}
+
 } // namespace Kernel
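
For orientation, a minimal standalone sketch of the deferred-wakeup pattern these three functions implement: pointers are registered and unregistered while the guarding lock is held, and the whole set is flushed once when the lock is about to be released. Waiter and WakeupRegistry below are illustrative stand-ins, not yuzu types.

// Sketch only: simplified stand-in types, assuming the caller holds the
// lock that guards the registry (mirrors the ASSERT(IsLocked()) calls above).
#include <set>

struct Waiter {
    virtual ~Waiter() = default;
    virtual void EndWait() = 0; // wake the blocked host thread
};

class WakeupRegistry {
public:
    void Register(Waiter* w) { pending.insert(w); }  // thread became runnable
    void Unregister(Waiter* w) { pending.erase(w); } // thread went back to sleep

    // Called once, just before the guarding lock is dropped: every waiter
    // still registered is woken, then the set is cleared.
    void FlushWakeups() {
        for (auto* w : pending) {
            w->EndWait();
        }
        pending.clear();
    }

private:
    std::set<Waiter*> pending;
};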
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
index 67bb9852d..220ed6192 100644
--- a/src/core/hle/kernel/global_scheduler_context.h
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -4,6 +4,7 @@
 #pragma once
 
 #include <atomic>
+#include <set>
 #include <vector>
 
 #include "common/common_types.h"
@@ -58,6 +59,10 @@ public:
     /// Returns true if the global scheduler lock is acquired
     bool IsLocked() const;
 
+    void UnregisterDummyThreadForWakeup(KThread* thread);
+    void RegisterDummyThreadForWakeup(KThread* thread);
+    void WakeupWaitingDummyThreads();
+
     [[nodiscard]] LockType& SchedulerLock() {
         return scheduler_lock;
     }
@@ -76,6 +81,9 @@ private:
     KSchedulerPriorityQueue priority_queue;
     LockType scheduler_lock;
 
+    /// Lists dummy threads pending wakeup on lock release
+    std::set<KThread*> woken_dummy_threads;
+
     /// Lists all thread ids that aren't deleted/etc.
     std::vector<KThread*> thread_list;
     std::mutex global_list_guard;
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index c34ce7a17..b1cabbca0 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -81,8 +81,8 @@ void KScheduler::RescheduleCurrentHLEThread(KernelCore& kernel) {
     // HACK: we cannot schedule from this thread, it is not a core thread
     ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
 
-    // Special case to ensure dummy threads that are waiting block
-    GetCurrentThread(kernel).IfDummyThreadTryWait();
+    // Ensure dummy threads that are waiting block.
+    GetCurrentThread(kernel).DummyThreadBeginWait();
 
     ASSERT(GetCurrentThread(kernel).GetState() != ThreadState::Waiting);
     GetCurrentThread(kernel).EnableDispatch();
@@ -314,6 +314,16 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
         idle_cores &= ~(1ULL << core_id);
     }
 
+    // HACK: any waiting dummy threads can wake up now.
+    kernel.GlobalSchedulerContext().WakeupWaitingDummyThreads();
+
+    // HACK: if we are a dummy thread, and we need to go sleep, indicate
+    // that for when the lock is released.
+    KThread* const cur_thread = GetCurrentThreadPointer(kernel);
+    if (cur_thread->IsDummyThread() && cur_thread->GetState() != ThreadState::Runnable) {
+        cur_thread->RequestDummyThreadWait();
+    }
+
     return cores_needing_scheduling;
 }
 
@@ -531,11 +541,23 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, Threa
         GetPriorityQueue(kernel).Remove(thread);
         IncrementScheduledCount(thread);
         SetSchedulerUpdateNeeded(kernel);
+
+        if (thread->IsDummyThread()) {
+            // HACK: if this is a dummy thread, it should no longer wake up when the
+            // scheduler lock is released.
+            kernel.GlobalSchedulerContext().UnregisterDummyThreadForWakeup(thread);
+        }
     } else if (cur_state == ThreadState::Runnable) {
         // If we're now runnable, then we weren't previously, and we should add.
         GetPriorityQueue(kernel).PushBack(thread);
         IncrementScheduledCount(thread);
         SetSchedulerUpdateNeeded(kernel);
+
+        if (thread->IsDummyThread()) {
+            // HACK: if this is a dummy thread, it should wake up when the scheduler
+            // lock is released.
+            kernel.GlobalSchedulerContext().RegisterDummyThreadForWakeup(thread);
+        }
     }
 }
 
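A compressed, self-contained model of the new tail of UpdateHighestPriorityThreadsImpl may help: while the scheduler lock is still held, dummy threads that became Runnable are woken, and a dummy thread that made itself non-Runnable is flagged so it really blocks once the lock is released. HostThread and EndOfSchedulingUpdate are hypothetical names for illustration only, not yuzu APIs.

// Illustrative model only; the real logic lives in KScheduler and KThread.
#include <atomic>
#include <vector>

enum class State { Runnable, Waiting };

struct HostThread {
    State state{State::Runnable};
    std::atomic<bool> runnable{true}; // plays the role of dummy_thread_runnable
    bool is_dummy{true};
};

// Runs with the scheduler lock held, just before cores_needing_scheduling is returned.
void EndOfSchedulingUpdate(std::vector<HostThread*>& pending_wakeups, HostThread& current) {
    // 1) Dummy threads registered as newly Runnable are released while the
    //    lock still serializes access to their state.
    for (auto* t : pending_wakeups) {
        t->runnable.store(true);
        t->runnable.notify_one();
    }
    pending_wakeups.clear();

    // 2) If the current host (dummy) thread has put itself to sleep in guest
    //    terms, mark it so it actually blocks once the lock is released.
    if (current.is_dummy && current.state != State::Runnable) {
        current.runnable.store(false);
    }
}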
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index b7bfcdce3..d57b42fdf 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -148,7 +148,9 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
     physical_affinity_mask.SetAffinity(phys_core, true);
 
     // Set the thread state.
-    thread_state = (type == ThreadType::Main) ? ThreadState::Runnable : ThreadState::Initialized;
+    thread_state = (type == ThreadType::Main || type == ThreadType::Dummy)
+                       ? ThreadState::Runnable
+                       : ThreadState::Initialized;
 
     // Set TLS address.
     tls_address = 0;
@@ -1174,30 +1176,29 @@ Result KThread::Sleep(s64 timeout) {
     R_SUCCEED();
 }
 
-void KThread::IfDummyThreadTryWait() {
-    if (!IsDummyThread()) {
-        return;
-    }
-
-    if (GetState() != ThreadState::Waiting) {
-        return;
-    }
+void KThread::RequestDummyThreadWait() {
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+    ASSERT(this->IsDummyThread());
 
+    // We will block when the scheduler lock is released.
+    dummy_thread_runnable.store(false);
+}
+
+void KThread::DummyThreadBeginWait() {
+    ASSERT(this->IsDummyThread());
     ASSERT(!kernel.IsPhantomModeForSingleCore());
 
-    // Block until we are no longer waiting.
-    std::unique_lock lk(dummy_wait_lock);
-    dummy_wait_cv.wait(
-        lk, [&] { return GetState() != ThreadState::Waiting || kernel.IsShuttingDown(); });
+    // Block until runnable is no longer false.
+    dummy_thread_runnable.wait(false);
 }
 
-void KThread::IfDummyThreadEndWait() {
-    if (!IsDummyThread()) {
-        return;
-    }
+void KThread::DummyThreadEndWait() {
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+    ASSERT(this->IsDummyThread());
 
     // Wake up the waiting thread.
-    dummy_wait_cv.notify_one();
+    dummy_thread_runnable.store(true);
+    dummy_thread_runnable.notify_one();
 }
 
 void KThread::BeginWait(KThreadQueue* queue) {
@@ -1231,9 +1232,6 @@ void KThread::EndWait(Result wait_result_) {
         }
 
         wait_queue->EndWait(this, wait_result_);
-
-        // Special case for dummy threads to wakeup if necessary.
-        IfDummyThreadEndWait();
     }
 }
 
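The k_thread.cpp change above swaps the mutex/condition-variable pair for a C++20 std::atomic<bool> with wait()/notify_one(): as long as the flag is flipped before notifying, the waiter cannot sleep through a wakeup. A small standalone demo of that primitive (not yuzu code; BeginWait/EndWait are placeholder names):

// Minimal C++20 demo of atomic wait/notify, the primitive the new
// DummyThreadBeginWait/DummyThreadEndWait pair is built on.
#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>

void BeginWait(std::atomic<bool>& runnable) {
    // Blocks while the flag compares equal to false.
    runnable.wait(false);
}

void EndWait(std::atomic<bool>& runnable) {
    // Flip the flag first, then notify, so the waiter never misses the wakeup.
    runnable.store(true);
    runnable.notify_one();
}

int main() {
    std::atomic<bool> runnable{false};

    std::thread dummy([&] {
        BeginWait(runnable); // parks this host thread
        std::cout << "dummy thread resumed\n";
    });

    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    EndWait(runnable); // wake it up
    dummy.join();
    return 0;
}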
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index e2a27d603..30aa10c9a 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -643,8 +643,9 @@ public:
     // therefore will not block on guest kernel synchronization primitives. These methods handle
     // blocking as needed.
 
-    void IfDummyThreadTryWait();
-    void IfDummyThreadEndWait();
+    void RequestDummyThreadWait();
+    void DummyThreadBeginWait();
+    void DummyThreadEndWait();
 
     [[nodiscard]] uintptr_t GetArgument() const {
         return argument;
@@ -777,8 +778,7 @@ private:
     bool is_single_core{};
     ThreadType thread_type{};
     StepState step_state{};
-    std::mutex dummy_wait_lock;
-    std::condition_variable dummy_wait_cv;
+    std::atomic<bool> dummy_thread_runnable{true};
 
     // For debugging
     std::vector<KSynchronizationObject*> wait_objects_for_debugging;