author      bunnei  2021-01-20 13:42:27 -0800
committer   bunnei  2021-01-28 21:42:26 -0800
commit      cdd14b03e5c8e29bc6cd11bbde0ef726d2f166ce (patch)
tree        987f6cb5d3f1955dc88f5ac2c1d5c1329d787fc4
parent      kernel: svc_types: Add ThreadActivity. (diff)
hle: kernel: Recode implementation of KThread to be more accurate.
-rw-r--r--  src/core/cpu_manager.cpp                  3
-rw-r--r--  src/core/hle/kernel/hle_ipc.h             3
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp     144
-rw-r--r--  src/core/hle/kernel/k_scheduler.h        24
-rw-r--r--  src/core/hle/kernel/k_thread.cpp        993
-rw-r--r--  src/core/hle/kernel/k_thread.h          590
-rw-r--r--  src/core/hle/kernel/kernel.cpp           10
-rw-r--r--  src/core/hle/kernel/process.cpp          74
-rw-r--r--  src/core/hle/kernel/process.h            63
-rw-r--r--  src/core/hle/kernel/server_session.cpp    2
-rw-r--r--  src/core/hle/kernel/svc.cpp             355
-rw-r--r--  src/core/hle/kernel/svc_types.h           6
-rw-r--r--  src/core/hle/kernel/svc_wrap.h           56
-rw-r--r--  src/yuzu/debugger/wait_tree.cpp          24
14 files changed, 1562 insertions, 785 deletions
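
Much of the k_thread.cpp rework below tracks suspension as per-type request bits that are folded into the thread state word whenever the state is recomputed (see RequestSuspend, Resume, Suspend, and Continue in the diff). A minimal standalone C++ sketch of that packing scheme follows; the constant values and the effective-flags helper are simplified assumptions, not the commit's exact declarations.

    #include <cstdint>

    // Simplified stand-ins for the kernel's ThreadState/SuspendType constants.
    enum class SuspendType : std::uint32_t { Process = 0, Thread = 1, Debug = 2 };
    constexpr std::uint32_t StateMask = 0xF;      // low bits: scheduling state
    constexpr std::uint32_t SuspendShift = 4;     // high bits: one flag per SuspendType

    struct SuspendFlags {
        std::uint32_t request = 0;                // bits set by RequestSuspend()
        std::uint32_t allowed = 0x70;             // assumption: all suspend types allowed

        // Assumed equivalent of GetSuspendFlags(): only honored requests count.
        std::uint32_t Effective() const { return request & allowed; }

        void Request(SuspendType type) {
            request |= 1u << (SuspendShift + static_cast<std::uint32_t>(type));
        }

        // Fold suspend bits into the state word, mirroring KThread::Suspend()/Resume().
        std::uint32_t Pack(std::uint32_t current_state) const {
            return Effective() | (current_state & StateMask);
        }
    };
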
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 719258250..9bbb82b97 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -279,8 +279,7 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
279 { 279 {
280 auto& scheduler = system.Kernel().Scheduler(current_core); 280 auto& scheduler = system.Kernel().Scheduler(current_core);
281 scheduler.Reload(scheduler.GetCurrentThread()); 281 scheduler.Reload(scheduler.GetCurrentThread());
282 auto* currrent_thread2 = scheduler.GetCurrentThread(); 282 if (!scheduler.IsIdle()) {
283 if (!currrent_thread2->IsKernelThread()) {
284 idle_count = 0; 283 idle_count = 0;
285 } 284 }
286 } 285 }
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index b8a746882..9f764c79a 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -126,9 +126,6 @@ public:
126 return server_session; 126 return server_session;
127 } 127 }
128 128
129 using WakeupCallback = std::function<void(
130 std::shared_ptr<KThread> thread, HLERequestContext& context, ThreadWakeupReason reason)>;
131
132 /// Populates this context with data from the requesting process/thread. 129 /// Populates this context with data from the requesting process/thread.
133 ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table, 130 ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table,
134 u32_le* src_cmdbuf); 131 u32_le* src_cmdbuf);
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 233022023..5bdbd9a9b 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -31,11 +31,15 @@ static void IncrementScheduledCount(Kernel::KThread* thread) {
31 } 31 }
32} 32}
33 33
34void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule, 34void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule) {
35 Core::EmuThreadHandle global_thread) { 35 auto scheduler = kernel.CurrentScheduler();
36 const u32 current_core = global_thread.host_handle; 36
37 bool must_context_switch = global_thread.guest_handle != InvalidHandle && 37 u32 current_core{0xF};
38 (current_core < Core::Hardware::NUM_CPU_CORES); 38 bool must_context_switch{};
39 if (scheduler) {
40 current_core = scheduler->core_id;
41 must_context_switch = true;
42 }
39 43
40 while (cores_pending_reschedule != 0) { 44 while (cores_pending_reschedule != 0) {
41 const auto core = static_cast<u32>(std::countr_zero(cores_pending_reschedule)); 45 const auto core = static_cast<u32>(std::countr_zero(cores_pending_reschedule));
@@ -58,26 +62,25 @@ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedul
58 62
59u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) { 63u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
60 std::scoped_lock lock{guard}; 64 std::scoped_lock lock{guard};
61 if (KThread* prev_highest_thread = this->state.highest_priority_thread; 65 if (KThread* prev_highest_thread = state.highest_priority_thread;
62 prev_highest_thread != highest_thread) { 66 prev_highest_thread != highest_thread) {
63 if (prev_highest_thread != nullptr) { 67 if (prev_highest_thread != nullptr) {
64 IncrementScheduledCount(prev_highest_thread); 68 IncrementScheduledCount(prev_highest_thread);
65 prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks()); 69 prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks());
66 } 70 }
67 if (this->state.should_count_idle) { 71 if (state.should_count_idle) {
68 if (highest_thread != nullptr) { 72 if (highest_thread != nullptr) {
69 // if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) { 73 if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
70 // process->SetRunningThread(this->core_id, highest_thread, 74 process->SetRunningThread(core_id, highest_thread, state.idle_count);
71 // this->state.idle_count); 75 }
72 //}
73 } else { 76 } else {
74 this->state.idle_count++; 77 state.idle_count++;
75 } 78 }
76 } 79 }
77 80
78 this->state.highest_priority_thread = highest_thread; 81 state.highest_priority_thread = highest_thread;
79 this->state.needs_scheduling = true; 82 state.needs_scheduling = true;
80 return (1ULL << this->core_id); 83 return (1ULL << core_id);
81 } else { 84 } else {
82 return 0; 85 return 0;
83 } 86 }
@@ -99,7 +102,20 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
99 KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id)); 102 KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
100 if (top_thread != nullptr) { 103 if (top_thread != nullptr) {
101 // If the thread has no waiters, we need to check if the process has a thread pinned. 104 // If the thread has no waiters, we need to check if the process has a thread pinned.
102 // TODO(bunnei): Implement thread pinning 105 if (top_thread->GetNumKernelWaiters() == 0) {
106 if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) {
107 if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id));
108 pinned != nullptr && pinned != top_thread) {
109 // We prefer our parent's pinned thread if possible. However, we also don't
110 // want to schedule un-runnable threads.
111 if (pinned->GetRawState() == ThreadState::Runnable) {
112 top_thread = pinned;
113 } else {
114 top_thread = nullptr;
115 }
116 }
117 }
118 }
103 } else { 119 } else {
104 idle_cores |= (1ULL << core_id); 120 idle_cores |= (1ULL << core_id);
105 } 121 }
@@ -182,6 +198,19 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
182 return cores_needing_scheduling; 198 return cores_needing_scheduling;
183} 199}
184 200
201void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) {
202 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
203 for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) {
204 // Get an atomic reference to the core scheduler's previous thread.
205 std::atomic_ref<KThread*> prev_thread(kernel.Scheduler(static_cast<s32>(i)).prev_thread);
206 static_assert(std::atomic_ref<KThread*>::is_always_lock_free);
207
208 // Atomically clear the previous thread if it's our target.
209 KThread* compare = thread;
210 prev_thread.compare_exchange_strong(compare, nullptr);
211 }
212}
213
185void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) { 214void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) {
186 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 215 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
187 216
@@ -352,12 +381,14 @@ void KScheduler::DisableScheduling(KernelCore& kernel) {
352 } 381 }
353} 382}
354 383
355void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling, 384void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
356 Core::EmuThreadHandle global_thread) {
357 if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { 385 if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
358 scheduler->GetCurrentThread()->EnableDispatch(); 386 ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
387 if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
388 scheduler->GetCurrentThread()->EnableDispatch();
389 }
359 } 390 }
360 RescheduleCores(kernel, cores_needing_scheduling, global_thread); 391 RescheduleCores(kernel, cores_needing_scheduling);
361} 392}
362 393
363u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { 394u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -372,15 +403,13 @@ KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
372 return kernel.GlobalSchedulerContext().priority_queue; 403 return kernel.GlobalSchedulerContext().priority_queue;
373} 404}
374 405
375void KScheduler::YieldWithoutCoreMigration() { 406void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
376 auto& kernel = system.Kernel();
377
378 // Validate preconditions. 407 // Validate preconditions.
379 ASSERT(CanSchedule(kernel)); 408 ASSERT(CanSchedule(kernel));
380 ASSERT(kernel.CurrentProcess() != nullptr); 409 ASSERT(kernel.CurrentProcess() != nullptr);
381 410
382 // Get the current thread and process. 411 // Get the current thread and process.
383 KThread& cur_thread = *GetCurrentThread(); 412 KThread& cur_thread = Kernel::GetCurrentThread(kernel);
384 Process& cur_process = *kernel.CurrentProcess(); 413 Process& cur_process = *kernel.CurrentProcess();
385 414
386 // If the thread's yield count matches, there's nothing for us to do. 415 // If the thread's yield count matches, there's nothing for us to do.
@@ -413,15 +442,13 @@ void KScheduler::YieldWithoutCoreMigration() {
413 } 442 }
414} 443}
415 444
416void KScheduler::YieldWithCoreMigration() { 445void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
417 auto& kernel = system.Kernel();
418
419 // Validate preconditions. 446 // Validate preconditions.
420 ASSERT(CanSchedule(kernel)); 447 ASSERT(CanSchedule(kernel));
421 ASSERT(kernel.CurrentProcess() != nullptr); 448 ASSERT(kernel.CurrentProcess() != nullptr);
422 449
423 // Get the current thread and process. 450 // Get the current thread and process.
424 KThread& cur_thread = *GetCurrentThread(); 451 KThread& cur_thread = Kernel::GetCurrentThread(kernel);
425 Process& cur_process = *kernel.CurrentProcess(); 452 Process& cur_process = *kernel.CurrentProcess();
426 453
427 // If the thread's yield count matches, there's nothing for us to do. 454 // If the thread's yield count matches, there's nothing for us to do.
@@ -503,15 +530,13 @@ void KScheduler::YieldWithCoreMigration() {
503 } 530 }
504} 531}
505 532
506void KScheduler::YieldToAnyThread() { 533void KScheduler::YieldToAnyThread(KernelCore& kernel) {
507 auto& kernel = system.Kernel();
508
509 // Validate preconditions. 534 // Validate preconditions.
510 ASSERT(CanSchedule(kernel)); 535 ASSERT(CanSchedule(kernel));
511 ASSERT(kernel.CurrentProcess() != nullptr); 536 ASSERT(kernel.CurrentProcess() != nullptr);
512 537
513 // Get the current thread and process. 538 // Get the current thread and process.
514 KThread& cur_thread = *GetCurrentThread(); 539 KThread& cur_thread = Kernel::GetCurrentThread(kernel);
515 Process& cur_process = *kernel.CurrentProcess(); 540 Process& cur_process = *kernel.CurrentProcess();
516 541
517 // If the thread's yield count matches, there's nothing for us to do. 542 // If the thread's yield count matches, there's nothing for us to do.
@@ -581,15 +606,14 @@ void KScheduler::YieldToAnyThread() {
581 } 606 }
582} 607}
583 608
584KScheduler::KScheduler(Core::System& system, std::size_t core_id) 609KScheduler::KScheduler(Core::System& system, s32 core_id) : system(system), core_id(core_id) {
585 : system(system), core_id(core_id) {
586 switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this); 610 switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
587 this->state.needs_scheduling = true; 611 state.needs_scheduling = true;
588 this->state.interrupt_task_thread_runnable = false; 612 state.interrupt_task_thread_runnable = false;
589 this->state.should_count_idle = false; 613 state.should_count_idle = false;
590 this->state.idle_count = 0; 614 state.idle_count = 0;
591 this->state.idle_thread_stack = nullptr; 615 state.idle_thread_stack = nullptr;
592 this->state.highest_priority_thread = nullptr; 616 state.highest_priority_thread = nullptr;
593} 617}
594 618
595KScheduler::~KScheduler() = default; 619KScheduler::~KScheduler() = default;
@@ -613,7 +637,7 @@ void KScheduler::RescheduleCurrentCore() {
613 phys_core.ClearInterrupt(); 637 phys_core.ClearInterrupt();
614 } 638 }
615 guard.lock(); 639 guard.lock();
616 if (this->state.needs_scheduling) { 640 if (state.needs_scheduling) {
617 Schedule(); 641 Schedule();
618 } else { 642 } else {
619 guard.unlock(); 643 guard.unlock();
@@ -625,32 +649,34 @@ void KScheduler::OnThreadStart() {
625} 649}
626 650
627void KScheduler::Unload(KThread* thread) { 651void KScheduler::Unload(KThread* thread) {
652 LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
653
628 if (thread) { 654 if (thread) {
629 thread->SetIsRunning(false); 655 if (thread->IsCallingSvc()) {
630 if (thread->IsContinuousOnSVC()) {
631 system.ArmInterface(core_id).ExceptionalExit(); 656 system.ArmInterface(core_id).ExceptionalExit();
632 thread->SetContinuousOnSVC(false); 657 thread->ClearIsCallingSvc();
633 } 658 }
634 if (!thread->HasExited()) { 659 if (!thread->IsTerminationRequested()) {
660 prev_thread = thread;
661
635 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); 662 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
636 cpu_core.SaveContext(thread->GetContext32()); 663 cpu_core.SaveContext(thread->GetContext32());
637 cpu_core.SaveContext(thread->GetContext64()); 664 cpu_core.SaveContext(thread->GetContext64());
638 // Save the TPIDR_EL0 system register in case it was modified. 665 // Save the TPIDR_EL0 system register in case it was modified.
639 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); 666 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
640 cpu_core.ClearExclusiveState(); 667 cpu_core.ClearExclusiveState();
668 } else {
669 prev_thread = nullptr;
641 } 670 }
642 thread->context_guard.unlock();
643 } 671 }
644} 672}
645 673
646void KScheduler::Reload(KThread* thread) { 674void KScheduler::Reload(KThread* thread) {
675 LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread ? thread->GetName() : "nullptr");
676
647 if (thread) { 677 if (thread) {
648 ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable."); 678 ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
649 679
650 // Cancel any outstanding wakeup events for this thread
651 thread->SetIsRunning(true);
652 thread->SetWasRunning(false);
653
654 auto* const thread_owner_process = thread->GetOwnerProcess(); 680 auto* const thread_owner_process = thread->GetOwnerProcess();
655 if (thread_owner_process != nullptr) { 681 if (thread_owner_process != nullptr) {
656 system.Kernel().MakeCurrentProcess(thread_owner_process); 682 system.Kernel().MakeCurrentProcess(thread_owner_process);
@@ -676,7 +702,7 @@ void KScheduler::ScheduleImpl() {
676 KThread* previous_thread = current_thread; 702 KThread* previous_thread = current_thread;
677 current_thread = state.highest_priority_thread; 703 current_thread = state.highest_priority_thread;
678 704
679 this->state.needs_scheduling = false; 705 state.needs_scheduling = false;
680 706
681 if (current_thread == previous_thread) { 707 if (current_thread == previous_thread) {
682 guard.unlock(); 708 guard.unlock();
@@ -714,7 +740,7 @@ void KScheduler::SwitchToCurrent() {
714 { 740 {
715 std::scoped_lock lock{guard}; 741 std::scoped_lock lock{guard};
716 current_thread = state.highest_priority_thread; 742 current_thread = state.highest_priority_thread;
717 this->state.needs_scheduling = false; 743 state.needs_scheduling = false;
718 } 744 }
719 const auto is_switch_pending = [this] { 745 const auto is_switch_pending = [this] {
720 std::scoped_lock lock{guard}; 746 std::scoped_lock lock{guard};
@@ -722,13 +748,10 @@ void KScheduler::SwitchToCurrent() {
722 }; 748 };
723 do { 749 do {
724 if (current_thread != nullptr) { 750 if (current_thread != nullptr) {
725 current_thread->context_guard.lock();
726 if (current_thread->GetRawState() != ThreadState::Runnable) { 751 if (current_thread->GetRawState() != ThreadState::Runnable) {
727 current_thread->context_guard.unlock();
728 break; 752 break;
729 } 753 }
730 if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) { 754 if (static_cast<u32>(current_thread->GetActiveCore()) != core_id) {
731 current_thread->context_guard.unlock();
732 break; 755 break;
733 } 756 }
734 } 757 }
@@ -749,7 +772,7 @@ void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process)
749 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks; 772 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
750 773
751 if (thread != nullptr) { 774 if (thread != nullptr) {
752 thread->UpdateCPUTimeTicks(update_ticks); 775 thread->AddCpuTime(core_id, update_ticks);
753 } 776 }
754 777
755 if (process != nullptr) { 778 if (process != nullptr) {
@@ -763,15 +786,10 @@ void KScheduler::Initialize() {
763 std::string name = "Idle Thread Id:" + std::to_string(core_id); 786 std::string name = "Idle Thread Id:" + std::to_string(core_id);
764 std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc(); 787 std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
765 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); 788 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
766 auto thread_res = KThread::Create(system, ThreadType::Kernel, name, 0, 789 auto thread_res = KThread::Create(system, ThreadType::Main, name, 0,
767 KThread::IdleThreadPriority, 0, static_cast<u32>(core_id), 0, 790 KThread::IdleThreadPriority, 0, static_cast<u32>(core_id), 0,
768 nullptr, std::move(init_func), init_func_parameter); 791 nullptr, std::move(init_func), init_func_parameter);
769 idle_thread = thread_res.Unwrap().get(); 792 idle_thread = thread_res.Unwrap().get();
770
771 {
772 KScopedSchedulerLock lock{system.Kernel()};
773 idle_thread->SetState(ThreadState::Runnable);
774 }
775} 793}
776 794
777KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel) 795KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
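
For reference, the ClearPreviousThread helper added above clears a plain KThread* member atomically through std::atomic_ref instead of making the field itself std::atomic. A minimal standalone C++ sketch of that compare-exchange pattern, using placeholder types rather than the real KScheduler/KThread declarations:

    #include <atomic>
    #include <cstddef>

    struct Thread {};

    struct CoreScheduler {
        Thread* prev_thread = nullptr;  // plain member, cleared atomically below
    };

    // Null out 'target' in every core's prev_thread slot, as KScheduler::ClearPreviousThread
    // does for all CPU cores while the global scheduler lock is held.
    void ClearPreviousThread(CoreScheduler* schedulers, std::size_t num_cores, Thread* target) {
        for (std::size_t i = 0; i < num_cores; ++i) {
            std::atomic_ref<Thread*> prev(schedulers[i].prev_thread);
            static_assert(std::atomic_ref<Thread*>::is_always_lock_free);
            Thread* expected = target;
            // Swap to nullptr only if the slot still points at 'target'.
            prev.compare_exchange_strong(expected, nullptr);
        }
    }
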
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 157373934..2308a55be 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -33,15 +33,14 @@ class KThread;
33 33
34class KScheduler final { 34class KScheduler final {
35public: 35public:
36 explicit KScheduler(Core::System& system, std::size_t core_id); 36 explicit KScheduler(Core::System& system, s32 core_id);
37 ~KScheduler(); 37 ~KScheduler();
38 38
39 /// Reschedules to the next available thread (call after current thread is suspended) 39 /// Reschedules to the next available thread (call after current thread is suspended)
40 void RescheduleCurrentCore(); 40 void RescheduleCurrentCore();
41 41
42 /// Reschedules cores pending reschedule, to be called on EnableScheduling. 42 /// Reschedules cores pending reschedule, to be called on EnableScheduling.
43 static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule, 43 static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule);
44 Core::EmuThreadHandle global_thread);
45 44
46 /// The next two are for SingleCore Only. 45 /// The next two are for SingleCore Only.
47 /// Unload current thread before preempting core. 46 /// Unload current thread before preempting core.
@@ -53,6 +52,11 @@ public:
53 /// Gets the current running thread 52 /// Gets the current running thread
54 [[nodiscard]] KThread* GetCurrentThread() const; 53 [[nodiscard]] KThread* GetCurrentThread() const;
55 54
55 /// Returns true if the scheduler is idle
56 [[nodiscard]] bool IsIdle() const {
57 return GetCurrentThread() == idle_thread;
58 }
59
56 /// Gets the timestamp for the last context switch in ticks. 60 /// Gets the timestamp for the last context switch in ticks.
57 [[nodiscard]] u64 GetLastContextSwitchTicks() const; 61 [[nodiscard]] u64 GetLastContextSwitchTicks() const;
58 62
@@ -79,7 +83,7 @@ public:
79 * 83 *
80 * @note This operation can be redundant and no scheduling is changed if marked as so. 84 * @note This operation can be redundant and no scheduling is changed if marked as so.
81 */ 85 */
82 void YieldWithoutCoreMigration(); 86 static void YieldWithoutCoreMigration(KernelCore& kernel);
83 87
84 /** 88 /**
85 * Takes a thread and moves it to the back of the it's priority list. 89 * Takes a thread and moves it to the back of the it's priority list.
@@ -88,7 +92,7 @@ public:
88 * 92 *
89 * @note This operation can be redundant and no scheduling is changed if marked as so. 93 * @note This operation can be redundant and no scheduling is changed if marked as so.
90 */ 94 */
91 void YieldWithCoreMigration(); 95 static void YieldWithCoreMigration(KernelCore& kernel);
92 96
93 /** 97 /**
94 * Takes a thread and moves it out of the scheduling queue. 98 * Takes a thread and moves it out of the scheduling queue.
@@ -97,7 +101,9 @@ public:
97 * 101 *
98 * @note This operation can be redundant and no scheduling is changed if marked as so. 102 * @note This operation can be redundant and no scheduling is changed if marked as so.
99 */ 103 */
100 void YieldToAnyThread(); 104 static void YieldToAnyThread(KernelCore& kernel);
105
106 static void ClearPreviousThread(KernelCore& kernel, KThread* thread);
101 107
102 /// Notify the scheduler a thread's status has changed. 108 /// Notify the scheduler a thread's status has changed.
103 static void OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state); 109 static void OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state);
@@ -114,8 +120,7 @@ public:
114 static void SetSchedulerUpdateNeeded(KernelCore& kernel); 120 static void SetSchedulerUpdateNeeded(KernelCore& kernel);
115 static void ClearSchedulerUpdateNeeded(KernelCore& kernel); 121 static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
116 static void DisableScheduling(KernelCore& kernel); 122 static void DisableScheduling(KernelCore& kernel);
117 static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling, 123 static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling);
118 Core::EmuThreadHandle global_thread);
119 [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel); 124 [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
120 125
121private: 126private:
@@ -168,6 +173,7 @@ private:
168 static void OnSwitch(void* this_scheduler); 173 static void OnSwitch(void* this_scheduler);
169 void SwitchToCurrent(); 174 void SwitchToCurrent();
170 175
176 KThread* prev_thread{};
171 KThread* current_thread{}; 177 KThread* current_thread{};
172 KThread* idle_thread{}; 178 KThread* idle_thread{};
173 179
@@ -186,7 +192,7 @@ private:
186 192
187 Core::System& system; 193 Core::System& system;
188 u64 last_context_switch_time{}; 194 u64 last_context_switch_time{};
189 const std::size_t core_id; 195 const s32 core_id;
190 196
191 Common::SpinLock guard{}; 197 Common::SpinLock guard{};
192}; 198};
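
The pinned-thread preference added to UpdateHighestPriorityThreadsImpl in k_scheduler.cpp above reduces to: keep the queue front when it has kernel waiters, otherwise prefer the owning process's pinned thread for the core, but never schedule it while it is not runnable (idle the core instead). A standalone C++ sketch with simplified stand-in types; the real code queries Process::GetPinnedThread and ThreadState:

    struct SimpleThread {
        int num_kernel_waiters = 0;
        bool runnable = false;
        SimpleThread* pinned_for_core = nullptr;  // stand-in for parent->GetPinnedThread(core)
    };

    // Returns the thread to run on this core, or nullptr to leave it idle.
    SimpleThread* SelectTopThread(SimpleThread* top) {
        if (top == nullptr || top->num_kernel_waiters != 0) {
            return top;
        }
        if (SimpleThread* pinned = top->pinned_for_core; pinned != nullptr && pinned != top) {
            // Prefer the parent's pinned thread, but don't schedule an un-runnable one.
            return pinned->runnable ? pinned : nullptr;
        }
        return top;
    }
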
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index e5be849bb..f021b0550 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -1,4 +1,4 @@
1// Copyright 2014 Citra Emulator Project / PPSSPP Project 1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
@@ -8,10 +8,12 @@
8#include <vector> 8#include <vector>
9 9
10#include "common/assert.h" 10#include "common/assert.h"
11#include "common/bit_util.h"
11#include "common/common_funcs.h" 12#include "common/common_funcs.h"
12#include "common/common_types.h" 13#include "common/common_types.h"
13#include "common/fiber.h" 14#include "common/fiber.h"
14#include "common/logging/log.h" 15#include "common/logging/log.h"
16#include "common/scope_exit.h"
15#include "common/thread_queue_list.h" 17#include "common/thread_queue_list.h"
16#include "core/core.h" 18#include "core/core.h"
17#include "core/cpu_manager.h" 19#include "core/cpu_manager.h"
@@ -22,10 +24,12 @@
22#include "core/hle/kernel/k_scheduler.h" 24#include "core/hle/kernel/k_scheduler.h"
23#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 25#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
24#include "core/hle/kernel/k_thread.h" 26#include "core/hle/kernel/k_thread.h"
27#include "core/hle/kernel/k_thread_queue.h"
25#include "core/hle/kernel/kernel.h" 28#include "core/hle/kernel/kernel.h"
26#include "core/hle/kernel/memory/memory_layout.h" 29#include "core/hle/kernel/memory/memory_layout.h"
27#include "core/hle/kernel/object.h" 30#include "core/hle/kernel/object.h"
28#include "core/hle/kernel/process.h" 31#include "core/hle/kernel/process.h"
32#include "core/hle/kernel/resource_limit.h"
29#include "core/hle/kernel/svc_results.h" 33#include "core/hle/kernel/svc_results.h"
30#include "core/hle/kernel/time_manager.h" 34#include "core/hle/kernel/time_manager.h"
31#include "core/hle/result.h" 35#include "core/hle/result.h"
@@ -36,185 +40,734 @@
36#include "core/arm/dynarmic/arm_dynarmic_64.h" 40#include "core/arm/dynarmic/arm_dynarmic_64.h"
37#endif 41#endif
38 42
39namespace Kernel { 43namespace {
44static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
45 u32 entry_point, u32 arg) {
46 context = {};
47 context.cpu_registers[0] = arg;
48 context.cpu_registers[15] = entry_point;
49 context.cpu_registers[13] = stack_top;
50}
40 51
41bool KThread::IsSignaled() const { 52static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top,
42 return signaled; 53 VAddr entry_point, u64 arg) {
54 context = {};
55 context.cpu_registers[0] = arg;
56 context.pc = entry_point;
57 context.sp = stack_top;
58 // TODO(merry): Perform a hardware test to determine the below value.
59 context.fpcr = 0;
43} 60}
61} // namespace
62
63namespace Kernel {
44 64
45KThread::KThread(KernelCore& kernel) : KSynchronizationObject{kernel} {} 65KThread::KThread(KernelCore& kernel)
66 : KSynchronizationObject{kernel}, activity_pause_lock{kernel} {}
46KThread::~KThread() = default; 67KThread::~KThread() = default;
47 68
48void KThread::Stop() { 69ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
49 { 70 s32 virt_core, Process* owner, ThreadType type) {
50 KScopedSchedulerLock lock(kernel); 71 // Assert parameters are valid.
51 SetState(ThreadState::Terminated); 72 ASSERT((type == ThreadType::Main) ||
52 signaled = true; 73 (Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
53 NotifyAvailable(); 74 ASSERT((owner != nullptr) || (type != ThreadType::User));
54 kernel.GlobalHandleTable().Close(global_handle); 75 ASSERT(0 <= virt_core && virt_core < static_cast<s32>(Common::BitSize<u64>()));
76
77 // Convert the virtual core to a physical core.
78 const s32 phys_core = Core::Hardware::VirtualToPhysicalCoreMap[virt_core];
79 ASSERT(0 <= phys_core && phys_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
80
81 // First, clear the TLS address.
82 tls_address = {};
83
84 // Next, assert things based on the type.
85 switch (type) {
86 case ThreadType::Main:
87 ASSERT(arg == 0);
88 [[fallthrough]];
89 case ThreadType::HighPriority:
90 [[fallthrough]];
91 case ThreadType::User:
92 ASSERT(((owner == nullptr) ||
93 (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
94 ASSERT(((owner == nullptr) ||
95 (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask()));
96 break;
97 case ThreadType::Kernel:
98 UNIMPLEMENTED();
99 break;
100 default:
101 UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
102 break;
103 }
104
105 // Set the ideal core ID and affinity mask.
106 virtual_ideal_core_id = virt_core;
107 physical_ideal_core_id = phys_core;
108 virtual_affinity_mask = (static_cast<u64>(1) << virt_core);
109 physical_affinity_mask.SetAffinity(phys_core, true);
110
111 // Set the thread state.
112 thread_state = (type == ThreadType::Main) ? ThreadState::Runnable : ThreadState::Initialized;
113
114 // Set TLS address.
115 tls_address = 0;
116
117 // Set parent and condvar tree.
118 parent = nullptr;
119 condvar_tree = nullptr;
120
121 // Set sync booleans.
122 signaled = false;
123 termination_requested = false;
124 wait_cancelled = false;
125 cancellable = false;
126
127 // Set core ID and wait result.
128 core_id = phys_core;
129 wait_result = Svc::ResultNoSynchronizationObject;
130
131 // Set priorities.
132 priority = prio;
133 base_priority = prio;
134
135 // Set sync object and waiting lock to null.
136 synced_object = nullptr;
137
138 // Initialize sleeping queue.
139 sleeping_queue = nullptr;
140
141 // Set suspend flags.
142 suspend_request_flags = 0;
143 suspend_allowed_flags = static_cast<u32>(ThreadState::SuspendFlagMask);
144
145 // We're neither debug attached, nor are we nesting our priority inheritance.
146 debug_attached = false;
147 priority_inheritance_count = 0;
148
149 // We haven't been scheduled, and we have done no light IPC.
150 schedule_count = -1;
151 last_scheduled_tick = 0;
152 light_ipc_data = nullptr;
153
154 // We're not waiting for a lock, and we haven't disabled migration.
155 lock_owner = nullptr;
156 num_core_migration_disables = 0;
157
158 // We have no waiters, but we do have an entrypoint.
159 num_kernel_waiters = 0;
160
161 // Set our current core id.
162 current_core_id = phys_core;
163
164 // We haven't released our resource limit hint, and we've spent no time on the cpu.
165 resource_limit_release_hint = false;
166 cpu_time = 0;
167
168 // Clear our stack parameters.
169 std::memset(static_cast<void*>(std::addressof(GetStackParameters())), 0,
170 sizeof(StackParameters));
171
172 // Setup the TLS, if needed.
173 if (type == ThreadType::User) {
174 tls_address = owner->CreateTLSRegion();
175 }
176
177 // Set parent, if relevant.
178 if (owner != nullptr) {
179 parent = owner;
180 parent->IncrementThreadCount();
181 }
55 182
56 if (owner_process) { 183 // Initialize thread context.
57 owner_process->UnregisterThread(this); 184 ResetThreadContext64(thread_context_64, user_stack_top, func, arg);
185 ResetThreadContext32(thread_context_32, static_cast<u32>(user_stack_top),
186 static_cast<u32>(func), static_cast<u32>(arg));
58 187
59 // Mark the TLS slot in the thread's page as free. 188 // Setup the stack parameters.
60 owner_process->FreeTLSRegion(tls_address); 189 StackParameters& sp = GetStackParameters();
190 sp.cur_thread = this;
191 sp.disable_count = 1;
192 SetInExceptionHandler();
193
194 // Set thread ID.
195 thread_id = kernel.CreateNewThreadID();
196
197 // We initialized!
198 initialized = true;
199
200 // Register ourselves with our parent process.
201 if (parent != nullptr) {
202 parent->RegisterThread(this);
203 if (parent->IsSuspended()) {
204 RequestSuspend(SuspendType::Process);
61 } 205 }
62 has_exited = true;
63 } 206 }
64 global_handle = 0;
65}
66 207
67void KThread::Wakeup() { 208 return RESULT_SUCCESS;
68 KScopedSchedulerLock lock(kernel);
69 SetState(ThreadState::Runnable);
70} 209}
71 210
72ResultCode KThread::Start() { 211ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
73 KScopedSchedulerLock lock(kernel); 212 VAddr user_stack_top, s32 prio, s32 core, Process* owner,
74 SetState(ThreadState::Runnable); 213 ThreadType type) {
214 // Initialize the thread.
215 R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
216
75 return RESULT_SUCCESS; 217 return RESULT_SUCCESS;
76} 218}
77 219
78void KThread::CancelWait() { 220void KThread::Finalize() {
79 KScopedSchedulerLock lock(kernel); 221 // If the thread has an owner process, unregister it.
80 if (GetState() != ThreadState::Waiting || !is_cancellable) { 222 if (parent != nullptr) {
81 is_sync_cancelled = true; 223 parent->UnregisterThread(this);
82 return; 224 }
225
226 // If the thread has a local region, delete it.
227 if (tls_address != 0) {
228 parent->FreeTLSRegion(tls_address);
229 }
230
231 // Release any waiters.
232 {
233 ASSERT(lock_owner == nullptr);
234 KScopedSchedulerLock sl{kernel};
235
236 auto it = waiter_list.begin();
237 while (it != waiter_list.end()) {
238 // The thread shouldn't be a kernel waiter.
239 it->SetLockOwner(nullptr);
240 it->SetSyncedObject(nullptr, Svc::ResultInvalidState);
241 it->Wakeup();
242 it = waiter_list.erase(it);
243 }
244 }
245
246 // Decrement the parent process's thread count.
247 if (parent != nullptr) {
248 parent->DecrementThreadCount();
83 } 249 }
84 // TODO(Blinkhawk): Implement cancel of server session
85 is_sync_cancelled = false;
86 SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED);
87 SetState(ThreadState::Runnable);
88} 250}
89 251
90static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, 252bool KThread::IsSignaled() const {
91 u32 entry_point, u32 arg) { 253 return signaled;
92 context = {};
93 context.cpu_registers[0] = arg;
94 context.cpu_registers[15] = entry_point;
95 context.cpu_registers[13] = stack_top;
96} 254}
97 255
98static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top, 256void KThread::Wakeup() {
99 VAddr entry_point, u64 arg) { 257 KScopedSchedulerLock sl{kernel};
100 context = {}; 258
101 context.cpu_registers[0] = arg; 259 if (GetState() == ThreadState::Waiting) {
102 context.pc = entry_point; 260 if (sleeping_queue != nullptr) {
103 context.sp = stack_top; 261 sleeping_queue->WakeupThread(this);
104 // TODO(merry): Perform a hardware test to determine the below value. 262 } else {
105 context.fpcr = 0; 263 SetState(ThreadState::Runnable);
264 }
265 }
106} 266}
107 267
108std::shared_ptr<Common::Fiber>& KThread::GetHostContext() { 268void KThread::StartTermination() {
109 return host_context; 269 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
270
271 // Release user exception and unpin, if relevant.
272 if (parent != nullptr) {
273 parent->ReleaseUserException(this);
274 if (parent->GetPinnedThread(GetCurrentCoreId(kernel)) == this) {
275 parent->UnpinCurrentThread();
276 }
277 }
278
279 // Set state to terminated.
280 SetState(ThreadState::Terminated);
281
282 // Clear the thread's status as running in parent.
283 if (parent != nullptr) {
284 parent->ClearRunningThread(this);
285 }
286
287 // Signal.
288 signaled = true;
289 NotifyAvailable();
290
291 // Clear previous thread in KScheduler.
292 KScheduler::ClearPreviousThread(kernel, this);
293
294 // Register terminated dpc flag.
295 RegisterDpc(DpcFlag::Terminated);
110} 296}
111 297
112ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags, 298void KThread::Pin() {
113 std::string name, VAddr entry_point, 299 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
114 u32 priority, u64 arg, s32 processor_id, 300
115 VAddr stack_top, Process* owner_process) { 301 // Set ourselves as pinned.
116 std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc(); 302 GetStackParameters().is_pinned = true;
117 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); 303
118 return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top, 304 // Disable core migration.
119 owner_process, std::move(init_func), init_func_parameter); 305 ASSERT(num_core_migration_disables == 0);
306 {
307 ++num_core_migration_disables;
308
309 // Save our ideal state to restore when we're unpinned.
310 original_physical_ideal_core_id = physical_ideal_core_id;
311 original_physical_affinity_mask = physical_affinity_mask;
312
313 // Bind ourselves to this core.
314 const s32 active_core = GetActiveCore();
315 const s32 current_core = GetCurrentCoreId(kernel);
316
317 SetActiveCore(current_core);
318 physical_ideal_core_id = current_core;
319 physical_affinity_mask.SetAffinityMask(1ULL << current_core);
320
321 if (active_core != current_core || physical_affinity_mask.GetAffinityMask() !=
322 original_physical_affinity_mask.GetAffinityMask()) {
323 KScheduler::OnThreadAffinityMaskChanged(kernel, this, original_physical_affinity_mask,
324 active_core);
325 }
326 }
327
328 // Disallow performing thread suspension.
329 {
330 // Update our allow flags.
331 suspend_allowed_flags &= ~(1 << (static_cast<u32>(SuspendType::Thread) +
332 static_cast<u32>(ThreadState::SuspendShift)));
333
334 // Update our state.
335 const ThreadState old_state = thread_state;
336 thread_state = static_cast<ThreadState>(GetSuspendFlags() |
337 static_cast<u32>(old_state & ThreadState::Mask));
338 if (thread_state != old_state) {
339 KScheduler::OnThreadStateChanged(kernel, this, old_state);
340 }
341 }
342
343 // TODO(bunnei): Update our SVC access permissions.
344 ASSERT(parent != nullptr);
120} 345}
121 346
122ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags, 347void KThread::Unpin() {
123 std::string name, VAddr entry_point, 348 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
124 u32 priority, u64 arg, s32 processor_id, 349
125 VAddr stack_top, Process* owner_process, 350 // Set ourselves as unpinned.
126 std::function<void(void*)>&& thread_start_func, 351 GetStackParameters().is_pinned = false;
127 void* thread_start_parameter) { 352
128 auto& kernel = system.Kernel(); 353 // Enable core migration.
354 ASSERT(num_core_migration_disables == 1);
355 {
356 --num_core_migration_disables;
357
358 // Restore our original state.
359 const KAffinityMask old_mask = physical_affinity_mask;
129 360
130 if (owner_process) { 361 physical_ideal_core_id = original_physical_ideal_core_id;
131 if (!system.Memory().IsValidVirtualAddress(*owner_process, entry_point)) { 362 physical_affinity_mask = original_physical_affinity_mask;
132 LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point); 363
133 // TODO (bunnei): Find the correct error code to use here 364 if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
134 return RESULT_UNKNOWN; 365 const s32 active_core = GetActiveCore();
366
367 if (!physical_affinity_mask.GetAffinity(active_core)) {
368 if (physical_ideal_core_id >= 0) {
369 SetActiveCore(physical_ideal_core_id);
370 } else {
371 SetActiveCore(static_cast<s32>(
372 Common::BitSize<u64>() - 1 -
373 std::countl_zero(physical_affinity_mask.GetAffinityMask())));
374 }
375 }
376 KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core);
135 } 377 }
136 } 378 }
137 379
138 std::shared_ptr<KThread> thread = std::make_shared<KThread>(kernel); 380 // Allow performing thread suspension (if termination hasn't been requested).
381 {
382 // Update our allow flags.
383 if (!IsTerminationRequested()) {
384 suspend_allowed_flags |= (1 << (static_cast<u32>(SuspendType::Thread) +
385 static_cast<u32>(ThreadState::SuspendShift)));
386 }
139 387
140 thread->thread_id = kernel.CreateNewThreadID(); 388 // Update our state.
141 thread->thread_state = ThreadState::Initialized; 389 const ThreadState old_state = thread_state;
142 thread->entry_point = entry_point; 390 thread_state = static_cast<ThreadState>(GetSuspendFlags() |
143 thread->stack_top = stack_top; 391 static_cast<u32>(old_state & ThreadState::Mask));
144 thread->disable_count = 1; 392 if (thread_state != old_state) {
145 thread->tpidr_el0 = 0; 393 KScheduler::OnThreadStateChanged(kernel, this, old_state);
146 thread->current_priority = priority; 394 }
147 thread->base_priority = priority; 395 }
148 thread->lock_owner = nullptr;
149 thread->schedule_count = -1;
150 thread->last_scheduled_tick = 0;
151 thread->processor_id = processor_id;
152 thread->ideal_core = processor_id;
153 thread->affinity_mask.SetAffinity(processor_id, true);
154 thread->name = std::move(name);
155 thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap();
156 thread->owner_process = owner_process;
157 thread->type = type_flags;
158 thread->signaled = false;
159 396
160 auto& scheduler = kernel.GlobalSchedulerContext(); 397 // TODO(bunnei): Update our SVC access permissions.
161 scheduler.AddThread(thread); 398 ASSERT(parent != nullptr);
399
400 // Resume any threads that began waiting on us while we were pinned.
401 for (auto it = pinned_waiter_list.begin(); it != pinned_waiter_list.end(); ++it) {
402 if (it->GetState() == ThreadState::Waiting) {
403 it->SetState(ThreadState::Runnable);
404 }
405 }
406}
407
408ResultCode KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
409 KScopedSchedulerLock sl{kernel};
410
411 // Get the virtual mask.
412 *out_ideal_core = virtual_ideal_core_id;
413 *out_affinity_mask = virtual_affinity_mask;
414
415 return RESULT_SUCCESS;
416}
162 417
163 if (owner_process) { 418ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
164 thread->tls_address = thread->owner_process->CreateTLSRegion(); 419 KScopedSchedulerLock sl{kernel};
165 thread->owner_process->RegisterThread(thread.get()); 420 ASSERT(num_core_migration_disables >= 0);
421
422 // Select between core mask and original core mask.
423 if (num_core_migration_disables == 0) {
424 *out_ideal_core = physical_ideal_core_id;
425 *out_affinity_mask = physical_affinity_mask.GetAffinityMask();
166 } else { 426 } else {
167 thread->tls_address = 0; 427 *out_ideal_core = original_physical_ideal_core_id;
428 *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask();
168 } 429 }
169 430
170 ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top), 431 return RESULT_SUCCESS;
171 static_cast<u32>(entry_point), static_cast<u32>(arg)); 432}
172 ResetThreadContext64(thread->context_64, stack_top, entry_point, arg);
173 433
174 thread->host_context = 434ResultCode KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
175 std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter); 435 ASSERT(parent != nullptr);
436 ASSERT(v_affinity_mask != 0);
437 KScopedLightLock lk{activity_pause_lock};
176 438
177 return MakeResult<std::shared_ptr<KThread>>(std::move(thread)); 439 // Set the core mask.
440 u64 p_affinity_mask = 0;
441 {
442 KScopedSchedulerLock sl{kernel};
443 ASSERT(num_core_migration_disables >= 0);
444
445 // If the core id is no-update magic, preserve the ideal core id.
446 if (core_id == Svc::IdealCoreNoUpdate) {
447 core_id = virtual_ideal_core_id;
448 R_UNLESS(((1ULL << core_id) & v_affinity_mask) != 0, Svc::ResultInvalidCombination);
449 }
450
451 // Set the virtual core/affinity mask.
452 virtual_ideal_core_id = core_id;
453 virtual_affinity_mask = v_affinity_mask;
454
455 // Translate the virtual core to a physical core.
456 if (core_id >= 0) {
457 core_id = Core::Hardware::VirtualToPhysicalCoreMap[core_id];
458 }
459
460 // Translate the virtual affinity mask to a physical one.
461 while (v_affinity_mask != 0) {
462 const u64 next = std::countr_zero(v_affinity_mask);
463 v_affinity_mask &= ~(1ULL << next);
464 p_affinity_mask |= (1ULL << Core::Hardware::VirtualToPhysicalCoreMap[next]);
465 }
466
467 // If we haven't disabled migration, perform an affinity change.
468 if (num_core_migration_disables == 0) {
469 const KAffinityMask old_mask = physical_affinity_mask;
470
471 // Set our new ideals.
472 physical_ideal_core_id = core_id;
473 physical_affinity_mask.SetAffinityMask(p_affinity_mask);
474
475 if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
476 const s32 active_core = GetActiveCore();
477
478 if (active_core >= 0 && !physical_affinity_mask.GetAffinity(active_core)) {
479 const s32 new_core = static_cast<s32>(
480 physical_ideal_core_id >= 0
481 ? physical_ideal_core_id
482 : Common::BitSize<u64>() - 1 -
483 std::countl_zero(physical_affinity_mask.GetAffinityMask()));
484 SetActiveCore(new_core);
485 }
486 KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core);
487 }
488 } else {
489 // Otherwise, we edit the original affinity for restoration later.
490 original_physical_ideal_core_id = core_id;
491 original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
492 }
493 }
494
495 // Update the pinned waiter list.
496 {
497 bool retry_update = false;
498 bool thread_is_pinned = false;
499 do {
500 // Lock the scheduler.
501 KScopedSchedulerLock sl{kernel};
502
503 // Don't do any further management if our termination has been requested.
504 R_SUCCEED_IF(IsTerminationRequested());
505
506 // By default, we won't need to retry.
507 retry_update = false;
508
509 // Check if the thread is currently running.
510 bool thread_is_current = false;
511 s32 thread_core;
512 for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
513 ++thread_core) {
514 if (kernel.Scheduler(thread_core).GetCurrentThread() == this) {
515 thread_is_current = true;
516 break;
517 }
518 }
519
520 // If the thread is currently running, check whether it's no longer allowed under the
521 // new mask.
522 if (thread_is_current && ((1ULL << thread_core) & p_affinity_mask) == 0) {
523 // If the thread is pinned, we want to wait until it's not pinned.
524 if (GetStackParameters().is_pinned) {
525 // Verify that the current thread isn't terminating.
526 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
527 Svc::ResultTerminationRequested);
528
529 // Note that the thread was pinned.
530 thread_is_pinned = true;
531
532 // Wait until the thread isn't pinned any more.
533 pinned_waiter_list.push_back(GetCurrentThread(kernel));
534 GetCurrentThread(kernel).SetState(ThreadState::Waiting);
535 } else {
536 // If the thread isn't pinned, release the scheduler lock and retry until it's
537 // not current.
538 retry_update = true;
539 }
540 }
541 } while (retry_update);
542
543 // If the thread was pinned, it no longer is, and we should remove the current thread from
544 // our waiter list.
545 if (thread_is_pinned) {
546 // Lock the scheduler.
547 KScopedSchedulerLock sl{kernel};
548
549 // Remove from the list.
550 pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
551 }
552 }
553
554 return RESULT_SUCCESS;
178} 555}
179 556
180void KThread::SetBasePriority(u32 priority) { 557void KThread::SetBasePriority(s32 value) {
181 ASSERT(Svc::HighestThreadPriority <= priority && priority <= Svc::LowestThreadPriority); 558 ASSERT(Svc::HighestThreadPriority <= value && value <= Svc::LowestThreadPriority);
182 559
183 KScopedSchedulerLock lock(kernel); 560 KScopedSchedulerLock sl{kernel};
184 561
185 // Change our base priority. 562 // Change our base priority.
186 base_priority = priority; 563 base_priority = value;
187 564
188 // Perform a priority restoration. 565 // Perform a priority restoration.
189 RestorePriority(kernel, this); 566 RestorePriority(kernel, this);
190} 567}
191 568
192void KThread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) { 569void KThread::RequestSuspend(SuspendType type) {
193 signaling_object = object; 570 KScopedSchedulerLock sl{kernel};
194 signaling_result = result;
195}
196 571
197VAddr KThread::GetCommandBufferAddress() const { 572 // Note the request in our flags.
198 // Offset from the start of TLS at which the IPC command buffer begins. 573 suspend_request_flags |=
199 constexpr u64 command_header_offset = 0x80; 574 (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
200 return GetTLSAddress() + command_header_offset; 575
576 // Try to perform the suspend.
577 TrySuspend();
201} 578}
202 579
203void KThread::SetState(ThreadState state) { 580void KThread::Resume(SuspendType type) {
204 KScopedSchedulerLock sl(kernel); 581 KScopedSchedulerLock sl{kernel};
205 582
206 // Clear debugging state 583 // Clear the request in our flags.
207 SetMutexWaitAddressForDebugging({}); 584 suspend_request_flags &=
208 SetWaitReasonForDebugging({}); 585 ~(1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
209 586
587 // Update our state.
210 const ThreadState old_state = thread_state; 588 const ThreadState old_state = thread_state;
211 thread_state = 589 thread_state = static_cast<ThreadState>(GetSuspendFlags() |
212 static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)); 590 static_cast<u32>(old_state & ThreadState::Mask));
213 if (thread_state != old_state) { 591 if (thread_state != old_state) {
214 KScheduler::OnThreadStateChanged(kernel, this, old_state); 592 KScheduler::OnThreadStateChanged(kernel, this, old_state);
215 } 593 }
216} 594}
217 595
596void KThread::WaitCancel() {
597 KScopedSchedulerLock sl{kernel};
598
599 // Check if we're waiting and cancellable.
600 if (GetState() == ThreadState::Waiting && cancellable) {
601 if (sleeping_queue != nullptr) {
602 sleeping_queue->WakeupThread(this);
603 wait_cancelled = true;
604 } else {
605 SetSyncedObject(nullptr, Svc::ResultCancelled);
606 SetState(ThreadState::Runnable);
607 wait_cancelled = false;
608 }
609 } else {
610 // Otherwise, note that we cancelled a wait.
611 wait_cancelled = true;
612 }
613}
614
615void KThread::TrySuspend() {
616 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
617 ASSERT(IsSuspendRequested());
618
619 // Ensure that we have no waiters.
620 if (GetNumKernelWaiters() > 0) {
621 return;
622 }
623 ASSERT(GetNumKernelWaiters() == 0);
624
625 // Perform the suspend.
626 Suspend();
627}
628
629void KThread::Suspend() {
630 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
631 ASSERT(IsSuspendRequested());
632
633 // Set our suspend flags in state.
634 const auto old_state = thread_state;
635 thread_state = static_cast<ThreadState>(GetSuspendFlags()) | (old_state & ThreadState::Mask);
636
637 // Note the state change in scheduler.
638 KScheduler::OnThreadStateChanged(kernel, this, old_state);
639}
640
641void KThread::Continue() {
642 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
643
644 // Clear our suspend flags in state.
645 const auto old_state = thread_state;
646 thread_state = old_state & ThreadState::Mask;
647
648 // Note the state change in scheduler.
649 KScheduler::OnThreadStateChanged(kernel, this, old_state);
650}
651
652ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
653 // Lock ourselves.
654 KScopedLightLock lk(activity_pause_lock);
655
656 // Set the activity.
657 {
658 // Lock the scheduler.
659 KScopedSchedulerLock sl{kernel};
660
661 // Verify our state.
662 const auto cur_state = GetState();
663 R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable),
664 Svc::ResultInvalidState);
665
666 // Either pause or resume.
667 if (activity == Svc::ThreadActivity::Paused) {
668 // Verify that we're not suspended.
669 R_UNLESS(!IsSuspendRequested(SuspendType::Thread), Svc::ResultInvalidState);
670
671 // Suspend.
672 RequestSuspend(SuspendType::Thread);
673 } else {
674 ASSERT(activity == Svc::ThreadActivity::Runnable);
675
676 // Verify that we're suspended.
677 R_UNLESS(IsSuspendRequested(SuspendType::Thread), Svc::ResultInvalidState);
678
679 // Resume.
680 Resume(SuspendType::Thread);
681 }
682 }
683
684 // If the thread is now paused, update the pinned waiter list.
685 if (activity == Svc::ThreadActivity::Paused) {
686 bool thread_is_pinned = false;
687 bool thread_is_current;
688 do {
689 // Lock the scheduler.
690 KScopedSchedulerLock sl{kernel};
691
692 // Don't do any further management if our termination has been requested.
693 R_SUCCEED_IF(IsTerminationRequested());
694
695 // Check whether the thread is pinned.
696 if (GetStackParameters().is_pinned) {
697 // Verify that the current thread isn't terminating.
698 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
699 Svc::ResultTerminationRequested);
700
701 // Note that the thread was pinned and not current.
702 thread_is_pinned = true;
703 thread_is_current = false;
704
705 // Wait until the thread isn't pinned any more.
706 pinned_waiter_list.push_back(GetCurrentThread(kernel));
707 GetCurrentThread(kernel).SetState(ThreadState::Waiting);
708 } else {
709 // Check if the thread is currently running.
710 // If it is, we'll need to retry.
711 thread_is_current = false;
712
713 for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
714 if (kernel.Scheduler(i).GetCurrentThread() == this) {
715 thread_is_current = true;
716 break;
717 }
718 }
719 }
720 } while (thread_is_current);
721
722 // If the thread was pinned, it no longer is, and we should remove the current thread from
723 // our waiter list.
724 if (thread_is_pinned) {
725 // Lock the scheduler.
726 KScopedSchedulerLock sl{kernel};
727
728 // Remove from the list.
729 pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
730 }
731 }
732
733 return RESULT_SUCCESS;
734}
735
736ResultCode KThread::GetThreadContext3(std::vector<u8>& out) {
737 // Lock ourselves.
738 KScopedLightLock lk{activity_pause_lock};
739
740 // Get the context.
741 {
742 // Lock the scheduler.
743 KScopedSchedulerLock sl{kernel};
744
745 // Verify that we're suspended.
746 R_UNLESS(IsSuspendRequested(SuspendType::Thread), Svc::ResultInvalidState);
747
748 // If we're not terminating, get the thread's user context.
749 if (!IsTerminationRequested()) {
750 if (parent->Is64BitProcess()) {
751 // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
752 auto context = GetContext64();
753 context.pstate &= 0xFF0FFE20;
754
755 out.resize(sizeof(context));
756 std::memcpy(out.data(), &context, sizeof(context));
757 } else {
758 // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
759 auto context = GetContext32();
760 context.cpsr &= 0xFF0FFE20;
761
762 out.resize(sizeof(context));
763 std::memcpy(out.data(), &context, sizeof(context));
764 }
765 }
766 }
767
768 return RESULT_SUCCESS;
769}
770
218void KThread::AddWaiterImpl(KThread* thread) { 771void KThread::AddWaiterImpl(KThread* thread) {
219 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 772 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
220 773
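
The SetCoreMask path in the hunk above translates a virtual affinity mask into a physical one by walking the set bits with std::countr_zero and remapping each through the virtual-to-physical core table. A standalone C++ sketch of that loop; the core map here is a placeholder assuming four virtual cores, standing in for Core::Hardware::VirtualToPhysicalCoreMap:

    #include <array>
    #include <bit>
    #include <cstdint>

    // Placeholder identity mapping; the real table is Core::Hardware::VirtualToPhysicalCoreMap.
    constexpr std::array<std::uint64_t, 4> kVirtualToPhysicalCoreMap{0, 1, 2, 3};

    std::uint64_t TranslateAffinityMask(std::uint64_t virtual_mask) {
        std::uint64_t physical_mask = 0;
        while (virtual_mask != 0) {
            const std::uint64_t next = std::countr_zero(virtual_mask);  // lowest set virtual core
            virtual_mask &= ~(1ULL << next);
            physical_mask |= 1ULL << kVirtualToPhysicalCoreMap[next];
        }
        return physical_mask;
    }
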
@@ -345,104 +898,150 @@ KThread* KThread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
345 return next_lock_owner; 898 return next_lock_owner;
346} 899}
347 900
348ResultCode KThread::SetActivity(ThreadActivity value) { 901ResultCode KThread::Run() {
349 KScopedSchedulerLock lock(kernel); 902 while (true) {
903 KScopedSchedulerLock lk{kernel};
350 904
351 auto sched_status = GetState(); 905 // If either this thread or the current thread are requesting termination, note it.
906 R_UNLESS(!IsTerminationRequested(), Svc::ResultTerminationRequested);
907 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
908 Svc::ResultTerminationRequested);
352 909
353 if (sched_status != ThreadState::Runnable && sched_status != ThreadState::Waiting) { 910 // Ensure our thread state is correct.
354 return ERR_INVALID_STATE; 911 R_UNLESS(GetState() == ThreadState::Initialized, Svc::ResultInvalidState);
355 }
356 912
357 if (IsTerminationRequested()) { 913 // If the current thread has been asked to suspend, suspend it and retry.
914 if (GetCurrentThread(kernel).IsSuspended()) {
915 GetCurrentThread(kernel).Suspend();
916 continue;
917 }
918
919 // If we're not a kernel thread and we've been asked to suspend, suspend ourselves.
920 if (IsUserThread() && IsSuspended()) {
921 Suspend();
922 }
923
924 // Set our state and finish.
925 SetState(ThreadState::Runnable);
358 return RESULT_SUCCESS; 926 return RESULT_SUCCESS;
359 } 927 }
928}
360 929
361 if (value == ThreadActivity::Paused) { 930void KThread::Exit() {
362 if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) != 0) { 931 ASSERT(this == GetCurrentThreadPointer(kernel));
363 return ERR_INVALID_STATE; 932
364 } 933 // Release the thread resource hint from parent.
365 AddSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag); 934 if (parent != nullptr) {
366 } else { 935 // TODO(bunnei): Hint that the resource is about to be released.
367 if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) == 0) { 936 resource_limit_release_hint = true;
368 return ERR_INVALID_STATE; 937 }
369 } 938
370 RemoveSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag); 939 // Perform termination.
940 {
941 KScopedSchedulerLock sl{kernel};
942
943 // Disallow all suspension.
944 suspend_allowed_flags = 0;
945
946 // Start termination.
947 StartTermination();
371 } 948 }
372 return RESULT_SUCCESS;
373} 949}
374 950
375ResultCode KThread::Sleep(s64 nanoseconds) { 951ResultCode KThread::Sleep(s64 timeout) {
376 Handle event_handle{}; 952 ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
953 ASSERT(this == GetCurrentThreadPointer(kernel));
954 ASSERT(timeout > 0);
955
377 { 956 {
378 KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds); 957 // Setup the scheduling lock and sleep.
958 KScopedSchedulerLockAndSleep slp{kernel, this, timeout};
959
960 // Check if the thread should terminate.
961 if (IsTerminationRequested()) {
962 slp.CancelSleep();
963 return Svc::ResultTerminationRequested;
964 }
965
966 // Mark the thread as waiting.
379 SetState(ThreadState::Waiting); 967 SetState(ThreadState::Waiting);
380 SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep); 968 SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
381 } 969 }
382 970
383 if (event_handle != InvalidHandle) { 971 // The lock/sleep is done.
384 auto& time_manager = kernel.TimeManager(); 972
385 time_manager.UnscheduleTimeEvent(event_handle); 973 // Cancel the timer.
386 } 974 kernel.TimeManager().UnscheduleTimeEvent(this);
975
387 return RESULT_SUCCESS; 976 return RESULT_SUCCESS;
388} 977}
389 978
390void KThread::AddSchedulingFlag(ThreadSchedFlags flag) { 979void KThread::SetState(ThreadState state) {
391 const auto old_state = GetRawState(); 980 KScopedSchedulerLock sl{kernel};
392 pausing_state |= static_cast<u32>(flag); 981
393 const auto base_scheduling = GetState(); 982 // Clear debugging state
394 thread_state = base_scheduling | static_cast<ThreadState>(pausing_state); 983 SetMutexWaitAddressForDebugging({});
395 KScheduler::OnThreadStateChanged(kernel, this, old_state); 984 SetWaitReasonForDebugging({});
985
986 const ThreadState old_state = thread_state;
987 thread_state =
988 static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
989 if (thread_state != old_state) {
990 KScheduler::OnThreadStateChanged(kernel, this, old_state);
991 }
396} 992}
397 993
398void KThread::RemoveSchedulingFlag(ThreadSchedFlags flag) { 994std::shared_ptr<Common::Fiber>& KThread::GetHostContext() {
399 const auto old_state = GetRawState(); 995 return host_context;
400 pausing_state &= ~static_cast<u32>(flag);
401 const auto base_scheduling = GetState();
402 thread_state = base_scheduling | static_cast<ThreadState>(pausing_state);
403 KScheduler::OnThreadStateChanged(kernel, this, old_state);
404} 996}
405 997
406ResultCode KThread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { 998ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags,
407 KScopedSchedulerLock lock(kernel); 999 std::string name, VAddr entry_point,
408 const auto HighestSetCore = [](u64 mask, u32 max_cores) { 1000 u32 priority, u64 arg, s32 processor_id,
409 for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) { 1001 VAddr stack_top, Process* owner_process) {
410 if (((mask >> core) & 1) != 0) { 1002 std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
411 return core; 1003 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
412 } 1004 return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top,
413 } 1005 owner_process, std::move(init_func), init_func_parameter);
414 return -1; 1006}
415 }; 1007
416 1008ResultVal<std::shared_ptr<KThread>> KThread::Create(Core::System& system, ThreadType type_flags,
417 const bool use_override = affinity_override_count != 0; 1009 std::string name, VAddr entry_point,
418 if (new_core == Svc::IdealCoreNoUpdate) { 1010 u32 priority, u64 arg, s32 processor_id,
419 new_core = use_override ? ideal_core_override : ideal_core; 1011 VAddr stack_top, Process* owner_process,
420 if ((new_affinity_mask & (1ULL << new_core)) == 0) { 1012 std::function<void(void*)>&& thread_start_func,
421 LOG_ERROR(Kernel, "New affinity mask is incorrect! new_core={}, new_affinity_mask={}", 1013 void* thread_start_parameter) {
422 new_core, new_affinity_mask); 1014 auto& kernel = system.Kernel();
423 return ERR_INVALID_COMBINATION; 1015
424 } 1016 std::shared_ptr<KThread> thread = std::make_shared<KThread>(kernel);
425 } 1017
426 if (use_override) { 1018 thread->InitializeThread(thread.get(), entry_point, arg, stack_top, priority, processor_id,
427 ideal_core_override = new_core; 1019 owner_process, type_flags);
428 } else { 1020 thread->name = name;
429 const auto old_affinity_mask = affinity_mask; 1021
430 affinity_mask.SetAffinityMask(new_affinity_mask); 1022 auto& scheduler = kernel.GlobalSchedulerContext();
431 ideal_core = new_core; 1023 scheduler.AddThread(thread);
432 if (old_affinity_mask.GetAffinityMask() != new_affinity_mask) { 1024
433 const s32 old_core = processor_id; 1025 thread->host_context =
434 if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) { 1026 std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);
435 if (static_cast<s32>(ideal_core) < 0) { 1027
436 processor_id = HighestSetCore(affinity_mask.GetAffinityMask(), 1028 return MakeResult<std::shared_ptr<KThread>>(std::move(thread));
437 Core::Hardware::NUM_CPU_CORES); 1029}
438 } else { 1030
439 processor_id = ideal_core; 1031KThread* GetCurrentThreadPointer(KernelCore& kernel) {
440 } 1032 if (!kernel.CurrentScheduler()) {
441 } 1033 // We are not called from a core thread
442 KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_affinity_mask, old_core); 1034 return {};
443 }
444 } 1035 }
445 return RESULT_SUCCESS; 1036 return kernel.CurrentScheduler()->GetCurrentThread();
1037}
1038
1039KThread& GetCurrentThread(KernelCore& kernel) {
1040 return *GetCurrentThreadPointer(kernel);
1041}
1042
1043s32 GetCurrentCoreId(KernelCore& kernel) {
1044 return GetCurrentThread(kernel).GetCurrentCore();
446} 1045}
447 1046
448} // namespace Kernel 1047} // namespace Kernel
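
The rewritten KThread::SetState above composes the new state word from two parts: the low bits carry the scheduling state and the bits above them carry suspend requests, so changing the scheduling state must not clobber a pending suspend. What follows is a minimal standalone sketch of that masked update, not code from this commit; the bit layout (a 4-bit state field with suspend bits above it) is an assumption for the demo, and the real values live in k_thread.h.

// Sketch only: masked state update in the style of KThread::SetState.
// The bit layout (4 low state bits, suspend bits above them) is an assumption.
#include <cstdint>
#include <cstdio>

enum ThreadState : uint16_t {
    Initialized = 0,
    Waiting = 1,
    Runnable = 2,
    Terminated = 3,
    Mask = 0xF,              // assumed: low bits hold the scheduling state
    SuspendProcess = 1 << 4, // assumed: higher bits hold per-type suspend requests
};

constexpr uint16_t UpdateState(uint16_t current, uint16_t next) {
    // Replace only the low state bits; keep the suspend bits untouched.
    return static_cast<uint16_t>((current & ~Mask) | (next & Mask));
}

int main() {
    uint16_t state = Runnable | SuspendProcess; // runnable, with a pending suspend
    state = UpdateState(state, Waiting);        // scheduling state changes...
    std::printf("waiting=%d suspend_kept=%d\n",
                (state & Mask) == Waiting, (state & SuspendProcess) != 0);
    return 0;                                   // ...but the suspend request survives
}
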
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index ef2313f87..7845821ba 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -1,11 +1,10 @@
1// Copyright 2014 Citra Emulator Project / PPSSPP Project 1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#pragma once 5#pragma once
6 6
7#include <array> 7#include <array>
8#include <functional>
9#include <span> 8#include <span>
10#include <string> 9#include <string>
11#include <utility> 10#include <utility>
@@ -18,9 +17,11 @@
18#include "common/spin_lock.h" 17#include "common/spin_lock.h"
19#include "core/arm/arm_interface.h" 18#include "core/arm/arm_interface.h"
20#include "core/hle/kernel/k_affinity_mask.h" 19#include "core/hle/kernel/k_affinity_mask.h"
20#include "core/hle/kernel/k_light_lock.h"
21#include "core/hle/kernel/k_synchronization_object.h" 21#include "core/hle/kernel/k_synchronization_object.h"
22#include "core/hle/kernel/object.h" 22#include "core/hle/kernel/object.h"
23#include "core/hle/kernel/svc_common.h" 23#include "core/hle/kernel/svc_common.h"
24#include "core/hle/kernel/svc_types.h"
24#include "core/hle/result.h" 25#include "core/hle/result.h"
25 26
26namespace Common { 27namespace Common {
@@ -38,6 +39,9 @@ class GlobalSchedulerContext;
38class KernelCore; 39class KernelCore;
39class Process; 40class Process;
40class KScheduler; 41class KScheduler;
42class KThreadQueue;
43
44using KThreadFunction = VAddr;
41 45
42enum class ThreadType : u32 { 46enum class ThreadType : u32 {
43 Main = 0, 47 Main = 0,
@@ -47,6 +51,16 @@ enum class ThreadType : u32 {
47}; 51};
48DECLARE_ENUM_FLAG_OPERATORS(ThreadType); 52DECLARE_ENUM_FLAG_OPERATORS(ThreadType);
49 53
54enum class SuspendType : u32 {
55 Process = 0,
56 Thread = 1,
57 Debug = 2,
58 Backtrace = 3,
59 Init = 4,
60
61 Count,
62};
63
50enum class ThreadState : u16 { 64enum class ThreadState : u16 {
51 Initialized = 0, 65 Initialized = 0,
52 Waiting = 1, 66 Waiting = 1,
@@ -66,21 +80,9 @@ enum class ThreadState : u16 {
66}; 80};
67DECLARE_ENUM_FLAG_OPERATORS(ThreadState); 81DECLARE_ENUM_FLAG_OPERATORS(ThreadState);
68 82
69enum class ThreadWakeupReason { 83enum class DpcFlag : u32 {
70 Signal, // The thread was woken up by WakeupAllWaitingThreads due to an object signal. 84 Terminating = (1 << 0),
71 Timeout // The thread was woken up due to a wait timeout. 85 Terminated = (1 << 1),
72};
73
74enum class ThreadActivity : u32 {
75 Normal = 0,
76 Paused = 1,
77};
78
79enum class ThreadSchedFlags : u32 {
80 ProcessPauseFlag = 1 << 4,
81 ThreadPauseFlag = 1 << 5,
82 ProcessDebugPauseFlag = 1 << 6,
83 KernelInitPauseFlag = 1 << 8,
84}; 86};
85 87
86enum class ThreadWaitReasonForDebugging : u32 { 88enum class ThreadWaitReasonForDebugging : u32 {
@@ -93,21 +95,25 @@ enum class ThreadWaitReasonForDebugging : u32 {
93 Suspended, ///< Thread is waiting due to process suspension 95 Suspended, ///< Thread is waiting due to process suspension
94}; 96};
95 97
98[[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel);
99[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel);
100[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel);
101
96class KThread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> { 102class KThread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> {
97 friend class KScheduler; 103 friend class KScheduler;
98 friend class Process; 104 friend class Process;
99 105
100public: 106public:
101 static constexpr s32 DefaultThreadPriority = 44; 107 static constexpr s32 DefaultThreadPriority = 44;
102 static constexpr s32 IdleThreadPriority = 64; 108 static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1;
103 109
104 explicit KThread(KernelCore& kernel); 110 explicit KThread(KernelCore& kernel);
105 ~KThread() override; 111 ~KThread() override;
106 112
107 using MutexWaitingThreads = std::vector<std::shared_ptr<KThread>>; 113public:
108
109 using ThreadContext32 = Core::ARM_Interface::ThreadContext32; 114 using ThreadContext32 = Core::ARM_Interface::ThreadContext32;
110 using ThreadContext64 = Core::ARM_Interface::ThreadContext64; 115 using ThreadContext64 = Core::ARM_Interface::ThreadContext64;
116 using WaiterList = boost::intrusive::list<KThread>;
111 117
112 /** 118 /**
113 * Creates and returns a new thread. The new thread is immediately scheduled 119 * Creates and returns a new thread. The new thread is immediately scheduled
@@ -121,10 +127,9 @@ public:
121 * @param owner_process The parent process for the thread, if null, it's a kernel thread 127 * @param owner_process The parent process for the thread, if null, it's a kernel thread
122 * @return A shared pointer to the newly created thread 128 * @return A shared pointer to the newly created thread
123 */ 129 */
124 static ResultVal<std::shared_ptr<KThread>> Create(Core::System& system, ThreadType type_flags, 130 [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> Create(
125 std::string name, VAddr entry_point, 131 Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
126 u32 priority, u64 arg, s32 processor_id, 132 u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process);
127 VAddr stack_top, Process* owner_process);
128 133
129 /** 134 /**
130 * Creates and returns a new thread. The new thread is immediately scheduled 135 * Creates and returns a new thread. The new thread is immediately scheduled
@@ -140,12 +145,12 @@ public:
140 * @param thread_start_parameter The parameter which will passed to host context on init 145 * @param thread_start_parameter The parameter which will passed to host context on init
141 * @return A shared pointer to the newly created thread 146 * @return A shared pointer to the newly created thread
142 */ 147 */
143 static ResultVal<std::shared_ptr<KThread>> Create( 148 [[nodiscard]] static ResultVal<std::shared_ptr<KThread>> Create(
144 Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point, 149 Core::System& system, ThreadType type_flags, std::string name, VAddr entry_point,
145 u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process, 150 u32 priority, u64 arg, s32 processor_id, VAddr stack_top, Process* owner_process,
146 std::function<void(void*)>&& thread_start_func, void* thread_start_parameter); 151 std::function<void(void*)>&& thread_start_func, void* thread_start_parameter);
147 152
148 std::string GetName() const override { 153 [[nodiscard]] std::string GetName() const override {
149 return name; 154 return name;
150 } 155 }
151 156
@@ -153,12 +158,12 @@ public:
153 name = std::move(new_name); 158 name = std::move(new_name);
154 } 159 }
155 160
156 std::string GetTypeName() const override { 161 [[nodiscard]] std::string GetTypeName() const override {
157 return "Thread"; 162 return "Thread";
158 } 163 }
159 164
160 static constexpr HandleType HANDLE_TYPE = HandleType::Thread; 165 static constexpr HandleType HANDLE_TYPE = HandleType::Thread;
161 HandleType GetHandleType() const override { 166 [[nodiscard]] HandleType GetHandleType() const override {
162 return HANDLE_TYPE; 167 return HANDLE_TYPE;
163 } 168 }
164 169
@@ -167,15 +172,15 @@ public:
167 * @return The current thread's priority 172 * @return The current thread's priority
168 */ 173 */
169 [[nodiscard]] s32 GetPriority() const { 174 [[nodiscard]] s32 GetPriority() const {
170 return current_priority; 175 return priority;
171 } 176 }
172 177
173 /** 178 /**
174 * Sets the thread's current priority. 179 * Sets the thread's current priority.
175 * @param priority The new priority. 180 * @param priority The new priority.
176 */ 181 */
177 void SetPriority(s32 priority) { 182 void SetPriority(s32 value) {
178 current_priority = priority; 183 priority = value;
179 } 184 }
180 185
181 /** 186 /**
@@ -187,15 +192,6 @@ public:
187 } 192 }
188 193
189 /** 194 /**
190 * Sets the thread's nominal priority.
191 * @param priority The new priority.
192 */
193 void SetBasePriority(u32 priority);
194
195 /// Changes the core that the thread is running or scheduled to run on.
196 [[nodiscard]] ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
197
198 /**
199 * Gets the thread's thread ID 195 * Gets the thread's thread ID
200 * @return The thread's ID 196 * @return The thread's ID
201 */ 197 */
@@ -203,46 +199,67 @@ public:
203 return thread_id; 199 return thread_id;
204 } 200 }
205 201
206 /// Resumes a thread from waiting 202 void ContinueIfHasKernelWaiters() {
203 if (GetNumKernelWaiters() > 0) {
204 Continue();
205 }
206 }
207
207 void Wakeup(); 208 void Wakeup();
208 209
209 ResultCode Start(); 210 void SetBasePriority(s32 value);
210 211
211 virtual bool IsSignaled() const override; 212 [[nodiscard]] ResultCode Run();
212 213
213 /// Cancels a waiting operation that this thread may or may not be within. 214 void Exit();
214 ///
215 /// When the thread is within a waiting state, this will set the thread's
216 /// waiting result to signal a canceled wait. The function will then resume
217 /// this thread.
218 ///
219 void CancelWait();
220 215
221 void SetSynchronizationResults(KSynchronizationObject* object, ResultCode result); 216 [[nodiscard]] u32 GetSuspendFlags() const {
217 return suspend_allowed_flags & suspend_request_flags;
218 }
222 219
223 void SetSyncedObject(KSynchronizationObject* object, ResultCode result) { 220 [[nodiscard]] bool IsSuspended() const {
224 SetSynchronizationResults(object, result); 221 return GetSuspendFlags() != 0;
225 } 222 }
226 223
227 ResultCode GetWaitResult(KSynchronizationObject** out) const { 224 [[nodiscard]] bool IsSuspendRequested(SuspendType type) const {
228 *out = signaling_object; 225 return (suspend_request_flags &
229 return signaling_result; 226 (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)))) !=
227 0;
230 } 228 }
231 229
232 ResultCode GetSignalingResult() const { 230 [[nodiscard]] bool IsSuspendRequested() const {
233 return signaling_result; 231 return suspend_request_flags != 0;
234 } 232 }
235 233
236 /** 234 void RequestSuspend(SuspendType type);
237 * Stops a thread, invalidating it from further use 235
238 */ 236 void Resume(SuspendType type);
239 void Stop(); 237
238 void TrySuspend();
239
240 void Continue();
241
242 void Suspend();
243
244 void Finalize() override;
245
246 bool IsSignaled() const override;
247
248 void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) {
249 synced_object = obj;
250 wait_result = wait_res;
251 }
252
253 [[nodiscard]] ResultCode GetWaitResult(KSynchronizationObject** out) const {
254 *out = synced_object;
255 return wait_result;
256 }
240 257
241 /* 258 /*
242 * Returns the Thread Local Storage address of the current thread 259 * Returns the Thread Local Storage address of the current thread
243 * @returns VAddr of the thread's TLS 260 * @returns VAddr of the thread's TLS
244 */ 261 */
245 VAddr GetTLSAddress() const { 262 [[nodiscard]] VAddr GetTLSAddress() const {
246 return tls_address; 263 return tls_address;
247 } 264 }
248 265
@@ -250,62 +267,45 @@ public:
250 * Returns the value of the TPIDR_EL0 Read/Write system register for this thread. 267 * Returns the value of the TPIDR_EL0 Read/Write system register for this thread.
251 * @returns The value of the TPIDR_EL0 register. 268 * @returns The value of the TPIDR_EL0 register.
252 */ 269 */
253 u64 GetTPIDR_EL0() const { 270 [[nodiscard]] u64 GetTPIDR_EL0() const {
254 return tpidr_el0; 271 return thread_context_64.tpidr;
255 } 272 }
256 273
257 /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread. 274 /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread.
258 void SetTPIDR_EL0(u64 value) { 275 void SetTPIDR_EL0(u64 value) {
259 tpidr_el0 = value; 276 thread_context_64.tpidr = value;
277 thread_context_32.tpidr = static_cast<u32>(value);
260 } 278 }
261 279
262 /* 280 [[nodiscard]] ThreadContext32& GetContext32() {
263 * Returns the address of the current thread's command buffer, located in the TLS. 281 return thread_context_32;
264 * @returns VAddr of the thread's command buffer.
265 */
266 VAddr GetCommandBufferAddress() const;
267
268 ThreadContext32& GetContext32() {
269 return context_32;
270 }
271
272 const ThreadContext32& GetContext32() const {
273 return context_32;
274 } 282 }
275 283
276 ThreadContext64& GetContext64() { 284 [[nodiscard]] const ThreadContext32& GetContext32() const {
277 return context_64; 285 return thread_context_32;
278 } 286 }
279 287
280 const ThreadContext64& GetContext64() const { 288 [[nodiscard]] ThreadContext64& GetContext64() {
281 return context_64; 289 return thread_context_64;
282 } 290 }
283 291
284 bool IsKernelThread() const { 292 [[nodiscard]] const ThreadContext64& GetContext64() const {
285 return type == ThreadType::Kernel; 293 return thread_context_64;
286 } 294 }
287 295
288 bool WasRunning() const { 296 [[nodiscard]] std::shared_ptr<Common::Fiber>& GetHostContext();
289 return was_running;
290 }
291
292 void SetWasRunning(bool value) {
293 was_running = value;
294 }
295 297
296 std::shared_ptr<Common::Fiber>& GetHostContext(); 298 [[nodiscard]] ThreadState GetState() const {
297
298 ThreadState GetState() const {
299 return thread_state & ThreadState::Mask; 299 return thread_state & ThreadState::Mask;
300 } 300 }
301 301
302 ThreadState GetRawState() const { 302 [[nodiscard]] ThreadState GetRawState() const {
303 return thread_state; 303 return thread_state;
304 } 304 }
305 305
306 void SetState(ThreadState state); 306 void SetState(ThreadState state);
307 307
308 s64 GetLastScheduledTick() const { 308 [[nodiscard]] s64 GetLastScheduledTick() const {
309 return last_scheduled_tick; 309 return last_scheduled_tick;
310 } 310 }
311 311
@@ -313,43 +313,44 @@ public:
313 last_scheduled_tick = tick; 313 last_scheduled_tick = tick;
314 } 314 }
315 315
316 u64 GetTotalCPUTimeTicks() const { 316 void AddCpuTime([[maybe_unused]] s32 core_id_, s64 amount) {
317 return total_cpu_time_ticks; 317 cpu_time += amount;
318 // TODO(bunnei): Debug kernels track per-core tick counts. Should we?
318 } 319 }
319 320
320 void UpdateCPUTimeTicks(u64 ticks) { 321 [[nodiscard]] s64 GetCpuTime() const {
321 total_cpu_time_ticks += ticks; 322 return cpu_time;
322 } 323 }
323 324
324 s32 GetProcessorID() const { 325 [[nodiscard]] s32 GetActiveCore() const {
325 return processor_id; 326 return core_id;
326 } 327 }
327 328
328 s32 GetActiveCore() const { 329 void SetActiveCore(s32 core) {
329 return GetProcessorID(); 330 core_id = core;
330 } 331 }
331 332
332 void SetProcessorID(s32 new_core) { 333 [[nodiscard]] s32 GetCurrentCore() const {
333 processor_id = new_core; 334 return current_core_id;
334 } 335 }
335 336
336 void SetActiveCore(s32 new_core) { 337 void SetCurrentCore(s32 core) {
337 processor_id = new_core; 338 current_core_id = core;
338 } 339 }
339 340
340 Process* GetOwnerProcess() { 341 [[nodiscard]] Process* GetOwnerProcess() {
341 return owner_process; 342 return parent;
342 } 343 }
343 344
344 const Process* GetOwnerProcess() const { 345 [[nodiscard]] const Process* GetOwnerProcess() const {
345 return owner_process; 346 return parent;
346 } 347 }
347 348
348 const MutexWaitingThreads& GetMutexWaitingThreads() const { 349 [[nodiscard]] bool IsUserThread() const {
349 return wait_mutex_threads; 350 return parent != nullptr;
350 } 351 }
351 352
352 KThread* GetLockOwner() const { 353 [[nodiscard]] KThread* GetLockOwner() const {
353 return lock_owner; 354 return lock_owner;
354 } 355 }
355 356
@@ -357,20 +358,21 @@ public:
357 lock_owner = owner; 358 lock_owner = owner;
358 } 359 }
359 360
360 u32 GetIdealCore() const { 361 [[nodiscard]] const KAffinityMask& GetAffinityMask() const {
361 return ideal_core; 362 return physical_affinity_mask;
362 } 363 }
363 364
364 const KAffinityMask& GetAffinityMask() const { 365 [[nodiscard]] ResultCode GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
365 return affinity_mask; 366
366 } 367 [[nodiscard]] ResultCode GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
367 368
368 ResultCode SetActivity(ThreadActivity value); 369 [[nodiscard]] ResultCode SetCoreMask(s32 core_id, u64 v_affinity_mask);
369 370
370 /// Sleeps this thread for the given amount of nanoseconds. 371 [[nodiscard]] ResultCode SetActivity(Svc::ThreadActivity activity);
371 ResultCode Sleep(s64 nanoseconds);
372 372
373 s64 GetYieldScheduleCount() const { 373 [[nodiscard]] ResultCode Sleep(s64 timeout);
374
375 [[nodiscard]] s64 GetYieldScheduleCount() const {
374 return schedule_count; 376 return schedule_count;
375 } 377 }
376 378
@@ -378,56 +380,49 @@ public:
378 schedule_count = count; 380 schedule_count = count;
379 } 381 }
380 382
381 bool IsRunning() const { 383 void WaitCancel();
382 return is_running;
383 }
384
385 void SetIsRunning(bool value) {
386 is_running = value;
387 }
388
389 bool IsWaitCancelled() const {
390 return is_sync_cancelled;
391 }
392 384
393 void ClearWaitCancelled() { 385 [[nodiscard]] bool IsWaitCancelled() const {
394 is_sync_cancelled = false; 386 return wait_cancelled;
395 } 387 }
396 388
397 Handle GetGlobalHandle() const { 389 [[nodiscard]] void ClearWaitCancelled() {
398 return global_handle; 390 wait_cancelled = false;
399 } 391 }
400 392
401 bool IsCancellable() const { 393 [[nodiscard]] bool IsCancellable() const {
402 return is_cancellable; 394 return cancellable;
403 } 395 }
404 396
405 void SetCancellable() { 397 void SetCancellable() {
406 is_cancellable = true; 398 cancellable = true;
407 } 399 }
408 400
409 void ClearCancellable() { 401 void ClearCancellable() {
410 is_cancellable = false; 402 cancellable = false;
411 }
412
413 bool IsTerminationRequested() const {
414 return will_be_terminated || GetRawState() == ThreadState::Terminated;
415 } 403 }
416 404
417 bool IsPaused() const { 405 [[nodiscard]] bool IsTerminationRequested() const {
418 return pausing_state != 0; 406 return termination_requested || GetRawState() == ThreadState::Terminated;
419 } 407 }
420 408
421 bool IsContinuousOnSVC() const { 409 struct StackParameters {
422 return is_continuous_on_svc; 410 u8 svc_permission[0x10];
423 } 411 std::atomic<u8> dpc_flags;
412 u8 current_svc_id;
413 bool is_calling_svc;
414 bool is_in_exception_handler;
415 bool is_pinned;
416 s32 disable_count;
417 KThread* cur_thread;
418 };
424 419
425 void SetContinuousOnSVC(bool is_continuous) { 420 [[nodiscard]] StackParameters& GetStackParameters() {
426 is_continuous_on_svc = is_continuous; 421 return stack_parameters;
427 } 422 }
428 423
429 bool HasExited() const { 424 [[nodiscard]] const StackParameters& GetStackParameters() const {
430 return has_exited; 425 return stack_parameters;
431 } 426 }
432 427
433 class QueueEntry { 428 class QueueEntry {
@@ -457,26 +452,78 @@ public:
457 KThread* next{}; 452 KThread* next{};
458 }; 453 };
459 454
460 QueueEntry& GetPriorityQueueEntry(s32 core) { 455 [[nodiscard]] QueueEntry& GetPriorityQueueEntry(s32 core) {
461 return per_core_priority_queue_entry[core]; 456 return per_core_priority_queue_entry[core];
462 } 457 }
463 458
464 const QueueEntry& GetPriorityQueueEntry(s32 core) const { 459 [[nodiscard]] const QueueEntry& GetPriorityQueueEntry(s32 core) const {
465 return per_core_priority_queue_entry[core]; 460 return per_core_priority_queue_entry[core];
466 } 461 }
467 462
468 s32 GetDisableDispatchCount() const { 463 void SetSleepingQueue(KThreadQueue* q) {
469 return disable_count; 464 sleeping_queue = q;
465 }
466
467 [[nodiscard]] s32 GetDisableDispatchCount() const {
468 return this->GetStackParameters().disable_count;
470 } 469 }
471 470
472 void DisableDispatch() { 471 void DisableDispatch() {
473 ASSERT(GetDisableDispatchCount() >= 0); 472 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
474 disable_count++; 473 this->GetStackParameters().disable_count++;
475 } 474 }
476 475
477 void EnableDispatch() { 476 void EnableDispatch() {
478 ASSERT(GetDisableDispatchCount() > 0); 477 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
479 disable_count--; 478 this->GetStackParameters().disable_count--;
479 }
480
481 void Pin();
482
483 void Unpin();
484
485 void SetInExceptionHandler() {
486 this->GetStackParameters().is_in_exception_handler = true;
487 }
488
489 void ClearInExceptionHandler() {
490 this->GetStackParameters().is_in_exception_handler = false;
491 }
492
493 [[nodiscard]] bool IsInExceptionHandler() const {
494 return this->GetStackParameters().is_in_exception_handler;
495 }
496
497 void SetIsCallingSvc() {
498 this->GetStackParameters().is_calling_svc = true;
499 }
500
501 void ClearIsCallingSvc() {
502 this->GetStackParameters().is_calling_svc = false;
503 }
504
505 [[nodiscard]] bool IsCallingSvc() const {
506 return this->GetStackParameters().is_calling_svc;
507 }
508
509 [[nodiscard]] u8 GetSvcId() const {
510 return this->GetStackParameters().current_svc_id;
511 }
512
513 void RegisterDpc(DpcFlag flag) {
514 this->GetStackParameters().dpc_flags |= static_cast<u8>(flag);
515 }
516
517 void ClearDpc(DpcFlag flag) {
518 this->GetStackParameters().dpc_flags &= ~static_cast<u8>(flag);
519 }
520
521 [[nodiscard]] u8 GetDpc() const {
522 return this->GetStackParameters().dpc_flags;
523 }
524
525 [[nodiscard]] bool HasDpc() const {
526 return this->GetDpc() != 0;
480 } 527 }
481 528
482 void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) { 529 void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) {
@@ -507,10 +554,16 @@ public:
507 return mutex_wait_address_for_debugging; 554 return mutex_wait_address_for_debugging;
508 } 555 }
509 556
557 [[nodiscard]] s32 GetIdealCoreForDebugging() const {
558 return virtual_ideal_core_id;
559 }
560
510 void AddWaiter(KThread* thread); 561 void AddWaiter(KThread* thread);
511 562
512 void RemoveWaiter(KThread* thread); 563 void RemoveWaiter(KThread* thread);
513 564
565 [[nodiscard]] ResultCode GetThreadContext3(std::vector<u8>& out);
566
514 [[nodiscard]] KThread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key); 567 [[nodiscard]] KThread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);
515 568
516 [[nodiscard]] VAddr GetAddressKey() const { 569 [[nodiscard]] VAddr GetAddressKey() const {
@@ -530,6 +583,22 @@ public:
530 address_key_value = val; 583 address_key_value = val;
531 } 584 }
532 585
586 [[nodiscard]] bool HasWaiters() const {
587 return !waiter_list.empty();
588 }
589
590 [[nodiscard]] s32 GetNumKernelWaiters() const {
591 return num_kernel_waiters;
592 }
593
594 [[nodiscard]] u64 GetConditionVariableKey() const {
595 return condvar_key;
596 }
597
598 [[nodiscard]] u64 GetAddressArbiterKey() const {
599 return condvar_key;
600 }
601
533private: 602private:
534 static constexpr size_t PriorityInheritanceCountMax = 10; 603 static constexpr size_t PriorityInheritanceCountMax = 10;
535 union SyncObjectBuffer { 604 union SyncObjectBuffer {
@@ -560,8 +629,8 @@ private:
560 std::same_as<T, KThread> || 629 std::same_as<T, KThread> ||
561 std::same_as<T, LightCompareType>) static constexpr int Compare(const T& lhs, 630 std::same_as<T, LightCompareType>) static constexpr int Compare(const T& lhs,
562 const KThread& rhs) { 631 const KThread& rhs) {
563 const uintptr_t l_key = lhs.GetConditionVariableKey(); 632 const u64 l_key = lhs.GetConditionVariableKey();
564 const uintptr_t r_key = rhs.GetConditionVariableKey(); 633 const u64 r_key = rhs.GetConditionVariableKey();
565 634
566 if (l_key < r_key) { 635 if (l_key < r_key) {
567 // Sort first by key 636 // Sort first by key
@@ -575,26 +644,88 @@ private:
575 } 644 }
576 }; 645 };
577 646
578 Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{}; 647 void AddWaiterImpl(KThread* thread);
648
649 void RemoveWaiterImpl(KThread* thread);
579 650
651 void StartTermination();
652
653 [[nodiscard]] ResultCode Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
654 s32 prio, s32 virt_core, Process* owner, ThreadType type);
655
656 [[nodiscard]] static ResultCode InitializeThread(KThread* thread, KThreadFunction func,
657 uintptr_t arg, VAddr user_stack_top, s32 prio,
658 s32 core, Process* owner, ThreadType type);
659
660 static void RestorePriority(KernelCore& kernel, KThread* thread);
661
662 // For core KThread implementation
663 ThreadContext32 thread_context_32{};
664 ThreadContext64 thread_context_64{};
665 Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};
666 s32 priority{};
580 using ConditionVariableThreadTreeTraits = 667 using ConditionVariableThreadTreeTraits =
581 Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert< 668 Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<
582 &KThread::condvar_arbiter_tree_node>; 669 &KThread::condvar_arbiter_tree_node>;
583 using ConditionVariableThreadTree = 670 using ConditionVariableThreadTree =
584 ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>; 671 ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
672 ConditionVariableThreadTree* condvar_tree{};
673 u64 condvar_key{};
674 u64 virtual_affinity_mask{};
675 KAffinityMask physical_affinity_mask{};
676 u64 thread_id{};
677 std::atomic<s64> cpu_time{};
678 KSynchronizationObject* synced_object{};
679 VAddr address_key{};
680 Process* parent{};
681 VAddr kernel_stack_top{};
682 u32* light_ipc_data{};
683 VAddr tls_address{};
684 KLightLock activity_pause_lock;
685 s64 schedule_count{};
686 s64 last_scheduled_tick{};
687 std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
688 KThreadQueue* sleeping_queue{};
689 WaiterList waiter_list{};
690 WaiterList pinned_waiter_list{};
691 KThread* lock_owner{};
692 u32 address_key_value{};
693 u32 suspend_request_flags{};
694 u32 suspend_allowed_flags{};
695 ResultCode wait_result{RESULT_SUCCESS};
696 s32 base_priority{};
697 s32 physical_ideal_core_id{};
698 s32 virtual_ideal_core_id{};
699 s32 num_kernel_waiters{};
700 s32 current_core_id{};
701 s32 core_id{};
702 KAffinityMask original_physical_affinity_mask{};
703 s32 original_physical_ideal_core_id{};
704 s32 num_core_migration_disables{};
705 ThreadState thread_state{};
706 std::atomic<bool> termination_requested{};
707 bool wait_cancelled{};
708 bool cancellable{};
709 bool signaled{};
710 bool initialized{};
711 bool debug_attached{};
712 s8 priority_inheritance_count{};
713 bool resource_limit_release_hint{};
714 StackParameters stack_parameters{};
585 715
586public: 716 // For emulation
587 using ConditionVariableThreadTreeType = ConditionVariableThreadTree; 717 std::shared_ptr<Common::Fiber> host_context{};
588 718
589 [[nodiscard]] uintptr_t GetConditionVariableKey() const { 719 // For debugging
590 return condvar_key; 720 std::vector<KSynchronizationObject*> wait_objects_for_debugging;
591 } 721 VAddr mutex_wait_address_for_debugging{};
722 ThreadWaitReasonForDebugging wait_reason_for_debugging{};
723 std::string name;
592 724
593 [[nodiscard]] uintptr_t GetAddressArbiterKey() const { 725public:
594 return condvar_key; 726 using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
595 }
596 727
597 void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, uintptr_t cv_key, 728 void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key,
598 u32 value) { 729 u32 value) {
599 condvar_tree = tree; 730 condvar_tree = tree;
600 condvar_key = cv_key; 731 condvar_key = cv_key;
@@ -610,7 +741,7 @@ public:
610 return condvar_tree != nullptr; 741 return condvar_tree != nullptr;
611 } 742 }
612 743
613 void SetAddressArbiter(ConditionVariableThreadTree* tree, uintptr_t address) { 744 void SetAddressArbiter(ConditionVariableThreadTree* tree, u64 address) {
614 condvar_tree = tree; 745 condvar_tree = tree;
615 condvar_key = address; 746 condvar_key = address;
616 } 747 }
@@ -626,111 +757,6 @@ public:
626 [[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const { 757 [[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const {
627 return condvar_tree; 758 return condvar_tree;
628 } 759 }
629
630 [[nodiscard]] bool HasWaiters() const {
631 return !waiter_list.empty();
632 }
633
634private:
635 void AddSchedulingFlag(ThreadSchedFlags flag);
636 void RemoveSchedulingFlag(ThreadSchedFlags flag);
637 void AddWaiterImpl(KThread* thread);
638 void RemoveWaiterImpl(KThread* thread);
639 static void RestorePriority(KernelCore& kernel, KThread* thread);
640
641 Common::SpinLock context_guard{};
642 ThreadContext32 context_32{};
643 ThreadContext64 context_64{};
644 std::shared_ptr<Common::Fiber> host_context{};
645
646 ThreadState thread_state = ThreadState::Initialized;
647
648 u64 thread_id = 0;
649
650 VAddr entry_point = 0;
651 VAddr stack_top = 0;
652 std::atomic_int disable_count = 0;
653
654 ThreadType type;
655
656 /// Nominal thread priority, as set by the emulated application.
657 /// The nominal priority is the thread priority without priority
658 /// inheritance taken into account.
659 s32 base_priority{};
660
661 /// Current thread priority. This may change over the course of the
662 /// thread's lifetime in order to facilitate priority inheritance.
663 s32 current_priority{};
664
665 u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
666 s64 schedule_count{};
667 s64 last_scheduled_tick{};
668
669 s32 processor_id = 0;
670
671 VAddr tls_address = 0; ///< Virtual address of the Thread Local Storage of the thread
672 u64 tpidr_el0 = 0; ///< TPIDR_EL0 read/write system register.
673
674 /// Process that owns this thread
675 Process* owner_process;
676
677 /// Objects that the thread is waiting on, in the same order as they were
678 /// passed to WaitSynchronization. This is used for debugging only.
679 std::vector<KSynchronizationObject*> wait_objects_for_debugging;
680
681 /// The current mutex wait address. This is used for debugging only.
682 VAddr mutex_wait_address_for_debugging{};
683
684 /// The reason the thread is waiting. This is used for debugging only.
685 ThreadWaitReasonForDebugging wait_reason_for_debugging{};
686
687 KSynchronizationObject* signaling_object;
688 ResultCode signaling_result{RESULT_SUCCESS};
689
690 /// List of threads that are waiting for a mutex that is held by this thread.
691 MutexWaitingThreads wait_mutex_threads;
692
693 /// Thread that owns the lock that this thread is waiting for.
694 KThread* lock_owner{};
695
696 /// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
697 Handle global_handle = 0;
698
699 KScheduler* scheduler = nullptr;
700
701 std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
702
703 u32 ideal_core{0xFFFFFFFF};
704 KAffinityMask affinity_mask{};
705
706 s32 ideal_core_override = -1;
707 u32 affinity_override_count = 0;
708
709 u32 pausing_state = 0;
710 bool is_running = false;
711 bool is_cancellable = false;
712 bool is_sync_cancelled = false;
713
714 bool is_continuous_on_svc = false;
715
716 bool will_be_terminated = false;
717 bool has_exited = false;
718
719 bool was_running = false;
720
721 bool signaled{};
722
723 ConditionVariableThreadTree* condvar_tree{};
724 uintptr_t condvar_key{};
725 VAddr address_key{};
726 u32 address_key_value{};
727 s32 num_kernel_waiters{};
728
729 using WaiterList = boost::intrusive::list<KThread>;
730 WaiterList waiter_list{};
731 WaiterList pinned_waiter_list{};
732
733 std::string name;
734}; 760};
735 761
736} // namespace Kernel 762} // namespace Kernel
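
The new StackParameters block above routes per-thread flags (DPC bits, SVC state, pin state, dispatch-disable count) through one struct instead of loose members. Below is a small standalone sketch of the DpcFlag register/clear/query cycle; it borrows the same bit values as the header, but the surrounding types are simplified stand-ins rather than the commit's classes.

// Sketch only: the DpcFlag bookkeeping pattern from KThread::StackParameters.
#include <atomic>
#include <cassert>
#include <cstdint>

enum class DpcFlag : uint32_t {
    Terminating = 1u << 0,
    Terminated = 1u << 1,
};

struct StackParametersSketch {
    std::atomic<uint8_t> dpc_flags{0};
};

struct ThreadSketch {
    StackParametersSketch stack_parameters;

    void RegisterDpc(DpcFlag flag) {
        stack_parameters.dpc_flags |= static_cast<uint8_t>(flag);
    }
    void ClearDpc(DpcFlag flag) {
        stack_parameters.dpc_flags &= static_cast<uint8_t>(~static_cast<uint8_t>(flag));
    }
    bool HasDpc() const {
        return stack_parameters.dpc_flags.load() != 0;
    }
};

int main() {
    ThreadSketch thread;
    assert(!thread.HasDpc());
    thread.RegisterDpc(DpcFlag::Terminating); // e.g. set when termination is requested
    assert(thread.HasDpc());
    thread.ClearDpc(DpcFlag::Terminating);    // cleared once the pending work is handled
    assert(!thread.HasDpc());
    return 0;
}
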
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 39d5122f5..d61659453 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -117,14 +117,14 @@ struct KernelCore::Impl {
117 void InitializePhysicalCores() { 117 void InitializePhysicalCores() {
118 exclusive_monitor = 118 exclusive_monitor =
119 Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES); 119 Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
120 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 120 for (s32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
121 schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i); 121 schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i);
122 cores.emplace_back(i, system, *schedulers[i], interrupts); 122 cores.emplace_back(i, system, *schedulers[i], interrupts);
123 } 123 }
124 } 124 }
125 125
126 void InitializeSchedulers() { 126 void InitializeSchedulers() {
127 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 127 for (s32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
128 cores[i].Scheduler().Initialize(); 128 cores[i].Scheduler().Initialize();
129 } 129 }
130 } 130 }
@@ -169,9 +169,9 @@ struct KernelCore::Impl {
169 std::string name = "Suspend Thread Id:" + std::to_string(i); 169 std::string name = "Suspend Thread Id:" + std::to_string(i);
170 std::function<void(void*)> init_func = Core::CpuManager::GetSuspendThreadStartFunc(); 170 std::function<void(void*)> init_func = Core::CpuManager::GetSuspendThreadStartFunc();
171 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater(); 171 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
172 auto thread_res = KThread::Create(system, ThreadType::Kernel, std::move(name), 0, 0, 0, 172 auto thread_res = KThread::Create(system, ThreadType::HighPriority, std::move(name), 0,
173 static_cast<u32>(i), 0, nullptr, std::move(init_func), 173 0, 0, static_cast<u32>(i), 0, nullptr,
174 init_func_parameter); 174 std::move(init_func), init_func_parameter);
175 175
176 suspend_threads[i] = std::move(thread_res).Unwrap(); 176 suspend_threads[i] = std::move(thread_res).Unwrap();
177 } 177 }
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 819e275ff..9f4583b49 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -136,6 +136,23 @@ std::shared_ptr<ResourceLimit> Process::GetResourceLimit() const {
136 return resource_limit; 136 return resource_limit;
137} 137}
138 138
139void Process::IncrementThreadCount() {
140 ASSERT(num_threads >= 0);
141 ++num_created_threads;
142
143 if (const auto count = ++num_threads; count > peak_num_threads) {
144 peak_num_threads = count;
145 }
146}
147
148void Process::DecrementThreadCount() {
149 ASSERT(num_threads > 0);
150
151 if (const auto count = --num_threads; count == 0) {
152 UNIMPLEMENTED_MSG("Process termination is not implemented!");
153 }
154}
155
139u64 Process::GetTotalPhysicalMemoryAvailable() const { 156u64 Process::GetTotalPhysicalMemoryAvailable() const {
140 const u64 capacity{resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory) + 157 const u64 capacity{resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory) +
141 page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size + 158 page_table->GetTotalHeapSize() + GetSystemResourceSize() + image_size +
@@ -161,6 +178,61 @@ u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
161 return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage(); 178 return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
162} 179}
163 180
181bool Process::ReleaseUserException(KThread* thread) {
182 KScopedSchedulerLock sl{kernel};
183
184 if (exception_thread == thread) {
185 exception_thread = nullptr;
186
187 // Remove waiter thread.
188 s32 num_waiters{};
189 KThread* next = thread->RemoveWaiterByKey(
190 std::addressof(num_waiters),
191 reinterpret_cast<uintptr_t>(std::addressof(exception_thread)));
192 if (next != nullptr) {
193 if (next->GetState() == ThreadState::Waiting) {
194 next->SetState(ThreadState::Runnable);
195 } else {
196 KScheduler::SetSchedulerUpdateNeeded(kernel);
197 }
198 }
199
200 return true;
201 } else {
202 return false;
203 }
204}
205
206void Process::PinCurrentThread() {
207 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
208
209 // Get the current thread.
210 const s32 core_id = GetCurrentCoreId(kernel);
211 KThread* cur_thread = GetCurrentThreadPointer(kernel);
212
213 // Pin it.
214 PinThread(core_id, cur_thread);
215 cur_thread->Pin();
216
217 // An update is needed.
218 KScheduler::SetSchedulerUpdateNeeded(kernel);
219}
220
221void Process::UnpinCurrentThread() {
222 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
223
224 // Get the current thread.
225 const s32 core_id = GetCurrentCoreId(kernel);
226 KThread* cur_thread = GetCurrentThreadPointer(kernel);
227
228 // Unpin it.
229 cur_thread->Unpin();
230 UnpinThread(core_id, cur_thread);
231
232 // An update is needed.
233 KScheduler::SetSchedulerUpdateNeeded(kernel);
234}
235
164void Process::RegisterThread(const KThread* thread) { 236void Process::RegisterThread(const KThread* thread) {
165 thread_list.push_back(thread); 237 thread_list.push_back(thread);
166} 238}
@@ -278,7 +350,7 @@ void Process::PrepareForTermination() {
278 ASSERT_MSG(thread->GetState() == ThreadState::Waiting, 350 ASSERT_MSG(thread->GetState() == ThreadState::Waiting,
279 "Exiting processes with non-waiting threads is currently unimplemented"); 351 "Exiting processes with non-waiting threads is currently unimplemented");
280 352
281 thread->Stop(); 353 thread->Exit();
282 } 354 }
283 }; 355 };
284 356
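
Process now tracks live and peak thread counts through the IncrementThreadCount/DecrementThreadCount helpers added above. The standalone sketch below shows that bookkeeping pattern; unlike the committed code it keeps the peak in an atomic and updates it with a compare-exchange loop, purely so the example is self-contained and race-free, and the names are placeholders rather than the commit's members.

// Sketch only: live/peak thread counters in the spirit of Process::IncrementThreadCount.
// The CAS loop for the peak is this example's choice, not the commit's.
#include <atomic>
#include <cassert>
#include <cstdint>

struct ThreadCounters {
    std::atomic<int32_t> num_created{0};
    std::atomic<int32_t> num_threads{0};
    std::atomic<int32_t> peak_num_threads{0};

    void Increment() {
        ++num_created;
        const int32_t count = ++num_threads;
        int32_t peak = peak_num_threads.load();
        while (count > peak && !peak_num_threads.compare_exchange_weak(peak, count)) {
            // peak was refreshed by compare_exchange_weak; retry until it is current.
        }
    }

    void Decrement() {
        const int32_t count = --num_threads;
        assert(count >= 0);
        // The commit leaves actual process termination on count == 0 unimplemented.
    }
};

int main() {
    ThreadCounters counters;
    counters.Increment();
    counters.Increment();
    counters.Decrement();
    assert(counters.num_threads.load() == 1);
    assert(counters.peak_num_threads.load() == 2);
    return 0;
}
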
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 917babfb4..11d78f3a8 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -217,6 +217,14 @@ public:
217 return is_64bit_process; 217 return is_64bit_process;
218 } 218 }
219 219
220 [[nodiscard]] bool IsSuspended() const {
221 return is_suspended;
222 }
223
224 void SetSuspended(bool suspended) {
225 is_suspended = suspended;
226 }
227
220 /// Gets the total running time of the process instance in ticks. 228 /// Gets the total running time of the process instance in ticks.
221 u64 GetCPUTimeTicks() const { 229 u64 GetCPUTimeTicks() const {
222 return total_process_running_time_ticks; 230 return total_process_running_time_ticks;
@@ -237,6 +245,33 @@ public:
237 ++schedule_count; 245 ++schedule_count;
238 } 246 }
239 247
248 void IncrementThreadCount();
249 void DecrementThreadCount();
250
251 void SetRunningThread(s32 core, KThread* thread, u64 idle_count) {
252 running_threads[core] = thread;
253 running_thread_idle_counts[core] = idle_count;
254 }
255
256 void ClearRunningThread(KThread* thread) {
257 for (size_t i = 0; i < running_threads.size(); ++i) {
258 if (running_threads[i] == thread) {
259 running_threads[i] = nullptr;
260 }
261 }
262 }
263
264 [[nodiscard]] KThread* GetRunningThread(s32 core) const {
265 return running_threads[core];
266 }
267
268 bool ReleaseUserException(KThread* thread);
269
270 [[nodiscard]] KThread* GetPinnedThread(s32 core_id) const {
271 ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
272 return pinned_threads[core_id];
273 }
274
240 /// Gets 8 bytes of random data for svcGetInfo RandomEntropy 275 /// Gets 8 bytes of random data for svcGetInfo RandomEntropy
241 u64 GetRandomEntropy(std::size_t index) const { 276 u64 GetRandomEntropy(std::size_t index) const {
242 return random_entropy.at(index); 277 return random_entropy.at(index);
@@ -310,6 +345,9 @@ public:
310 345
311 void Finalize() override {} 346 void Finalize() override {}
312 347
348 void PinCurrentThread();
349 void UnpinCurrentThread();
350
313 /////////////////////////////////////////////////////////////////////////////////////////////// 351 ///////////////////////////////////////////////////////////////////////////////////////////////
314 // Thread-local storage management 352 // Thread-local storage management
315 353
@@ -320,6 +358,20 @@ public:
320 void FreeTLSRegion(VAddr tls_address); 358 void FreeTLSRegion(VAddr tls_address);
321 359
322private: 360private:
361 void PinThread(s32 core_id, KThread* thread) {
362 ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
363 ASSERT(thread != nullptr);
364 ASSERT(pinned_threads[core_id] == nullptr);
365 pinned_threads[core_id] = thread;
366 }
367
368 void UnpinThread(s32 core_id, KThread* thread) {
369 ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
370 ASSERT(thread != nullptr);
371 ASSERT(pinned_threads[core_id] == thread);
372 pinned_threads[core_id] = nullptr;
373 }
374
323 /// Changes the process status. If the status is different 375 /// Changes the process status. If the status is different
324 /// from the current process status, then this will trigger 376 /// from the current process status, then this will trigger
325 /// a process signal. 377 /// a process signal.
@@ -408,6 +460,17 @@ private:
408 s64 schedule_count{}; 460 s64 schedule_count{};
409 461
410 bool is_signaled{}; 462 bool is_signaled{};
463 bool is_suspended{};
464
465 std::atomic<s32> num_created_threads{};
466 std::atomic<u16> num_threads{};
467 u16 peak_num_threads{};
468
469 std::array<KThread*, Core::Hardware::NUM_CPU_CORES> running_threads{};
470 std::array<u64, Core::Hardware::NUM_CPU_CORES> running_thread_idle_counts{};
471 std::array<KThread*, Core::Hardware::NUM_CPU_CORES> pinned_threads{};
472
473 KThread* exception_thread{};
411 474
412 /// System context 475 /// System context
413 Core::System& system; 476 Core::System& system;
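
The PinThread/UnpinThread helpers and the pinned_threads array added to Process above enforce a simple invariant: at most one pinned thread per core, and only the thread that was pinned may be unpinned. Here is a standalone sketch of that table with the same assert-guarded checks; the core count and the thread type are placeholders for the sketch, not the kernel's definitions.

// Sketch only: per-core pinned-thread table in the spirit of Process::PinThread/UnpinThread.
#include <array>
#include <cassert>
#include <cstddef>

struct ThreadStub {}; // stand-in for KThread

constexpr std::size_t NUM_CPU_CORES = 4; // assumed core count for the sketch

struct PinnedThreadTable {
    std::array<ThreadStub*, NUM_CPU_CORES> pinned_threads{};

    void Pin(std::size_t core_id, ThreadStub* thread) {
        assert(core_id < NUM_CPU_CORES);
        assert(thread != nullptr);
        assert(pinned_threads[core_id] == nullptr); // a core holds at most one pin
        pinned_threads[core_id] = thread;
    }

    void Unpin(std::size_t core_id, ThreadStub* thread) {
        assert(core_id < NUM_CPU_CORES);
        assert(pinned_threads[core_id] == thread);  // only the pinned thread may unpin
        pinned_threads[core_id] = nullptr;
    }

    ThreadStub* Get(std::size_t core_id) const {
        assert(core_id < NUM_CPU_CORES);
        return pinned_threads[core_id];
    }
};

int main() {
    PinnedThreadTable table;
    ThreadStub thread;
    table.Pin(0, &thread);
    assert(table.Get(0) == &thread);
    table.Unpin(0, &thread);
    assert(table.Get(0) == nullptr);
    return 0;
}
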
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index 280c9b5f6..790dbb998 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -154,7 +154,7 @@ ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) {
154 KScopedSchedulerLock lock(kernel); 154 KScopedSchedulerLock lock(kernel);
155 if (!context.IsThreadWaiting()) { 155 if (!context.IsThreadWaiting()) {
156 context.GetThread().Wakeup(); 156 context.GetThread().Wakeup();
157 context.GetThread().SetSynchronizationResults(nullptr, result); 157 context.GetThread().SetSyncedObject(nullptr, result);
158 } 158 }
159 } 159 }
160 160
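
With the old SetSynchronizationResults/GetSignalingResult pair folded into SetSyncedObject/GetWaitResult, the waker records both the signaling object and the result in one place, and the woken thread reads them back together, which is how the SendSyncRequest change in svc.cpp below consumes it via a dummy object pointer. The standalone sketch below shows that handshake; the types are simplified stand-ins, not the kernel's classes.

// Sketch only: the synced-object/wait-result handshake behind SetSyncedObject/GetWaitResult.
#include <cstdio>

struct SyncObjectStub {};            // stand-in for KSynchronizationObject
struct ResultCodeStub { int raw; };  // stand-in for ResultCode
constexpr ResultCodeStub kSuccess{0};

class ThreadSketch {
public:
    // Called by whoever wakes the thread: store what signaled us and why.
    void SetSyncedObject(SyncObjectStub* object, ResultCodeStub result) {
        synced_object = object;
        wait_result = result;
    }

    // Called by the woken thread: read both back in one step.
    ResultCodeStub GetWaitResult(SyncObjectStub** out_object) const {
        *out_object = synced_object;
        return wait_result;
    }

private:
    SyncObjectStub* synced_object{};
    ResultCodeStub wait_result{kSuccess};
};

int main() {
    ThreadSketch thread;
    thread.SetSyncedObject(nullptr, kSuccess); // e.g. an HLE reply with no signaling object

    SyncObjectStub* dummy{};
    const ResultCodeStub result = thread.GetWaitResult(&dummy);
    std::printf("result=%d object=%p\n", result.raw, static_cast<void*>(dummy));
    return 0;
}
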
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 2512bfd98..dbef854f8 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -351,7 +351,8 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
351 session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming()); 351 session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
352 } 352 }
353 353
354 return thread->GetSignalingResult(); 354 KSynchronizationObject* dummy{};
355 return thread->GetWaitResult(std::addressof(dummy));
355} 356}
356 357
357static ResultCode SendSyncRequest32(Core::System& system, Handle handle) { 358static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
@@ -359,27 +360,26 @@ static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
359} 360}
360 361
361/// Get the ID for the specified thread. 362/// Get the ID for the specified thread.
362static ResultCode GetThreadId(Core::System& system, u64* thread_id, Handle thread_handle) { 363static ResultCode GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) {
363 LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle); 364 LOG_TRACE(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
364 365
366 // Get the thread from its handle.
365 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 367 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
366 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); 368 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
367 if (!thread) { 369 R_UNLESS(thread, Svc::ResultInvalidHandle);
368 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", thread_handle);
369 return ERR_INVALID_HANDLE;
370 }
371 370
372 *thread_id = thread->GetThreadID(); 371 // Get the thread's id.
372 *out_thread_id = thread->GetThreadID();
373 return RESULT_SUCCESS; 373 return RESULT_SUCCESS;
374} 374}
375 375
376static ResultCode GetThreadId32(Core::System& system, u32* thread_id_low, u32* thread_id_high, 376static ResultCode GetThreadId32(Core::System& system, u32* out_thread_id_low,
377 Handle thread_handle) { 377 u32* out_thread_id_high, Handle thread_handle) {
378 u64 thread_id{}; 378 u64 out_thread_id{};
379 const ResultCode result{GetThreadId(system, &thread_id, thread_handle)}; 379 const ResultCode result{GetThreadId(system, &out_thread_id, thread_handle)};
380 380
381 *thread_id_low = static_cast<u32>(thread_id >> 32); 381 *out_thread_id_low = static_cast<u32>(out_thread_id >> 32);
382 *thread_id_high = static_cast<u32>(thread_id & std::numeric_limits<u32>::max()); 382 *out_thread_id_high = static_cast<u32>(out_thread_id & std::numeric_limits<u32>::max());
383 383
384 return result; 384 return result;
385} 385}
@@ -473,15 +473,13 @@ static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u
473static ResultCode CancelSynchronization(Core::System& system, Handle thread_handle) { 473static ResultCode CancelSynchronization(Core::System& system, Handle thread_handle) {
474 LOG_TRACE(Kernel_SVC, "called thread=0x{:X}", thread_handle); 474 LOG_TRACE(Kernel_SVC, "called thread=0x{:X}", thread_handle);
475 475
476 // Get the thread from its handle.
476 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 477 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
477 std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); 478 std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
478 if (!thread) { 479 R_UNLESS(thread, Svc::ResultInvalidHandle);
479 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
480 thread_handle);
481 return ERR_INVALID_HANDLE;
482 }
483 480
484 thread->CancelWait(); 481 // Cancel the thread's wait.
482 thread->WaitCancel();
485 return RESULT_SUCCESS; 483 return RESULT_SUCCESS;
486} 484}
487 485
@@ -630,7 +628,7 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
630 handle_debug_buffer(info1, info2); 628 handle_debug_buffer(info1, info2);
631 629
632 auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread(); 630 auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
633 const auto thread_processor_id = current_thread->GetProcessorID(); 631 const auto thread_processor_id = current_thread->GetActiveCore();
634 system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace(); 632 system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
635 } 633 }
636} 634}
@@ -888,7 +886,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
888 const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks(); 886 const u64 prev_ctx_ticks = scheduler.GetLastContextSwitchTicks();
889 u64 out_ticks = 0; 887 u64 out_ticks = 0;
890 if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) { 888 if (same_thread && info_sub_id == 0xFFFFFFFFFFFFFFFF) {
891 const u64 thread_ticks = current_thread->GetTotalCPUTimeTicks(); 889 const u64 thread_ticks = current_thread->GetCpuTime();
892 890
893 out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks); 891 out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
894 } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) { 892 } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
@@ -1025,127 +1023,109 @@ static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size
1025 return UnmapPhysicalMemory(system, addr, size); 1023 return UnmapPhysicalMemory(system, addr, size);
1026} 1024}
1027 1025
1028/// Sets the thread activity 1026constexpr bool IsValidThreadActivity(Svc::ThreadActivity thread_activity) {
1029static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 activity) { 1027 switch (thread_activity) {
1030 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", handle, activity); 1028 case Svc::ThreadActivity::Runnable:
1031 if (activity > static_cast<u32>(ThreadActivity::Paused)) { 1029 case Svc::ThreadActivity::Paused:
1032 return ERR_INVALID_ENUM_VALUE; 1030 return true;
1031 default:
1032 return false;
1033 } 1033 }
1034}
1034 1035
1035 const auto* current_process = system.Kernel().CurrentProcess(); 1036/// Sets the thread activity
1036 const std::shared_ptr<KThread> thread = current_process->GetHandleTable().Get<KThread>(handle); 1037static ResultCode SetThreadActivity(Core::System& system, Handle thread_handle,
1037 if (!thread) { 1038 Svc::ThreadActivity thread_activity) {
1038 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle); 1039 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, activity=0x{:08X}", thread_handle,
1039 return ERR_INVALID_HANDLE; 1040 thread_activity);
1040 }
1041 1041
1042 if (thread->GetOwnerProcess() != current_process) { 1042 // Validate the activity.
1043 LOG_ERROR(Kernel_SVC, 1043 R_UNLESS(IsValidThreadActivity(thread_activity), Svc::ResultInvalidEnumValue);
1044 "The current process does not own the current thread, thread_handle={:08X} "
1045 "thread_pid={}, "
1046 "current_process_pid={}",
1047 handle, thread->GetOwnerProcess()->GetProcessID(),
1048 current_process->GetProcessID());
1049 return ERR_INVALID_HANDLE;
1050 }
1051 1044
1052 if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) { 1045 // Get the thread from its handle.
1053 LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread"); 1046 auto& kernel = system.Kernel();
1054 return ERR_BUSY; 1047 const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
1055 } 1048 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
1049 R_UNLESS(thread, Svc::ResultInvalidHandle);
1050
1051 // Check that the activity is being set on a non-current thread for the current process.
1052 R_UNLESS(thread->GetOwnerProcess() == kernel.CurrentProcess(), Svc::ResultInvalidHandle);
1053 R_UNLESS(thread.get() != GetCurrentThreadPointer(kernel), Svc::ResultBusy);
1054
1055 // Set the activity.
1056 R_TRY(thread->SetActivity(thread_activity));
1056 1057
1057 return thread->SetActivity(static_cast<ThreadActivity>(activity)); 1058 return RESULT_SUCCESS;
1058} 1059}
1059 1060
1060static ResultCode SetThreadActivity32(Core::System& system, Handle handle, u32 activity) { 1061static ResultCode SetThreadActivity32(Core::System& system, Handle thread_handle,
1061 return SetThreadActivity(system, handle, activity); 1062 Svc::ThreadActivity thread_activity) {
1063 return SetThreadActivity(system, thread_handle, thread_activity);
1062} 1064}
1063 1065
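The rewritten handler above replaces the explicit log-and-return blocks with the R_UNLESS/R_TRY result macros. As a rough sketch of that pattern only (the real macros and result values live in yuzu's result headers and differ in detail):

#include <cstdint>

// Minimal stand-in for ResultCode, just enough to show the control flow.
struct Result {
    std::uint32_t raw{};
    constexpr bool IsError() const { return raw != 0; }
};
constexpr Result ResultSuccess{0};
constexpr Result ResultInvalidHandle{1}; // arbitrary nonzero value for illustration

#define R_UNLESS(expr, res)                                                    \
    do {                                                                       \
        if (!(expr)) {                                                         \
            return (res);                                                      \
        }                                                                      \
    } while (0)

#define R_TRY(res_expr)                                                        \
    do {                                                                       \
        if (const Result _rc = (res_expr); _rc.IsError()) {                    \
            return _rc;                                                        \
        }                                                                      \
    } while (0)

// Usage mirroring SetThreadActivity: bail out early on a bad handle,
// propagate any error from the nested call, otherwise report success.
Result SetActivityChecked(bool handle_ok, Result activity_result) {
    R_UNLESS(handle_ok, ResultInvalidHandle);
    R_TRY(activity_result);
    return ResultSuccess;
}
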
1064/// Gets the thread context 1066/// Gets the thread context
1065static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, Handle handle) { 1067static ResultCode GetThreadContext(Core::System& system, VAddr out_context, Handle thread_handle) {
1066 LOG_DEBUG(Kernel_SVC, "called, context=0x{:08X}, thread=0x{:X}", thread_context, handle); 1068 LOG_DEBUG(Kernel_SVC, "called, out_context=0x{:08X}, thread_handle=0x{:X}", out_context,
1069 thread_handle);
1067 1070
1071 // Get the thread from its handle.
1068 const auto* current_process = system.Kernel().CurrentProcess(); 1072 const auto* current_process = system.Kernel().CurrentProcess();
1069 const std::shared_ptr<KThread> thread = current_process->GetHandleTable().Get<KThread>(handle); 1073 const std::shared_ptr<KThread> thread =
1070 if (!thread) { 1074 current_process->GetHandleTable().Get<KThread>(thread_handle);
1071 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle); 1075 R_UNLESS(thread, Svc::ResultInvalidHandle);
1072 return ERR_INVALID_HANDLE;
1073 }
1074 1076
1075 if (thread->GetOwnerProcess() != current_process) { 1077 // Require the handle be to a non-current thread in the current process.
1076 LOG_ERROR(Kernel_SVC, 1078 R_UNLESS(thread->GetOwnerProcess() == current_process, Svc::ResultInvalidHandle);
1077 "The current process does not own the current thread, thread_handle={:08X} " 1079 R_UNLESS(thread.get() != system.Kernel().CurrentScheduler()->GetCurrentThread(),
1078 "thread_pid={}, " 1080 Svc::ResultBusy);
1079 "current_process_pid={}",
1080 handle, thread->GetOwnerProcess()->GetProcessID(),
1081 current_process->GetProcessID());
1082 return ERR_INVALID_HANDLE;
1083 }
1084 1081
1085 if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) { 1082 // Get the thread context.
1086 LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread"); 1083 std::vector<u8> context;
1087 return ERR_BUSY; 1084 R_TRY(thread->GetThreadContext3(context));
1088 }
1089 1085
1090 Core::ARM_Interface::ThreadContext64 ctx = thread->GetContext64(); 1086 // Copy the thread context to user space.
1091 // Mask away mode bits, interrupt bits, IL bit, and other reserved bits. 1087 system.Memory().WriteBlock(out_context, context.data(), context.size());
1092 ctx.pstate &= 0xFF0FFE20;
1093 1088
1094 // If 64-bit, we can just write the context registers directly and we're good.
1095 // However, if 32-bit, we have to ensure some registers are zeroed out.
1096 if (!current_process->Is64BitProcess()) {
1097 std::fill(ctx.cpu_registers.begin() + 15, ctx.cpu_registers.end(), 0);
1098 std::fill(ctx.vector_registers.begin() + 16, ctx.vector_registers.end(), u128{});
1099 }
1100
1101 system.Memory().WriteBlock(thread_context, &ctx, sizeof(ctx));
1102 return RESULT_SUCCESS; 1089 return RESULT_SUCCESS;
1103} 1090}
1104 1091
1105static ResultCode GetThreadContext32(Core::System& system, u32 thread_context, Handle handle) { 1092static ResultCode GetThreadContext32(Core::System& system, u32 out_context, Handle thread_handle) {
1106 return GetThreadContext(system, thread_context, handle); 1093 return GetThreadContext(system, out_context, thread_handle);
1107} 1094}
1108 1095
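Worth noting for the GetThreadContext rewrite above: the pstate masking and the zeroing of the upper registers for 32-bit processes no longer happen in the SVC handler, which now just forwards whatever byte buffer KThread::GetThreadContext3 produces. For reference, this is the sanitization the old handler performed, restated as a standalone sketch (ThreadContext64 here is a trimmed stand-in for the real ARM interface type):

#include <algorithm>
#include <array>
#include <cstdint>

// Hypothetical stand-in for Core::ARM_Interface::ThreadContext64, trimmed to
// the fields the removed code touched.
struct ThreadContext64 {
    std::array<std::uint64_t, 31> cpu_registers{};
    std::array<std::array<std::uint64_t, 2>, 32> vector_registers{};
    std::uint32_t pstate{};
};

void SanitizeContextForUser(ThreadContext64& ctx, bool is_64bit_process) {
    // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
    ctx.pstate &= 0xFF0FFE20;
    // 32-bit processes only expose r0-r14 and the first 16 vector registers.
    if (!is_64bit_process) {
        std::fill(ctx.cpu_registers.begin() + 15, ctx.cpu_registers.end(), 0);
        std::fill(ctx.vector_registers.begin() + 16, ctx.vector_registers.end(),
                  std::array<std::uint64_t, 2>{});
    }
}
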
1109/// Gets the priority for the specified thread 1096/// Gets the priority for the specified thread
1110static ResultCode GetThreadPriority(Core::System& system, u32* priority, Handle handle) { 1097static ResultCode GetThreadPriority(Core::System& system, u32* out_priority, Handle handle) {
1111 LOG_TRACE(Kernel_SVC, "called"); 1098 LOG_TRACE(Kernel_SVC, "called");
1112 1099
1100 // Get the thread from its handle.
1113 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1101 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1114 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle); 1102 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle);
1115 if (!thread) { 1103 R_UNLESS(thread, Svc::ResultInvalidHandle);
1116 *priority = 0;
1117 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle);
1118 return ERR_INVALID_HANDLE;
1119 }
1120 1104
1121 *priority = thread->GetPriority(); 1105 // Get the thread's priority.
1106 *out_priority = thread->GetPriority();
1122 return RESULT_SUCCESS; 1107 return RESULT_SUCCESS;
1123} 1108}
1124 1109
1125static ResultCode GetThreadPriority32(Core::System& system, u32* priority, Handle handle) { 1110static ResultCode GetThreadPriority32(Core::System& system, u32* out_priority, Handle handle) {
1126 return GetThreadPriority(system, priority, handle); 1111 return GetThreadPriority(system, out_priority, handle);
1127} 1112}
1128 1113
1129/// Sets the priority for the specified thread 1114/// Sets the priority for the specified thread
1130static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 priority) { 1115static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 priority) {
1131 LOG_TRACE(Kernel_SVC, "called"); 1116 LOG_TRACE(Kernel_SVC, "called");
1132 1117
1133 if (priority > Svc::LowestThreadPriority) { 1118 // Validate the priority.
1134 LOG_ERROR(Kernel_SVC, "An invalid priority was specified {} for thread_handle={:08X}", 1119 R_UNLESS(Svc::HighestThreadPriority <= priority && priority <= Svc::LowestThreadPriority,
1135 priority, handle); 1120 Svc::ResultInvalidPriority);
1136 return ERR_INVALID_THREAD_PRIORITY;
1137 }
1138
1139 const auto* const current_process = system.Kernel().CurrentProcess();
1140 1121
1141 std::shared_ptr<KThread> thread = current_process->GetHandleTable().Get<KThread>(handle); 1122 // Get the thread from its handle.
1142 if (!thread) { 1123 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1143 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, handle=0x{:08X}", handle); 1124 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(handle);
1144 return ERR_INVALID_HANDLE; 1125 R_UNLESS(thread, Svc::ResultInvalidHandle);
1145 }
1146 1126
1127 // Set the thread priority.
1147 thread->SetBasePriority(priority); 1128 thread->SetBasePriority(priority);
1148
1149 return RESULT_SUCCESS; 1129 return RESULT_SUCCESS;
1150} 1130}
1151 1131
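The new priority check above is a closed-range test between the Svc priority bounds rather than a single upper-bound comparison. Spelled out with the bounds written as literals (0 and 63 are assumed here; the handler reads them from Svc::HighestThreadPriority and Svc::LowestThreadPriority):

#include <cstdint>

constexpr std::uint32_t HighestThreadPriority = 0;  // assumed value
constexpr std::uint32_t LowestThreadPriority = 63;  // assumed value

constexpr bool IsValidThreadPriority(std::uint32_t priority) {
    return HighestThreadPriority <= priority && priority <= LowestThreadPriority;
}

static_assert(IsValidThreadPriority(44));
static_assert(!IsValidThreadPriority(64));
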
@@ -1436,7 +1416,7 @@ static void ExitProcess(Core::System& system) {
1436 current_process->PrepareForTermination(); 1416 current_process->PrepareForTermination();
1437 1417
1438 // Kill the current thread 1418 // Kill the current thread
1439 system.Kernel().CurrentScheduler()->GetCurrentThread()->Stop(); 1419 system.Kernel().CurrentScheduler()->GetCurrentThread()->Exit();
1440} 1420}
1441 1421
1442static void ExitProcess32(Core::System& system) { 1422static void ExitProcess32(Core::System& system) {
@@ -1500,17 +1480,15 @@ static ResultCode CreateThread32(Core::System& system, Handle* out_handle, u32 p
1500static ResultCode StartThread(Core::System& system, Handle thread_handle) { 1480static ResultCode StartThread(Core::System& system, Handle thread_handle) {
1501 LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle); 1481 LOG_DEBUG(Kernel_SVC, "called thread=0x{:08X}", thread_handle);
1502 1482
1483 // Get the thread from its handle.
1503 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1484 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1504 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); 1485 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
1505 if (!thread) { 1486 R_UNLESS(thread, Svc::ResultInvalidHandle);
1506 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
1507 thread_handle);
1508 return ERR_INVALID_HANDLE;
1509 }
1510 1487
1511 ASSERT(thread->GetState() == ThreadState::Initialized); 1488 // Try to start the thread.
1489 R_TRY(thread->Run());
1512 1490
1513 return thread->Start(); 1491 return RESULT_SUCCESS;
1514} 1492}
1515 1493
1516static ResultCode StartThread32(Core::System& system, Handle thread_handle) { 1494static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
@@ -1523,7 +1501,7 @@ static void ExitThread(Core::System& system) {
1523 1501
1524 auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread(); 1502 auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
1525 system.GlobalSchedulerContext().RemoveThread(SharedFrom(current_thread)); 1503 system.GlobalSchedulerContext().RemoveThread(SharedFrom(current_thread));
1526 current_thread->Stop(); 1504 current_thread->Exit();
1527} 1505}
1528 1506
1529static void ExitThread32(Core::System& system) { 1507static void ExitThread32(Core::System& system) {
@@ -1532,34 +1510,28 @@ static void ExitThread32(Core::System& system) {
1532 1510
1533/// Sleep the current thread 1511/// Sleep the current thread
1534static void SleepThread(Core::System& system, s64 nanoseconds) { 1512static void SleepThread(Core::System& system, s64 nanoseconds) {
1535 LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds); 1513 auto& kernel = system.Kernel();
1514 const auto yield_type = static_cast<Svc::YieldType>(nanoseconds);
1536 1515
1537 enum class SleepType : s64 { 1516 LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);
1538 YieldWithoutCoreMigration = 0,
1539 YieldWithCoreMigration = -1,
1540 YieldAndWaitForLoadBalancing = -2,
1541 };
1542 1517
1543 auto& scheduler = *system.Kernel().CurrentScheduler(); 1518 // When the input tick is positive, sleep.
1544 if (nanoseconds <= 0) { 1519 if (nanoseconds > 0) {
1545 switch (static_cast<SleepType>(nanoseconds)) { 1520 // Convert the timeout from nanoseconds to ticks.
1546 case SleepType::YieldWithoutCoreMigration: { 1521 // NOTE: Nintendo does not use this conversion logic in WaitSynchronization...
1547 scheduler.YieldWithoutCoreMigration(); 1522
1548 break; 1523 // Sleep.
1549 } 1524 // NOTE: Nintendo does not check the result of this sleep.
1550 case SleepType::YieldWithCoreMigration: { 1525 static_cast<void>(GetCurrentThread(kernel).Sleep(nanoseconds));
1551 scheduler.YieldWithCoreMigration(); 1526 } else if (yield_type == Svc::YieldType::WithoutCoreMigration) {
1552 break; 1527 KScheduler::YieldWithoutCoreMigration(kernel);
1553 } 1528 } else if (yield_type == Svc::YieldType::WithCoreMigration) {
1554 case SleepType::YieldAndWaitForLoadBalancing: { 1529 KScheduler::YieldWithCoreMigration(kernel);
1555 scheduler.YieldToAnyThread(); 1530 } else if (yield_type == Svc::YieldType::ToAnyThread) {
1556 break; 1531 KScheduler::YieldToAnyThread(kernel);
1557 }
1558 default:
1559 UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
1560 }
1561 } else { 1532 } else {
1562 scheduler.GetCurrentThread()->Sleep(nanoseconds); 1533 // Nintendo does nothing at all if an otherwise invalid value is passed.
1534 UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
1563 } 1535 }
1564} 1536}
1565 1537
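The SleepThread rewrite above drops the function-local SleepType enum in favour of the shared Svc::YieldType added to svc_types.h later in this commit: a positive value sleeps, 0/-1/-2 select a yield flavour, and anything else is unreachable. A self-contained sketch of that classification (SleepAction and ClassifySleep are illustrative names, not yuzu code):

#include <cstdint>

enum class YieldType : std::int64_t {
    WithoutCoreMigration = 0,
    WithCoreMigration = -1,
    ToAnyThread = -2,
};

// Illustrative dispatcher mirroring the control flow above; the real handler
// calls KThread::Sleep and the static KScheduler yield helpers instead.
enum class SleepAction { Sleep, YieldNoMigration, YieldMigration, YieldAny, Invalid };

constexpr SleepAction ClassifySleep(std::int64_t nanoseconds) {
    if (nanoseconds > 0) {
        return SleepAction::Sleep;
    }
    switch (static_cast<YieldType>(nanoseconds)) {
    case YieldType::WithoutCoreMigration:
        return SleepAction::YieldNoMigration;
    case YieldType::WithCoreMigration:
        return SleepAction::YieldMigration;
    case YieldType::ToAnyThread:
        return SleepAction::YieldAny;
    default:
        return SleepAction::Invalid;
    }
}

static_assert(ClassifySleep(100) == SleepAction::Sleep);
static_assert(ClassifySleep(-2) == SleepAction::YieldAny);
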
@@ -1822,95 +1794,72 @@ static ResultCode CreateTransferMemory32(Core::System& system, Handle* handle, u
1822 return CreateTransferMemory(system, handle, addr, size, permissions); 1794 return CreateTransferMemory(system, handle, addr, size, permissions);
1823} 1795}
1824 1796
1825static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core, 1797static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, s32* out_core_id,
1826 u64* mask) { 1798 u64* out_affinity_mask) {
1827 LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle); 1799 LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle);
1828 1800
1801 // Get the thread from its handle.
1829 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 1802 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1830 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); 1803 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
1831 if (!thread) { 1804 R_UNLESS(thread, Svc::ResultInvalidHandle);
1832 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
1833 thread_handle);
1834 *core = 0;
1835 *mask = 0;
1836 return ERR_INVALID_HANDLE;
1837 }
1838 1805
1839 *core = thread->GetIdealCore(); 1806 // Get the core mask.
1840 *mask = thread->GetAffinityMask().GetAffinityMask(); 1807 R_TRY(thread->GetCoreMask(out_core_id, out_affinity_mask));
1841 1808
1842 return RESULT_SUCCESS; 1809 return RESULT_SUCCESS;
1843} 1810}
1844 1811
1845static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle, u32* core, 1812static ResultCode GetThreadCoreMask32(Core::System& system, Handle thread_handle, s32* out_core_id,
1846 u32* mask_low, u32* mask_high) { 1813 u32* out_affinity_mask_low, u32* out_affinity_mask_high) {
1847 u64 mask{}; 1814 u64 out_affinity_mask{};
1848 const auto result = GetThreadCoreMask(system, thread_handle, core, &mask); 1815 const auto result = GetThreadCoreMask(system, thread_handle, out_core_id, &out_affinity_mask);
1849 *mask_high = static_cast<u32>(mask >> 32); 1816 *out_affinity_mask_high = static_cast<u32>(out_affinity_mask >> 32);
1850 *mask_low = static_cast<u32>(mask); 1817 *out_affinity_mask_low = static_cast<u32>(out_affinity_mask);
1851 return result; 1818 return result;
1852} 1819}
1853 1820
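GetThreadCoreMask32 above only has to split the 64-bit affinity mask across two 32-bit outputs. As a tiny standalone helper:

#include <cstdint>
#include <utility>

// Splits an affinity mask into the low/high halves the 32-bit ABI returns.
constexpr std::pair<std::uint32_t, std::uint32_t> SplitAffinityMask(std::uint64_t mask) {
    return {static_cast<std::uint32_t>(mask), static_cast<std::uint32_t>(mask >> 32)};
}

static_assert(SplitAffinityMask(0x0000000300000001ULL).first == 0x1);
static_assert(SplitAffinityMask(0x0000000300000001ULL).second == 0x3);
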
1854static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, u32 core, 1821static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, s32 core_id,
1855 u64 affinity_mask) { 1822 u64 affinity_mask) {
1856 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core=0x{:X}, affinity_mask=0x{:016X}", 1823 LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, core_id=0x{:X}, affinity_mask=0x{:016X}",
1857 thread_handle, core, affinity_mask); 1824 thread_handle, core_id, affinity_mask);
1858
1859 const auto* const current_process = system.Kernel().CurrentProcess();
1860 1825
1861 if (core == static_cast<u32>(Svc::IdealCoreUseProcessValue)) { 1826 const auto& current_process = *system.Kernel().CurrentProcess();
1862 const u8 ideal_cpu_core = current_process->GetIdealCoreId();
1863 1827
1864 ASSERT(ideal_cpu_core != static_cast<u8>(Svc::IdealCoreUseProcessValue)); 1828 // Determine the core id/affinity mask.
1865 1829 if (core_id == Svc::IdealCoreUseProcessValue) {
1866 // Set the target CPU to the ideal core specified by the process. 1830 core_id = current_process.GetIdealCoreId();
1867 core = ideal_cpu_core; 1831 affinity_mask = (1ULL << core_id);
1868 affinity_mask = 1ULL << core;
1869 } else { 1832 } else {
1870 const u64 core_mask = current_process->GetCoreMask(); 1833 // Validate the affinity mask.
1871 1834 const u64 process_core_mask = current_process.GetCoreMask();
1872 if ((core_mask | affinity_mask) != core_mask) { 1835 R_UNLESS((affinity_mask | process_core_mask) == process_core_mask,
1873 LOG_ERROR( 1836 Svc::ResultInvalidCoreId);
1874 Kernel_SVC, 1837 R_UNLESS(affinity_mask != 0, Svc::ResultInvalidCombination);
1875 "Invalid processor ID specified (core_mask=0x{:08X}, affinity_mask=0x{:016X})", 1838
1876 core_mask, affinity_mask); 1839 // Validate the core id.
1877 return ERR_INVALID_PROCESSOR_ID; 1840 if (IsValidCoreId(core_id)) {
1878 } 1841 R_UNLESS(((1ULL << core_id) & affinity_mask) != 0, Svc::ResultInvalidCombination);
1879 1842 } else {
1880 if (affinity_mask == 0) { 1843 R_UNLESS(core_id == Svc::IdealCoreNoUpdate || core_id == Svc::IdealCoreDontCare,
1881 LOG_ERROR(Kernel_SVC, "Specfified affinity mask is zero."); 1844 Svc::ResultInvalidCoreId);
1882 return ERR_INVALID_COMBINATION;
1883 }
1884
1885 if (core < Core::Hardware::NUM_CPU_CORES) {
1886 if ((affinity_mask & (1ULL << core)) == 0) {
1887 LOG_ERROR(Kernel_SVC,
1888 "Core is not enabled for the current mask, core={}, mask={:016X}", core,
1889 affinity_mask);
1890 return ERR_INVALID_COMBINATION;
1891 }
1892 } else if (core != static_cast<u32>(Svc::IdealCoreDontCare) &&
1893 core != static_cast<u32>(Svc::IdealCoreNoUpdate)) {
1894 LOG_ERROR(Kernel_SVC, "Invalid processor ID specified (core={}).", core);
1895 return ERR_INVALID_PROCESSOR_ID;
1896 } 1845 }
1897 } 1846 }
1898 1847
1899 const auto& handle_table = current_process->GetHandleTable(); 1848 // Get the thread from its handle.
1849 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1900 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle); 1850 const std::shared_ptr<KThread> thread = handle_table.Get<KThread>(thread_handle);
1901 if (!thread) { 1851 R_UNLESS(thread, Svc::ResultInvalidHandle);
1902 LOG_ERROR(Kernel_SVC, "Thread handle does not exist, thread_handle=0x{:08X}",
1903 thread_handle);
1904 return ERR_INVALID_HANDLE;
1905 }
1906 1852
1907 return thread->SetCoreAndAffinityMask(core, affinity_mask); 1853 // Set the core mask.
1854 R_TRY(thread->SetCoreMask(core_id, affinity_mask));
1855
1856 return RESULT_SUCCESS;
1908} 1857}
1909 1858
1910static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, u32 core, 1859static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, s32 core_id,
1911 u32 affinity_mask_low, u32 affinity_mask_high) { 1860 u32 affinity_mask_low, u32 affinity_mask_high) {
1912 const auto affinity_mask = u64{affinity_mask_low} | (u64{affinity_mask_high} << 32); 1861 const auto affinity_mask = u64{affinity_mask_low} | (u64{affinity_mask_high} << 32);
1913 return SetThreadCoreMask(system, thread_handle, core, affinity_mask); 1862 return SetThreadCoreMask(system, thread_handle, core_id, affinity_mask);
1914} 1863}
1915 1864
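The SetThreadCoreMask validation above compresses the old checks into three rules: the requested mask must be a subset of the process core mask, must be non-empty, and must contain the chosen core unless a sentinel id was passed. A compile-time sketch of those rules (the sentinel values and core count are assumptions taken from the SVC ABI, not quoted from this commit):

#include <cstdint>

// Sentinel core ids used by the SVC ABI (assumed values).
constexpr std::int32_t IdealCoreDontCare = -1;
constexpr std::int32_t IdealCoreUseProcessValue = -2;
constexpr std::int32_t IdealCoreNoUpdate = -3;
constexpr std::int32_t NumCpuCores = 4;

constexpr bool IsValidCoreId(std::int32_t core_id) {
    return 0 <= core_id && core_id < NumCpuCores;
}

// Mirrors the validation above for the non-IdealCoreUseProcessValue path.
constexpr bool IsValidCoreRequest(std::int32_t core_id, std::uint64_t affinity_mask,
                                  std::uint64_t process_core_mask) {
    if ((affinity_mask | process_core_mask) != process_core_mask || affinity_mask == 0) {
        return false;
    }
    if (IsValidCoreId(core_id)) {
        return ((1ULL << core_id) & affinity_mask) != 0;
    }
    return core_id == IdealCoreNoUpdate || core_id == IdealCoreDontCare;
}

static_assert(IsValidCoreRequest(1, 0b0010, 0b1111));
static_assert(!IsValidCoreRequest(2, 0b0010, 0b1111));  // core 2 not in the mask
static_assert(!IsValidCoreRequest(1, 0b10000, 0b1111)); // outside the process mask
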
1916static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle* read_handle) { 1865static ResultCode CreateEvent(Core::System& system, Handle* write_handle, Handle* read_handle) {
@@ -2474,7 +2423,7 @@ void Call(Core::System& system, u32 immediate) {
2474 kernel.EnterSVCProfile(); 2423 kernel.EnterSVCProfile();
2475 2424
2476 auto* thread = kernel.CurrentScheduler()->GetCurrentThread(); 2425 auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
2477 thread->SetContinuousOnSVC(true); 2426 thread->SetIsCallingSvc();
2478 2427
2479 const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate) 2428 const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)
2480 : GetSVCInfo32(immediate); 2429 : GetSVCInfo32(immediate);
@@ -2490,7 +2439,7 @@ void Call(Core::System& system, u32 immediate) {
2490 2439
2491 kernel.ExitSVCProfile(); 2440 kernel.ExitSVCProfile();
2492 2441
2493 if (!thread->IsContinuousOnSVC()) { 2442 if (!thread->IsCallingSvc()) {
2494 auto* host_context = thread->GetHostContext().get(); 2443 auto* host_context = thread->GetHostContext().get();
2495 host_context->Rewind(); 2444 host_context->Rewind();
2496 } 2445 }
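The Svc::Call changes are a straight rename (SetContinuousOnSVC/IsContinuousOnSVC become SetIsCallingSvc/IsCallingSvc); the surrounding logic is unchanged, i.e. the host context is rewound only when the in-SVC flag is no longer set after the handler returns. A toy illustration of that guard, with stubbed types in place of KThread and the host fiber context:

struct Thread {
    bool is_calling_svc = false;
    void SetIsCallingSvc() { is_calling_svc = true; }
    bool IsCallingSvc() const { return is_calling_svc; }
};

struct HostContext {
    bool rewound = false;
    void Rewind() { rewound = true; }
};

// Mirrors the tail of Svc::Call above: rewind only if something cleared the
// in-SVC flag while the handler ran.
void FinishSvc(const Thread& thread, HostContext& host) {
    if (!thread.IsCallingSvc()) {
        host.Rewind();
    }
}
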
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index ded55af9a..ec463b97c 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -77,6 +77,12 @@ enum class ArbitrationType : u32 {
77 WaitIfEqual = 2, 77 WaitIfEqual = 2,
78}; 78};
79 79
80enum class YieldType : s64 {
81 WithoutCoreMigration = 0,
82 WithCoreMigration = -1,
83 ToAnyThread = -2,
84};
85
80enum class ThreadActivity : u32 { 86enum class ThreadActivity : u32 {
81 Runnable = 0, 87 Runnable = 0,
82 Paused = 1, 88 Paused = 1,
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index a32750ed7..96afd544b 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -58,6 +58,14 @@ void SvcWrap64(Core::System& system) {
58 func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw); 58 func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1))).raw);
59} 59}
60 60
61// Used by SetThreadActivity
62template <ResultCode func(Core::System&, Handle, Svc::ThreadActivity)>
63void SvcWrap64(Core::System& system) {
64 FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
65 static_cast<Svc::ThreadActivity>(Param(system, 1)))
66 .raw);
67}
68
61template <ResultCode func(Core::System&, u32, u64, u64, u64)> 69template <ResultCode func(Core::System&, u32, u64, u64, u64)>
62void SvcWrap64(Core::System& system) { 70void SvcWrap64(Core::System& system) {
63 FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), 71 FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)), Param(system, 1),
@@ -158,9 +166,18 @@ void SvcWrap64(Core::System& system) {
158 .raw); 166 .raw);
159} 167}
160 168
161template <ResultCode func(Core::System&, u32, u32*, u64*)> 169// Used by SetThreadCoreMask
170template <ResultCode func(Core::System&, Handle, s32, u64)>
162void SvcWrap64(Core::System& system) { 171void SvcWrap64(Core::System& system) {
163 u32 param_1 = 0; 172 FuncReturn(system, func(system, static_cast<u32>(Param(system, 0)),
173 static_cast<s32>(Param(system, 1)), Param(system, 2))
174 .raw);
175}
176
177// Used by GetThreadCoreMask
178template <ResultCode func(Core::System&, Handle, s32*, u64*)>
179void SvcWrap64(Core::System& system) {
180 s32 param_1 = 0;
164 u64 param_2 = 0; 181 u64 param_2 = 0;
165 const ResultCode retval = func(system, static_cast<u32>(Param(system, 2)), &param_1, &param_2); 182 const ResultCode retval = func(system, static_cast<u32>(Param(system, 2)), &param_1, &param_2);
166 183
@@ -473,12 +490,35 @@ void SvcWrap32(Core::System& system) {
473 FuncReturn(system, retval); 490 FuncReturn(system, retval);
474} 491}
475 492
493// Used by GetThreadCoreMask32
494template <ResultCode func(Core::System&, Handle, s32*, u32*, u32*)>
495void SvcWrap32(Core::System& system) {
496 s32 param_1 = 0;
497 u32 param_2 = 0;
498 u32 param_3 = 0;
499
500 const u32 retval = func(system, Param32(system, 2), &param_1, &param_2, &param_3).raw;
501 system.CurrentArmInterface().SetReg(1, param_1);
502 system.CurrentArmInterface().SetReg(2, param_2);
503 system.CurrentArmInterface().SetReg(3, param_3);
504 FuncReturn(system, retval);
505}
506
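The new GetThreadCoreMask32 wrapper above shows the usual pattern for SVCs with out-parameters under the 32-bit ABI: the handler fills host-side locals, and the wrapper then copies them into guest registers r1..r3 (r0 carries the ResultCode via FuncReturn). A stubbed illustration of that write-back step (GuestRegs is a fake register file, not yuzu's ARM_Interface):

#include <array>
#include <cstdint>

// Fake register file standing in for ARM_Interface::SetReg.
struct GuestRegs {
    std::array<std::uint64_t, 31> x{};
    void SetReg(std::size_t index, std::uint64_t value) { x[index] = value; }
};

// Mirrors the wrapper above: core id in r1, affinity mask halves in r2/r3.
void WriteCoreMaskOutputs(GuestRegs& regs, std::int32_t core_id,
                          std::uint32_t mask_low, std::uint32_t mask_high) {
    regs.SetReg(1, static_cast<std::uint32_t>(core_id));
    regs.SetReg(2, mask_low);
    regs.SetReg(3, mask_high);
}
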
476// Used by SignalProcessWideKey32 507// Used by SignalProcessWideKey32
477template <void func(Core::System&, u32, s32)> 508template <void func(Core::System&, u32, s32)>
478void SvcWrap32(Core::System& system) { 509void SvcWrap32(Core::System& system) {
479 func(system, static_cast<u32>(Param(system, 0)), static_cast<s32>(Param(system, 1))); 510 func(system, static_cast<u32>(Param(system, 0)), static_cast<s32>(Param(system, 1)));
480} 511}
481 512
513// Used by SetThreadActivity32
514template <ResultCode func(Core::System&, Handle, Svc::ThreadActivity)>
515void SvcWrap32(Core::System& system) {
516 const u32 retval = func(system, static_cast<Handle>(Param(system, 0)),
517 static_cast<Svc::ThreadActivity>(Param(system, 1)))
518 .raw;
519 FuncReturn(system, retval);
520}
521
482// Used by SetThreadPriority32 522// Used by SetThreadPriority32
483template <ResultCode func(Core::System&, Handle, u32)> 523template <ResultCode func(Core::System&, Handle, u32)>
484void SvcWrap32(Core::System& system) { 524void SvcWrap32(Core::System& system) {
@@ -487,7 +527,7 @@ void SvcWrap32(Core::System& system) {
487 FuncReturn(system, retval); 527 FuncReturn(system, retval);
488} 528}
489 529
490// Used by SetThreadCoreMask32 530// Used by SetMemoryAttribute32
491template <ResultCode func(Core::System&, Handle, u32, u32, u32)> 531template <ResultCode func(Core::System&, Handle, u32, u32, u32)>
492void SvcWrap32(Core::System& system) { 532void SvcWrap32(Core::System& system) {
493 const u32 retval = 533 const u32 retval =
@@ -497,6 +537,16 @@ void SvcWrap32(Core::System& system) {
497 FuncReturn(system, retval); 537 FuncReturn(system, retval);
498} 538}
499 539
540// Used by SetThreadCoreMask32
541template <ResultCode func(Core::System&, Handle, s32, u32, u32)>
542void SvcWrap32(Core::System& system) {
543 const u32 retval =
544 func(system, static_cast<Handle>(Param(system, 0)), static_cast<s32>(Param(system, 1)),
545 static_cast<u32>(Param(system, 2)), static_cast<u32>(Param(system, 3)))
546 .raw;
547 FuncReturn(system, retval);
548}
549
500// Used by WaitProcessWideKeyAtomic32 550// Used by WaitProcessWideKeyAtomic32
501template <ResultCode func(Core::System&, u32, u32, Handle, u32, u32)> 551template <ResultCode func(Core::System&, u32, u32, Handle, u32, u32)>
502void SvcWrap32(Core::System& system) { 552void SvcWrap32(Core::System& system) {
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index d6354b01d..cbec692f9 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -235,12 +235,8 @@ QString WaitTreeThread::GetText() const {
235 QString status; 235 QString status;
236 switch (thread.GetState()) { 236 switch (thread.GetState()) {
237 case Kernel::ThreadState::Runnable: 237 case Kernel::ThreadState::Runnable:
238 if (!thread.IsPaused()) { 238 if (!thread.IsSuspended()) {
239 if (thread.WasRunning()) { 239 status = tr("runnable");
240 status = tr("running");
241 } else {
242 status = tr("ready");
243 }
244 } else { 240 } else {
245 status = tr("paused"); 241 status = tr("paused");
246 } 242 }
@@ -295,12 +291,8 @@ QColor WaitTreeThread::GetColor() const {
295 const auto& thread = static_cast<const Kernel::KThread&>(object); 291 const auto& thread = static_cast<const Kernel::KThread&>(object);
296 switch (thread.GetState()) { 292 switch (thread.GetState()) {
297 case Kernel::ThreadState::Runnable: 293 case Kernel::ThreadState::Runnable:
298 if (!thread.IsPaused()) { 294 if (!thread.IsSuspended()) {
299 if (thread.WasRunning()) { 295 return QColor(WaitTreeColors[0][color_index]);
300 return QColor(WaitTreeColors[0][color_index]);
301 } else {
302 return QColor(WaitTreeColors[1][color_index]);
303 }
304 } else { 296 } else {
305 return QColor(WaitTreeColors[2][color_index]); 297 return QColor(WaitTreeColors[2][color_index]);
306 } 298 }
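On the debugger side, the running/ready distinction goes away entirely: a thread in ThreadState::Runnable is now labelled either runnable or paused, keyed only on IsSuspended(). The reduced mapping, as a sketch with plain strings instead of the Qt tr() calls:

#include <string_view>

// Mirrors the simplified Runnable branch of WaitTreeThread::GetText above.
constexpr std::string_view RunnableLabel(bool is_suspended) {
    return is_suspended ? "paused" : "runnable";
}

static_assert(RunnableLabel(false) == "runnable");
static_assert(RunnableLabel(true) == "paused");
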
@@ -334,18 +326,18 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
334 const auto& thread = static_cast<const Kernel::KThread&>(object); 326 const auto& thread = static_cast<const Kernel::KThread&>(object);
335 327
336 QString processor; 328 QString processor;
337 switch (thread.GetProcessorID()) { 329 switch (thread.GetActiveCore()) {
338 case Kernel::Svc::IdealCoreUseProcessValue: 330 case Kernel::Svc::IdealCoreUseProcessValue:
339 processor = tr("ideal"); 331 processor = tr("ideal");
340 break; 332 break;
341 default: 333 default:
342 processor = tr("core %1").arg(thread.GetProcessorID()); 334 processor = tr("core %1").arg(thread.GetActiveCore());
343 break; 335 break;
344 } 336 }
345 337
346 list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor))); 338 list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor)));
347 list.push_back( 339 list.push_back(std::make_unique<WaitTreeText>(
348 std::make_unique<WaitTreeText>(tr("ideal core = %1").arg(thread.GetIdealCore()))); 340 tr("ideal core = %1").arg(thread.GetIdealCoreForDebugging())));
349 list.push_back(std::make_unique<WaitTreeText>( 341 list.push_back(std::make_unique<WaitTreeText>(
350 tr("affinity mask = %1").arg(thread.GetAffinityMask().GetAffinityMask()))); 342 tr("affinity mask = %1").arg(thread.GetAffinityMask().GetAffinityMask())));
351 list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID()))); 343 list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID())));