Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp | 27
-rw-r--r--  src/core/hle/kernel/address_arbiter.h | 3
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.cpp | 52
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.h | 81
-rw-r--r--  src/core/hle/kernel/handle_table.cpp | 6
-rw-r--r--  src/core/hle/kernel/hle_ipc.cpp | 41
-rw-r--r--  src/core/hle/kernel/hle_ipc.h | 23
-rw-r--r--  src/core/hle/kernel/k_affinity_mask.h | 58
-rw-r--r--  src/core/hle/kernel/k_priority_queue.h | 451
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp | 784
-rw-r--r--  src/core/hle/kernel/k_scheduler.h | 201
-rw-r--r--  src/core/hle/kernel/k_scheduler_lock.h | 75
-rw-r--r--  src/core/hle/kernel/k_scoped_lock.h | 41
-rw-r--r--  src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h | 50
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 201
-rw-r--r--  src/core/hle/kernel/kernel.h | 39
-rw-r--r--  src/core/hle/kernel/memory/address_space_info.cpp | 2
-rw-r--r--  src/core/hle/kernel/memory/memory_block.h | 20
-rw-r--r--  src/core/hle/kernel/memory/memory_block_manager.h | 4
-rw-r--r--  src/core/hle/kernel/memory/page_table.cpp | 19
-rw-r--r--  src/core/hle/kernel/mutex.cpp | 12
-rw-r--r--  src/core/hle/kernel/physical_core.cpp | 52
-rw-r--r--  src/core/hle/kernel/physical_core.h | 44
-rw-r--r--  src/core/hle/kernel/process.cpp | 17
-rw-r--r--  src/core/hle/kernel/process.h | 13
-rw-r--r--  src/core/hle/kernel/process_capability.cpp | 2
-rw-r--r--  src/core/hle/kernel/readable_event.cpp | 4
-rw-r--r--  src/core/hle/kernel/resource_limit.cpp | 4
-rw-r--r--  src/core/hle/kernel/scheduler.cpp | 849
-rw-r--r--  src/core/hle/kernel/scheduler.h | 318
-rw-r--r--  src/core/hle/kernel/server_session.cpp | 36
-rw-r--r--  src/core/hle/kernel/server_session.h | 12
-rw-r--r--  src/core/hle/kernel/service_thread.cpp | 110
-rw-r--r--  src/core/hle/kernel/service_thread.h | 28
-rw-r--r--  src/core/hle/kernel/svc.cpp | 149
-rw-r--r--  src/core/hle/kernel/svc_types.h | 4
-rw-r--r--  src/core/hle/kernel/synchronization.cpp | 11
-rw-r--r--  src/core/hle/kernel/synchronization_object.h | 3
-rw-r--r--  src/core/hle/kernel/thread.cpp | 120
-rw-r--r--  src/core/hle/kernel/thread.h | 120
-rw-r--r--  src/core/hle/kernel/time_manager.cpp | 26
-rw-r--r--  src/core/hle/kernel/time_manager.h | 2
42 files changed, 2449 insertions, 1665 deletions
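
This change retires the old Scheduler in scheduler.cpp/scheduler.h in favour of KScheduler and its supporting headers (k_scheduler_lock.h, k_scoped_lock.h, k_scoped_scheduler_lock_and_sleep.h). Most of the call-site churn in the hunks below is therefore mechanical: the SchedulerLock/SchedulerLockAndSleep RAII helpers become KScopedSchedulerLock/KScopedSchedulerLockAndSleep, and the current thread is fetched through kernel.CurrentScheduler()->GetCurrentThread(). The contents of k_scoped_lock.h are not reproduced in this listing; the snippet below is only a rough sketch, under the assumption that it provides a generic RAII guard over any type exposing Lock()/Unlock(), which is the behaviour the call sites rely on.

// Sketch only: k_scoped_lock.h is added by this commit but not shown in this listing.
// Names carry a "Sketch" suffix to make clear they are illustrative, not the real API.
#include <memory>

template <typename T>
concept KLockableSketch = requires(T& t) {
    { t.Lock() };
    { t.Unlock() };
};

template <KLockableSketch T>
class KScopedLockSketch {
public:
    explicit KScopedLockSketch(T& lock) : lock_ptr(std::addressof(lock)) {
        lock_ptr->Lock(); // acquired on construction
    }
    ~KScopedLockSketch() {
        lock_ptr->Unlock(); // released on scope exit
    }

    KScopedLockSketch(const KScopedLockSketch&) = delete;
    KScopedLockSketch& operator=(const KScopedLockSketch&) = delete;

private:
    T* lock_ptr;
};
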
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index b882eaa0f..20ffa7d47 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -12,8 +12,9 @@
12#include "core/hle/kernel/address_arbiter.h" 12#include "core/hle/kernel/address_arbiter.h"
13#include "core/hle/kernel/errors.h" 13#include "core/hle/kernel/errors.h"
14#include "core/hle/kernel/handle_table.h" 14#include "core/hle/kernel/handle_table.h"
15#include "core/hle/kernel/k_scheduler.h"
16#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
15#include "core/hle/kernel/kernel.h" 17#include "core/hle/kernel/kernel.h"
16#include "core/hle/kernel/scheduler.h"
17#include "core/hle/kernel/thread.h" 18#include "core/hle/kernel/thread.h"
18#include "core/hle/kernel/time_manager.h" 19#include "core/hle/kernel/time_manager.h"
19#include "core/hle/result.h" 20#include "core/hle/result.h"
@@ -58,7 +59,7 @@ ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 v
 }
 
 ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     const std::vector<std::shared_ptr<Thread>> waiting_threads =
         GetThreadsWaitingOnAddress(address);
     WakeThreads(waiting_threads, num_to_wake);
@@ -67,7 +68,7 @@ ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
 
 ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                               s32 num_to_wake) {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     auto& memory = system.Memory();
 
     // Ensure that we can write to the address.
@@ -92,7 +93,7 @@ ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32
 
 ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                                          s32 num_to_wake) {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     auto& memory = system.Memory();
 
     // Ensure that we can write to the address.
@@ -153,11 +154,11 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
                                                     bool should_decrement) {
     auto& memory = system.Memory();
     auto& kernel = system.Kernel();
-    Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
+    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
 
     Handle event_handle = InvalidHandle;
     {
-        SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
+        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
 
         if (current_thread->IsPendingTermination()) {
             lock.CancelSleep();
@@ -210,7 +211,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
     }
 
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         if (current_thread->IsWaitingForArbitration()) {
             RemoveThread(SharedFrom(current_thread));
             current_thread->WaitForArbitration(false);
@@ -223,11 +224,11 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
 ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
     auto& memory = system.Memory();
     auto& kernel = system.Kernel();
-    Thread* current_thread = system.CurrentScheduler().GetCurrentThread();
+    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
 
     Handle event_handle = InvalidHandle;
     {
-        SchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
+        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
 
         if (current_thread->IsPendingTermination()) {
             lock.CancelSleep();
@@ -265,7 +266,7 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t
     }
 
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         if (current_thread->IsWaitingForArbitration()) {
             RemoveThread(SharedFrom(current_thread));
             current_thread->WaitForArbitration(false);
@@ -275,12 +276,6 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t
     return current_thread->GetSignalingResult();
 }
 
-void AddressArbiter::HandleWakeupThread(std::shared_ptr<Thread> thread) {
-    ASSERT(thread->GetStatus() == ThreadStatus::WaitArb);
-    RemoveThread(thread);
-    thread->SetArbiterWaitAddress(0);
-}
-
 void AddressArbiter::InsertThread(std::shared_ptr<Thread> thread) {
     const VAddr arb_addr = thread->GetArbiterWaitAddress();
     std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
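
The wait paths above keep their overall shape; only the lock helpers and the current-thread lookup change. Grounded in the hunks shown (and with KScopedSchedulerLockAndSleep's internals assumed, since that header is added by this commit but not reproduced here), the pattern is: construct the lock-and-sleep guard with an out-parameter event handle, the waiting thread, and a timeout; call CancelSleep() on early-out paths so no timeout event is armed; otherwise let the destructor release the scheduler lock and schedule the wake-up, then re-take a plain KScopedSchedulerLock afterwards to clean up. A condensed sketch:

// Condensed sketch of the wait pattern used in WaitForAddressIfLessThan and
// WaitForAddressIfEqual above. Arbiter bookkeeping and value checks are elided;
// the error code on the termination path is an assumption, not quoted from the diff.
ResultCode WaitPatternSketch(KernelCore& kernel, Thread* current_thread, s64 timeout) {
    Handle event_handle = InvalidHandle;
    {
        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
        if (current_thread->IsPendingTermination()) {
            lock.CancelSleep(); // early out: do not arm the timeout event
            return ERR_THREAD_TERMINATING;
        }
        // ... validate the target address/value and enqueue the thread on the arbiter ...
    } // destructor releases the scheduler lock and, unless cancelled, arms the time event

    {
        KScopedSchedulerLock lock(kernel); // after wake-up or timeout
        if (current_thread->IsWaitingForArbitration()) {
            // remove the thread from the wait list, as the hunks above do
            current_thread->WaitForArbitration(false);
        }
    }
    return current_thread->GetSignalingResult();
}
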
diff --git a/src/core/hle/kernel/address_arbiter.h b/src/core/hle/kernel/address_arbiter.h
index 0b05d533c..b91edc67d 100644
--- a/src/core/hle/kernel/address_arbiter.h
+++ b/src/core/hle/kernel/address_arbiter.h
@@ -50,9 +50,6 @@ public:
     /// Waits on an address with a particular arbitration type.
     ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns);
 
-    /// Removes a thread from the container and resets its address arbiter adress to 0
-    void HandleWakeupThread(std::shared_ptr<Thread> thread);
-
 private:
     /// Signals an address being waited on.
     ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake);
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
new file mode 100644
index 000000000..a133e8ed0
--- /dev/null
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -0,0 +1,52 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <mutex>
6
7#include "common/assert.h"
8#include "core/core.h"
9#include "core/hle/kernel/global_scheduler_context.h"
10#include "core/hle/kernel/k_scheduler.h"
11#include "core/hle/kernel/kernel.h"
12
13namespace Kernel {
14
15GlobalSchedulerContext::GlobalSchedulerContext(KernelCore& kernel)
16 : kernel{kernel}, scheduler_lock{kernel} {}
17
18GlobalSchedulerContext::~GlobalSchedulerContext() = default;
19
20void GlobalSchedulerContext::AddThread(std::shared_ptr<Thread> thread) {
21 std::scoped_lock lock{global_list_guard};
22 thread_list.push_back(std::move(thread));
23}
24
25void GlobalSchedulerContext::RemoveThread(std::shared_ptr<Thread> thread) {
26 std::scoped_lock lock{global_list_guard};
27 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
28 thread_list.end());
29}
30
31void GlobalSchedulerContext::PreemptThreads() {
32 // The priority levels at which the global scheduler preempts threads every 10 ms. They are
33 // ordered from Core 0 to Core 3.
34 static constexpr std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities{
35 59,
36 59,
37 59,
38 63,
39 };
40
41 ASSERT(IsLocked());
42 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
43 const u32 priority = preemption_priorities[core_id];
44 kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
45 }
46}
47
48bool GlobalSchedulerContext::IsLocked() const {
49 return scheduler_lock.IsLockedByCurrentThread();
50}
51
52} // namespace Kernel
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
new file mode 100644
index 000000000..5c7b89290
--- /dev/null
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -0,0 +1,81 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <atomic>
8#include <vector>
9
10#include "common/common_types.h"
11#include "common/spin_lock.h"
12#include "core/hardware_properties.h"
13#include "core/hle/kernel/k_priority_queue.h"
14#include "core/hle/kernel/k_scheduler_lock.h"
15#include "core/hle/kernel/thread.h"
16
17namespace Kernel {
18
19class KernelCore;
20class SchedulerLock;
21
22using KSchedulerPriorityQueue =
23 KPriorityQueue<Thread, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST, THREADPRIO_HIGHEST>;
24constexpr s32 HighestCoreMigrationAllowedPriority = 2;
25
26class GlobalSchedulerContext final {
27 friend class KScheduler;
28
29public:
30 using LockType = KAbstractSchedulerLock<KScheduler>;
31
32 explicit GlobalSchedulerContext(KernelCore& kernel);
33 ~GlobalSchedulerContext();
34
35 /// Adds a new thread to the scheduler
36 void AddThread(std::shared_ptr<Thread> thread);
37
38 /// Removes a thread from the scheduler
39 void RemoveThread(std::shared_ptr<Thread> thread);
40
41 /// Returns a list of all threads managed by the scheduler
42 [[nodiscard]] const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
43 return thread_list;
44 }
45
46 /**
47 * Rotates the scheduling queues of threads at a preemption priority and then does
48 * some core rebalancing. Preemption priorities can be found in the array
49 * 'preemption_priorities'.
50 *
51 * @note This operation happens every 10ms.
52 */
53 void PreemptThreads();
54
55 /// Returns true if the global scheduler lock is acquired
56 bool IsLocked() const;
57
58 [[nodiscard]] LockType& SchedulerLock() {
59 return scheduler_lock;
60 }
61
62 [[nodiscard]] const LockType& SchedulerLock() const {
63 return scheduler_lock;
64 }
65
66private:
67 friend class KScopedSchedulerLock;
68 friend class KScopedSchedulerLockAndSleep;
69
70 KernelCore& kernel;
71
72 std::atomic_bool scheduler_update_needed{};
73 KSchedulerPriorityQueue priority_queue;
74 LockType scheduler_lock;
75
76 /// Lists all thread ids that aren't deleted/etc.
77 std::vector<std::shared_ptr<Thread>> thread_list;
78 Common::SpinLock global_list_guard{};
79};
80
81} // namespace Kernel
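
GlobalSchedulerContext owns the pieces every per-core KScheduler shares: the KSchedulerPriorityQueue of runnable threads, the spin-lock-guarded thread list, and the scheduler LockType (KAbstractSchedulerLock<KScheduler>, declared in k_scheduler_lock.h, which this commit adds but this listing does not show). PreemptThreads() asserts that this lock is held, so the 10 ms preemption tick has to run under a KScopedSchedulerLock. The driver below is hypothetical and for illustration only; the real hookup lives in kernel.cpp, which is also changed by this commit but not reproduced here.

// Hypothetical 10 ms preemption callback. Only the calls on KernelCore and
// GlobalSchedulerContext appear in this diff; the surrounding function is invented.
void PreemptionTickSketch(Core::System& system) {
    auto& kernel = system.Kernel();
    {
        // PreemptThreads() asserts IsLocked(), so take the scheduler lock first.
        KScopedSchedulerLock lock(kernel);

        // Rotates the scheduled queues at priorities {59, 59, 59, 63} (cores 0-3)
        // and attempts to rebalance threads across cores.
        kernel.GlobalSchedulerContext().PreemptThreads();
    }
    // Any cores flagged for rescheduling by the rotation are serviced when the
    // scoped lock is released; per k_scheduler_lock.h (not shown), that is the
    // lock type's responsibility rather than this function's.
}
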
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index fb30b6f8b..40988b0fd 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -8,9 +8,9 @@
 #include "core/core.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/thread.h"
 
 namespace Kernel {
@@ -105,7 +105,7 @@ bool HandleTable::IsValid(Handle handle) const {
 
 std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
     if (handle == CurrentThread) {
-        return SharedFrom(kernel.CurrentScheduler().GetCurrentThread());
+        return SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
     } else if (handle == CurrentProcess) {
         return SharedFrom(kernel.CurrentProcess());
     }
@@ -118,7 +118,7 @@ std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
 
 void HandleTable::Clear() {
     for (u16 i = 0; i < table_size; ++i) {
-        generations[i] = i + 1;
+        generations[i] = static_cast<u16>(i + 1);
         objects[i] = nullptr;
     }
     next_free_slot = 0;
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 81f85643b..83decf6cf 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -17,11 +17,12 @@
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/hle_ipc.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/readable_event.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/server_session.h"
 #include "core/hle/kernel/thread.h"
 #include "core/hle/kernel/time_manager.h"
@@ -45,44 +46,6 @@ void SessionRequestHandler::ClientDisconnected(
     boost::range::remove_erase(connected_sessions, server_session);
 }
 
-std::shared_ptr<WritableEvent> HLERequestContext::SleepClientThread(
-    const std::string& reason, u64 timeout, WakeupCallback&& callback,
-    std::shared_ptr<WritableEvent> writable_event) {
-    // Put the client thread to sleep until the wait event is signaled or the timeout expires.
-
-    if (!writable_event) {
-        // Create event if not provided
-        const auto pair = WritableEvent::CreateEventPair(kernel, "HLE Pause Event: " + reason);
-        writable_event = pair.writable;
-    }
-
-    {
-        Handle event_handle = InvalidHandle;
-        SchedulerLockAndSleep lock(kernel, event_handle, thread.get(), timeout);
-        thread->SetHLECallback(
-            [context = *this, callback](std::shared_ptr<Thread> thread) mutable -> bool {
-                ThreadWakeupReason reason = thread->GetSignalingResult() == RESULT_TIMEOUT
-                                                ? ThreadWakeupReason::Timeout
-                                                : ThreadWakeupReason::Signal;
-                callback(thread, context, reason);
-                context.WriteToOutgoingCommandBuffer(*thread);
-                return true;
-            });
-        const auto readable_event{writable_event->GetReadableEvent()};
-        writable_event->Clear();
-        thread->SetHLESyncObject(readable_event.get());
-        thread->SetStatus(ThreadStatus::WaitHLEEvent);
-        thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
-        readable_event->AddWaitingThread(thread);
-        lock.Release();
-        thread->SetHLETimeEvent(event_handle);
-    }
-
-    is_thread_waiting = true;
-
-    return writable_event;
-}
-
 HLERequestContext::HLERequestContext(KernelCore& kernel, Core::Memory::Memory& memory,
                                      std::shared_ptr<ServerSession> server_session,
                                      std::shared_ptr<Thread> thread)
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index f3277b766..b112e1ebd 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -24,6 +24,10 @@ namespace Core::Memory {
 class Memory;
 }
 
+namespace IPC {
+class ResponseBuilder;
+}
+
 namespace Service {
 class ServiceFrameworkBase;
 }
@@ -125,23 +129,6 @@ public:
     using WakeupCallback = std::function<void(
         std::shared_ptr<Thread> thread, HLERequestContext& context, ThreadWakeupReason reason)>;
 
-    /**
-     * Puts the specified guest thread to sleep until the returned event is signaled or until the
-     * specified timeout expires.
-     * @param reason Reason for pausing the thread, to be used for debugging purposes.
-     * @param timeout Timeout in nanoseconds after which the thread will be awoken and the callback
-     * invoked with a Timeout reason.
-     * @param callback Callback to be invoked when the thread is resumed. This callback must write
-     * the entire command response once again, regardless of the state of it before this function
-     * was called.
-     * @param writable_event Event to use to wake up the thread. If unspecified, an event will be
-     * created.
-     * @returns Event that when signaled will resume the thread and call the callback function.
-     */
-    std::shared_ptr<WritableEvent> SleepClientThread(
-        const std::string& reason, u64 timeout, WakeupCallback&& callback,
-        std::shared_ptr<WritableEvent> writable_event = nullptr);
-
     /// Populates this context with data from the requesting process/thread.
     ResultCode PopulateFromIncomingCommandBuffer(const HandleTable& handle_table,
                                                  u32_le* src_cmdbuf);
@@ -287,6 +274,8 @@ public:
     }
 
 private:
+    friend class IPC::ResponseBuilder;
+
     void ParseCommandBuffer(const HandleTable& handle_table, u32_le* src_cmdbuf, bool incoming);
 
     std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
diff --git a/src/core/hle/kernel/k_affinity_mask.h b/src/core/hle/kernel/k_affinity_mask.h
new file mode 100644
index 000000000..dd73781cd
--- /dev/null
+++ b/src/core/hle/kernel/k_affinity_mask.h
@@ -0,0 +1,58 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphere, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
7
8#pragma once
9
10#include "common/assert.h"
11#include "common/common_types.h"
12#include "core/hardware_properties.h"
13
14namespace Kernel {
15
16class KAffinityMask {
17public:
18 constexpr KAffinityMask() = default;
19
20 [[nodiscard]] constexpr u64 GetAffinityMask() const {
21 return this->mask;
22 }
23
24 constexpr void SetAffinityMask(u64 new_mask) {
25 ASSERT((new_mask & ~AllowedAffinityMask) == 0);
26 this->mask = new_mask;
27 }
28
29 [[nodiscard]] constexpr bool GetAffinity(s32 core) const {
30 return this->mask & GetCoreBit(core);
31 }
32
33 constexpr void SetAffinity(s32 core, bool set) {
34 ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
35
36 if (set) {
37 this->mask |= GetCoreBit(core);
38 } else {
39 this->mask &= ~GetCoreBit(core);
40 }
41 }
42
43 constexpr void SetAll() {
44 this->mask = AllowedAffinityMask;
45 }
46
47private:
48 [[nodiscard]] static constexpr u64 GetCoreBit(s32 core) {
49 ASSERT(0 <= core && core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
50 return (1ULL << core);
51 }
52
53 static constexpr u64 AllowedAffinityMask = (1ULL << Core::Hardware::NUM_CPU_CORES) - 1;
54
55 u64 mask{};
56};
57
58} // namespace Kernel
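
KAffinityMask is a thin wrapper over a 64-bit bitmask of allowed cores, clamped to Core::Hardware::NUM_CPU_CORES (4 on the Switch). A small usage example based only on the class above; it is illustrative and not part of the commit.

// Usage example for KAffinityMask (illustrative only).
#include "common/assert.h"
#include "core/hle/kernel/k_affinity_mask.h"

void AffinityMaskExample() {
    Kernel::KAffinityMask mask;

    mask.SetAffinity(0, true);  // allow core 0
    mask.SetAffinity(3, true);  // allow core 3
    ASSERT(mask.GetAffinity(3));
    ASSERT(mask.GetAffinityMask() == 0b1001); // bits 0 and 3 set

    mask.SetAffinity(3, false); // drop core 3 again
    mask.SetAll();              // allow every core: mask becomes 0b1111
}
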
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
new file mode 100644
index 000000000..99fb8fe93
--- /dev/null
+++ b/src/core/hle/kernel/k_priority_queue.h
@@ -0,0 +1,451 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphere, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
7
8#pragma once
9
10#include <array>
11#include <concepts>
12
13#include "common/assert.h"
14#include "common/bit_set.h"
15#include "common/bit_util.h"
16#include "common/common_types.h"
17#include "common/concepts.h"
18
19namespace Kernel {
20
21class Thread;
22
23template <typename T>
24concept KPriorityQueueAffinityMask = !std::is_reference_v<T> && requires(T & t) {
25 { t.GetAffinityMask() }
26 ->Common::ConvertibleTo<u64>;
27 {t.SetAffinityMask(std::declval<u64>())};
28
29 { t.GetAffinity(std::declval<int32_t>()) }
30 ->std::same_as<bool>;
31 {t.SetAffinity(std::declval<int32_t>(), std::declval<bool>())};
32 {t.SetAll()};
33};
34
35template <typename T>
36concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T & t) {
37 {typename T::QueueEntry()};
38 {(typename T::QueueEntry()).Initialize()};
39 {(typename T::QueueEntry()).SetPrev(std::addressof(t))};
40 {(typename T::QueueEntry()).SetNext(std::addressof(t))};
41 { (typename T::QueueEntry()).GetNext() }
42 ->std::same_as<T*>;
43 { (typename T::QueueEntry()).GetPrev() }
44 ->std::same_as<T*>;
45 { t.GetPriorityQueueEntry(std::declval<s32>()) }
46 ->std::same_as<typename T::QueueEntry&>;
47
48 {t.GetAffinityMask()};
49 { typename std::remove_cvref<decltype(t.GetAffinityMask())>::type() }
50 ->KPriorityQueueAffinityMask;
51
52 { t.GetActiveCore() }
53 ->Common::ConvertibleTo<s32>;
54 { t.GetPriority() }
55 ->Common::ConvertibleTo<s32>;
56};
57
58template <typename Member, size_t _NumCores, int LowestPriority, int HighestPriority>
59requires KPriorityQueueMember<Member> class KPriorityQueue {
60public:
61 using AffinityMaskType = typename std::remove_cv_t<
62 typename std::remove_reference<decltype(std::declval<Member>().GetAffinityMask())>::type>;
63
64 static_assert(LowestPriority >= 0);
65 static_assert(HighestPriority >= 0);
66 static_assert(LowestPriority >= HighestPriority);
67 static constexpr size_t NumPriority = LowestPriority - HighestPriority + 1;
68 static constexpr size_t NumCores = _NumCores;
69
70 static constexpr bool IsValidCore(s32 core) {
71 return 0 <= core && core < static_cast<s32>(NumCores);
72 }
73
74 static constexpr bool IsValidPriority(s32 priority) {
75 return HighestPriority <= priority && priority <= LowestPriority + 1;
76 }
77
78private:
79 using Entry = typename Member::QueueEntry;
80
81public:
82 class KPerCoreQueue {
83 private:
84 std::array<Entry, NumCores> root{};
85
86 public:
87 constexpr KPerCoreQueue() {
88 for (auto& per_core_root : root) {
89 per_core_root.Initialize();
90 }
91 }
92
93 constexpr bool PushBack(s32 core, Member* member) {
94 // Get the entry associated with the member.
95 Entry& member_entry = member->GetPriorityQueueEntry(core);
96
97 // Get the entry associated with the end of the queue.
98 Member* tail = this->root[core].GetPrev();
99 Entry& tail_entry =
100 (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core];
101
102 // Link the entries.
103 member_entry.SetPrev(tail);
104 member_entry.SetNext(nullptr);
105 tail_entry.SetNext(member);
106 this->root[core].SetPrev(member);
107
108 return tail == nullptr;
109 }
110
111 constexpr bool PushFront(s32 core, Member* member) {
112 // Get the entry associated with the member.
113 Entry& member_entry = member->GetPriorityQueueEntry(core);
114
115 // Get the entry associated with the front of the queue.
116 Member* head = this->root[core].GetNext();
117 Entry& head_entry =
118 (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core];
119
120 // Link the entries.
121 member_entry.SetPrev(nullptr);
122 member_entry.SetNext(head);
123 head_entry.SetPrev(member);
124 this->root[core].SetNext(member);
125
126 return (head == nullptr);
127 }
128
129 constexpr bool Remove(s32 core, Member* member) {
130 // Get the entry associated with the member.
131 Entry& member_entry = member->GetPriorityQueueEntry(core);
132
133 // Get the entries associated with next and prev.
134 Member* prev = member_entry.GetPrev();
135 Member* next = member_entry.GetNext();
136 Entry& prev_entry =
137 (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : this->root[core];
138 Entry& next_entry =
139 (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core];
140
141 // Unlink.
142 prev_entry.SetNext(next);
143 next_entry.SetPrev(prev);
144
145 return (this->GetFront(core) == nullptr);
146 }
147
148 constexpr Member* GetFront(s32 core) const {
149 return this->root[core].GetNext();
150 }
151 };
152
153 class KPriorityQueueImpl {
154 public:
155 constexpr KPriorityQueueImpl() = default;
156
157 constexpr void PushBack(s32 priority, s32 core, Member* member) {
158 ASSERT(IsValidCore(core));
159 ASSERT(IsValidPriority(priority));
160
161 if (priority > LowestPriority) {
162 return;
163 }
164
165 if (this->queues[priority].PushBack(core, member)) {
166 this->available_priorities[core].SetBit(priority);
167 }
168 }
169
170 constexpr void PushFront(s32 priority, s32 core, Member* member) {
171 ASSERT(IsValidCore(core));
172 ASSERT(IsValidPriority(priority));
173
174 if (priority > LowestPriority) {
175 return;
176 }
177
178 if (this->queues[priority].PushFront(core, member)) {
179 this->available_priorities[core].SetBit(priority);
180 }
181 }
182
183 constexpr void Remove(s32 priority, s32 core, Member* member) {
184 ASSERT(IsValidCore(core));
185 ASSERT(IsValidPriority(priority));
186
187 if (priority > LowestPriority) {
188 return;
189 }
190
191 if (this->queues[priority].Remove(core, member)) {
192 this->available_priorities[core].ClearBit(priority);
193 }
194 }
195
196 constexpr Member* GetFront(s32 core) const {
197 ASSERT(IsValidCore(core));
198
199 const s32 priority =
200 static_cast<s32>(this->available_priorities[core].CountLeadingZero());
201 if (priority <= LowestPriority) {
202 return this->queues[priority].GetFront(core);
203 } else {
204 return nullptr;
205 }
206 }
207
208 constexpr Member* GetFront(s32 priority, s32 core) const {
209 ASSERT(IsValidCore(core));
210 ASSERT(IsValidPriority(priority));
211
212 if (priority <= LowestPriority) {
213 return this->queues[priority].GetFront(core);
214 } else {
215 return nullptr;
216 }
217 }
218
219 constexpr Member* GetNext(s32 core, const Member* member) const {
220 ASSERT(IsValidCore(core));
221
222 Member* next = member->GetPriorityQueueEntry(core).GetNext();
223 if (next == nullptr) {
224 const s32 priority = static_cast<s32>(
225 this->available_priorities[core].GetNextSet(member->GetPriority()));
226 if (priority <= LowestPriority) {
227 next = this->queues[priority].GetFront(core);
228 }
229 }
230 return next;
231 }
232
233 constexpr void MoveToFront(s32 priority, s32 core, Member* member) {
234 ASSERT(IsValidCore(core));
235 ASSERT(IsValidPriority(priority));
236
237 if (priority <= LowestPriority) {
238 this->queues[priority].Remove(core, member);
239 this->queues[priority].PushFront(core, member);
240 }
241 }
242
243 constexpr Member* MoveToBack(s32 priority, s32 core, Member* member) {
244 ASSERT(IsValidCore(core));
245 ASSERT(IsValidPriority(priority));
246
247 if (priority <= LowestPriority) {
248 this->queues[priority].Remove(core, member);
249 this->queues[priority].PushBack(core, member);
250 return this->queues[priority].GetFront(core);
251 } else {
252 return nullptr;
253 }
254 }
255
256 private:
257 std::array<KPerCoreQueue, NumPriority> queues{};
258 std::array<Common::BitSet64<NumPriority>, NumCores> available_priorities{};
259 };
260
261private:
262 KPriorityQueueImpl scheduled_queue;
263 KPriorityQueueImpl suggested_queue;
264
265private:
266 constexpr void ClearAffinityBit(u64& affinity, s32 core) {
267 affinity &= ~(u64(1) << core);
268 }
269
270 constexpr s32 GetNextCore(u64& affinity) {
271 const s32 core = Common::CountTrailingZeroes64(affinity);
272 ClearAffinityBit(affinity, core);
273 return core;
274 }
275
276 constexpr void PushBack(s32 priority, Member* member) {
277 ASSERT(IsValidPriority(priority));
278
279 // Push onto the scheduled queue for its core, if we can.
280 u64 affinity = member->GetAffinityMask().GetAffinityMask();
281 if (const s32 core = member->GetActiveCore(); core >= 0) {
282 this->scheduled_queue.PushBack(priority, core, member);
283 ClearAffinityBit(affinity, core);
284 }
285
286 // And suggest the thread for all other cores.
287 while (affinity) {
288 this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
289 }
290 }
291
292 constexpr void PushFront(s32 priority, Member* member) {
293 ASSERT(IsValidPriority(priority));
294
295 // Push onto the scheduled queue for its core, if we can.
296 u64 affinity = member->GetAffinityMask().GetAffinityMask();
297 if (const s32 core = member->GetActiveCore(); core >= 0) {
298 this->scheduled_queue.PushFront(priority, core, member);
299 ClearAffinityBit(affinity, core);
300 }
301
302 // And suggest the thread for all other cores.
303 // Note: Nintendo pushes onto the back of the suggested queue, not the front.
304 while (affinity) {
305 this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
306 }
307 }
308
309 constexpr void Remove(s32 priority, Member* member) {
310 ASSERT(IsValidPriority(priority));
311
312 // Remove from the scheduled queue for its core.
313 u64 affinity = member->GetAffinityMask().GetAffinityMask();
314 if (const s32 core = member->GetActiveCore(); core >= 0) {
315 this->scheduled_queue.Remove(priority, core, member);
316 ClearAffinityBit(affinity, core);
317 }
318
319 // Remove from the suggested queue for all other cores.
320 while (affinity) {
321 this->suggested_queue.Remove(priority, GetNextCore(affinity), member);
322 }
323 }
324
325public:
326 constexpr KPriorityQueue() = default;
327
328 // Getters.
329 constexpr Member* GetScheduledFront(s32 core) const {
330 return this->scheduled_queue.GetFront(core);
331 }
332
333 constexpr Member* GetScheduledFront(s32 core, s32 priority) const {
334 return this->scheduled_queue.GetFront(priority, core);
335 }
336
337 constexpr Member* GetSuggestedFront(s32 core) const {
338 return this->suggested_queue.GetFront(core);
339 }
340
341 constexpr Member* GetSuggestedFront(s32 core, s32 priority) const {
342 return this->suggested_queue.GetFront(priority, core);
343 }
344
345 constexpr Member* GetScheduledNext(s32 core, const Member* member) const {
346 return this->scheduled_queue.GetNext(core, member);
347 }
348
349 constexpr Member* GetSuggestedNext(s32 core, const Member* member) const {
350 return this->suggested_queue.GetNext(core, member);
351 }
352
353 constexpr Member* GetSamePriorityNext(s32 core, const Member* member) const {
354 return member->GetPriorityQueueEntry(core).GetNext();
355 }
356
357 // Mutators.
358 constexpr void PushBack(Member* member) {
359 this->PushBack(member->GetPriority(), member);
360 }
361
362 constexpr void Remove(Member* member) {
363 this->Remove(member->GetPriority(), member);
364 }
365
366 constexpr void MoveToScheduledFront(Member* member) {
367 this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
368 }
369
370 constexpr Thread* MoveToScheduledBack(Member* member) {
371 return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(),
372 member);
373 }
374
375 // First class fancy operations.
376 constexpr void ChangePriority(s32 prev_priority, bool is_running, Member* member) {
377 ASSERT(IsValidPriority(prev_priority));
378
379 // Remove the member from the queues.
380 const s32 new_priority = member->GetPriority();
381 this->Remove(prev_priority, member);
382
383 // And enqueue. If the member is running, we want to keep it running.
384 if (is_running) {
385 this->PushFront(new_priority, member);
386 } else {
387 this->PushBack(new_priority, member);
388 }
389 }
390
391 constexpr void ChangeAffinityMask(s32 prev_core, const AffinityMaskType& prev_affinity,
392 Member* member) {
393 // Get the new information.
394 const s32 priority = member->GetPriority();
395 const AffinityMaskType& new_affinity = member->GetAffinityMask();
396 const s32 new_core = member->GetActiveCore();
397
398 // Remove the member from all queues it was in before.
399 for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
400 if (prev_affinity.GetAffinity(core)) {
401 if (core == prev_core) {
402 this->scheduled_queue.Remove(priority, core, member);
403 } else {
404 this->suggested_queue.Remove(priority, core, member);
405 }
406 }
407 }
408
409 // And add the member to all queues it should be in now.
410 for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
411 if (new_affinity.GetAffinity(core)) {
412 if (core == new_core) {
413 this->scheduled_queue.PushBack(priority, core, member);
414 } else {
415 this->suggested_queue.PushBack(priority, core, member);
416 }
417 }
418 }
419 }
420
421 constexpr void ChangeCore(s32 prev_core, Member* member, bool to_front = false) {
422 // Get the new information.
423 const s32 new_core = member->GetActiveCore();
424 const s32 priority = member->GetPriority();
425
426 // We don't need to do anything if the core is the same.
427 if (prev_core != new_core) {
428 // Remove from the scheduled queue for the previous core.
429 if (prev_core >= 0) {
430 this->scheduled_queue.Remove(priority, prev_core, member);
431 }
432
433 // Remove from the suggested queue and add to the scheduled queue for the new core.
434 if (new_core >= 0) {
435 this->suggested_queue.Remove(priority, new_core, member);
436 if (to_front) {
437 this->scheduled_queue.PushFront(priority, new_core, member);
438 } else {
439 this->scheduled_queue.PushBack(priority, new_core, member);
440 }
441 }
442
443 // Add to the suggested queue for the previous core.
444 if (prev_core >= 0) {
445 this->suggested_queue.PushBack(priority, prev_core, member);
446 }
447 }
448 }
449};
450
451} // namespace Kernel
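
KPriorityQueue keeps two KPriorityQueueImpl instances: the scheduled queue (each thread queued on its active core) and the suggested queue (the same thread advertised to every other core its affinity mask allows), which is what the scheduler's migration logic walks. Any Member type must satisfy KPriorityQueueMember: an intrusive per-core QueueEntry, an affinity-mask type satisfying KPriorityQueueAffinityMask, an active core, and a priority. The real member is Thread (thread.h, changed by this commit but not shown here); the type below is hypothetical and exists only to make the concept's requirements concrete.

// Hypothetical member type satisfying KPriorityQueueMember; illustration only.
// Assumes <array>, "common/common_types.h", "core/hardware_properties.h" and
// "core/hle/kernel/k_affinity_mask.h" are included.
class ExampleMember {
public:
    class QueueEntry {
    public:
        constexpr void Initialize() {
            prev = nullptr;
            next = nullptr;
        }
        constexpr void SetPrev(ExampleMember* m) {
            prev = m;
        }
        constexpr void SetNext(ExampleMember* m) {
            next = m;
        }
        constexpr ExampleMember* GetPrev() const {
            return prev;
        }
        constexpr ExampleMember* GetNext() const {
            return next;
        }

    private:
        ExampleMember* prev{};
        ExampleMember* next{};
    };

    // One intrusive entry per core, as KPerCoreQueue expects.
    QueueEntry& GetPriorityQueueEntry(s32 core) {
        return entries[static_cast<std::size_t>(core)];
    }
    const QueueEntry& GetPriorityQueueEntry(s32 core) const {
        return entries[static_cast<std::size_t>(core)];
    }
    const KAffinityMask& GetAffinityMask() const {
        return affinity_mask;
    }
    s32 GetActiveCore() const {
        return active_core;
    }
    s32 GetPriority() const {
        return priority;
    }

private:
    std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> entries{};
    KAffinityMask affinity_mask{};
    s32 active_core{-1};
    s32 priority{44};
};

// KPriorityQueue<ExampleMember, Core::Hardware::NUM_CPU_CORES, THREADPRIO_LOWEST,
// THREADPRIO_HIGHEST> would then mirror the KSchedulerPriorityQueue alias used by
// the scheduler in global_scheduler_context.h.
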
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
new file mode 100644
index 000000000..c5fd82a6b
--- /dev/null
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -0,0 +1,784 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphere, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
7
8#include "common/assert.h"
9#include "common/bit_util.h"
10#include "common/fiber.h"
11#include "common/logging/log.h"
12#include "core/arm/arm_interface.h"
13#include "core/core.h"
14#include "core/core_timing.h"
15#include "core/cpu_manager.h"
16#include "core/hle/kernel/k_scheduler.h"
17#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
18#include "core/hle/kernel/kernel.h"
19#include "core/hle/kernel/physical_core.h"
20#include "core/hle/kernel/process.h"
21#include "core/hle/kernel/thread.h"
22#include "core/hle/kernel/time_manager.h"
23
24namespace Kernel {
25
26static void IncrementScheduledCount(Kernel::Thread* thread) {
27 if (auto process = thread->GetOwnerProcess(); process) {
28 process->IncrementScheduledCount();
29 }
30}
31
32void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
33 Core::EmuThreadHandle global_thread) {
34 u32 current_core = global_thread.host_handle;
35 bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
36 (current_core < Core::Hardware::NUM_CPU_CORES);
37
38 while (cores_pending_reschedule != 0) {
39 u32 core = Common::CountTrailingZeroes64(cores_pending_reschedule);
40 ASSERT(core < Core::Hardware::NUM_CPU_CORES);
41 if (!must_context_switch || core != current_core) {
42 auto& phys_core = kernel.PhysicalCore(core);
43 phys_core.Interrupt();
44 } else {
45 must_context_switch = true;
46 }
47 cores_pending_reschedule &= ~(1ULL << core);
48 }
49 if (must_context_switch) {
50 auto core_scheduler = kernel.CurrentScheduler();
51 kernel.ExitSVCProfile();
52 core_scheduler->RescheduleCurrentCore();
53 kernel.EnterSVCProfile();
54 }
55}
56
57u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
58 std::scoped_lock lock{guard};
59 if (Thread* prev_highest_thread = this->state.highest_priority_thread;
60 prev_highest_thread != highest_thread) {
61 if (prev_highest_thread != nullptr) {
62 IncrementScheduledCount(prev_highest_thread);
63 prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks());
64 }
65 if (this->state.should_count_idle) {
66 if (highest_thread != nullptr) {
67 // if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
68 // process->SetRunningThread(this->core_id, highest_thread,
69 // this->state.idle_count);
70 //}
71 } else {
72 this->state.idle_count++;
73 }
74 }
75
76 this->state.highest_priority_thread = highest_thread;
77 this->state.needs_scheduling = true;
78 return (1ULL << this->core_id);
79 } else {
80 return 0;
81 }
82}
83
84u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
85 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
86
87 // Clear that we need to update.
88 ClearSchedulerUpdateNeeded(kernel);
89
90 u64 cores_needing_scheduling = 0, idle_cores = 0;
91 Thread* top_threads[Core::Hardware::NUM_CPU_CORES];
92 auto& priority_queue = GetPriorityQueue(kernel);
93
94 /// We want to go over all cores, finding the highest priority thread and determining if
95 /// scheduling is needed for that core.
96 for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
97 Thread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
98 if (top_thread != nullptr) {
99 // If the thread has no waiters, we need to check if the process has a thread pinned.
100 // TODO(bunnei): Implement thread pinning
101 } else {
102 idle_cores |= (1ULL << core_id);
103 }
104
105 top_threads[core_id] = top_thread;
106 cores_needing_scheduling |=
107 kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
108 }
109
110 // Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
111 while (idle_cores != 0) {
112 u32 core_id = Common::CountTrailingZeroes64(idle_cores);
113 if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
114 s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
115 size_t num_candidates = 0;
116
117 // While we have a suggested thread, try to migrate it!
118 while (suggested != nullptr) {
119 // Check if the suggested thread is the top thread on its core.
120 const s32 suggested_core = suggested->GetActiveCore();
121 if (Thread* top_thread =
122 (suggested_core >= 0) ? top_threads[suggested_core] : nullptr;
123 top_thread != suggested) {
124 // Make sure we're not dealing with threads too high priority for migration.
125 if (top_thread != nullptr &&
126 top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) {
127 break;
128 }
129
130 // The suggested thread isn't bound to its core, so we can migrate it!
131 suggested->SetActiveCore(core_id);
132 priority_queue.ChangeCore(suggested_core, suggested);
133
134 top_threads[core_id] = suggested;
135 cores_needing_scheduling |=
136 kernel.Scheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
137 break;
138 }
139
140 // Note this core as a candidate for migration.
141 ASSERT(num_candidates < Core::Hardware::NUM_CPU_CORES);
142 migration_candidates[num_candidates++] = suggested_core;
143 suggested = priority_queue.GetSuggestedNext(core_id, suggested);
144 }
145
146 // If suggested is nullptr, we failed to migrate a specific thread. So let's try all our
147 // candidate cores' top threads.
148 if (suggested == nullptr) {
149 for (size_t i = 0; i < num_candidates; i++) {
150 // Check if there's some other thread that can run on the candidate core.
151 const s32 candidate_core = migration_candidates[i];
152 suggested = top_threads[candidate_core];
153 if (Thread* next_on_candidate_core =
154 priority_queue.GetScheduledNext(candidate_core, suggested);
155 next_on_candidate_core != nullptr) {
156 // The candidate core can run some other thread! We'll migrate its current
157 // top thread to us.
158 top_threads[candidate_core] = next_on_candidate_core;
159 cores_needing_scheduling |=
160 kernel.Scheduler(candidate_core)
161 .UpdateHighestPriorityThread(top_threads[candidate_core]);
162
163 // Perform the migration.
164 suggested->SetActiveCore(core_id);
165 priority_queue.ChangeCore(candidate_core, suggested);
166
167 top_threads[core_id] = suggested;
168 cores_needing_scheduling |=
169 kernel.Scheduler(core_id).UpdateHighestPriorityThread(
170 top_threads[core_id]);
171 break;
172 }
173 }
174 }
175 }
176
177 idle_cores &= ~(1ULL << core_id);
178 }
179
180 return cores_needing_scheduling;
181}
182
183void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state) {
184 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
185
186 // Check if the state has changed, because if it hasn't there's nothing to do.
187 const auto cur_state = thread->scheduling_state;
188 if (cur_state == old_state) {
189 return;
190 }
191
192 // Update the priority queues.
193 if (old_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
194 // If we were previously runnable, then we're not runnable now, and we should remove.
195 GetPriorityQueue(kernel).Remove(thread);
196 IncrementScheduledCount(thread);
197 SetSchedulerUpdateNeeded(kernel);
198 } else if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
199 // If we're now runnable, then we weren't previously, and we should add.
200 GetPriorityQueue(kernel).PushBack(thread);
201 IncrementScheduledCount(thread);
202 SetSchedulerUpdateNeeded(kernel);
203 }
204}
205
206void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
207 u32 old_priority) {
208
209 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
210
211 // If the thread is runnable, we want to change its priority in the queue.
212 if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
213 GetPriorityQueue(kernel).ChangePriority(
214 old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
215 IncrementScheduledCount(thread);
216 SetSchedulerUpdateNeeded(kernel);
217 }
218}
219
220void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
221 const KAffinityMask& old_affinity, s32 old_core) {
222 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
223
224 // If the thread is runnable, we want to change its affinity in the queue.
225 if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
226 GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread);
227 IncrementScheduledCount(thread);
228 SetSchedulerUpdateNeeded(kernel);
229 }
230}
231
232void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
233 ASSERT(system.GlobalSchedulerContext().IsLocked());
234
235 // Get a reference to the priority queue.
236 auto& kernel = system.Kernel();
237 auto& priority_queue = GetPriorityQueue(kernel);
238
239 // Rotate the front of the queue to the end.
240 Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
241 Thread* next_thread = nullptr;
242 if (top_thread != nullptr) {
243 next_thread = priority_queue.MoveToScheduledBack(top_thread);
244 if (next_thread != top_thread) {
245 IncrementScheduledCount(top_thread);
246 IncrementScheduledCount(next_thread);
247 }
248 }
249
250 // While we have a suggested thread, try to migrate it!
251 {
252 Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
253 while (suggested != nullptr) {
254 // Check if the suggested thread is the top thread on its core.
255 const s32 suggested_core = suggested->GetActiveCore();
256 if (Thread* top_on_suggested_core =
257 (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
258 : nullptr;
259 top_on_suggested_core != suggested) {
260 // If the next thread is a new thread that has been waiting longer than our
261 // suggestion, we prefer it to our suggestion.
262 if (top_thread != next_thread && next_thread != nullptr &&
263 next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) {
264 suggested = nullptr;
265 break;
266 }
267
268 // If we're allowed to do a migration, do one.
269 // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion
270 // to the front of the queue.
271 if (top_on_suggested_core == nullptr ||
272 top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
273 suggested->SetActiveCore(core_id);
274 priority_queue.ChangeCore(suggested_core, suggested, true);
275 IncrementScheduledCount(suggested);
276 break;
277 }
278 }
279
280 // Get the next suggestion.
281 suggested = priority_queue.GetSamePriorityNext(core_id, suggested);
282 }
283 }
284
285 // Now that we might have migrated a thread with the same priority, check if we can do better.
286
287 {
288 Thread* best_thread = priority_queue.GetScheduledFront(core_id);
289 if (best_thread == GetCurrentThread()) {
290 best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
291 }
292
293 // If the best thread we can choose has a priority the same or worse than ours, try to
294 // migrate a higher priority thread.
295 if (best_thread != nullptr && best_thread->GetPriority() >= static_cast<u32>(priority)) {
296 Thread* suggested = priority_queue.GetSuggestedFront(core_id);
297 while (suggested != nullptr) {
298 // If the suggestion's priority is the same as ours, don't bother.
299 if (suggested->GetPriority() >= best_thread->GetPriority()) {
300 break;
301 }
302
303 // Check if the suggested thread is the top thread on its core.
304 const s32 suggested_core = suggested->GetActiveCore();
305 if (Thread* top_on_suggested_core =
306 (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
307 : nullptr;
308 top_on_suggested_core != suggested) {
309 // If we're allowed to do a migration, do one.
310 // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
311 // suggestion to the front of the queue.
312 if (top_on_suggested_core == nullptr ||
313 top_on_suggested_core->GetPriority() >=
314 HighestCoreMigrationAllowedPriority) {
315 suggested->SetActiveCore(core_id);
316 priority_queue.ChangeCore(suggested_core, suggested, true);
317 IncrementScheduledCount(suggested);
318 break;
319 }
320 }
321
322 // Get the next suggestion.
323 suggested = priority_queue.GetSuggestedNext(core_id, suggested);
324 }
325 }
326 }
327
328 // After a rotation, we need a scheduler update.
329 SetSchedulerUpdateNeeded(kernel);
330}
331
332bool KScheduler::CanSchedule(KernelCore& kernel) {
333 return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1;
334}
335
336bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
337 return kernel.GlobalSchedulerContext().scheduler_update_needed.load(std::memory_order_acquire);
338}
339
340void KScheduler::SetSchedulerUpdateNeeded(KernelCore& kernel) {
341 kernel.GlobalSchedulerContext().scheduler_update_needed.store(true, std::memory_order_release);
342}
343
344void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
345 kernel.GlobalSchedulerContext().scheduler_update_needed.store(false, std::memory_order_release);
346}
347
348void KScheduler::DisableScheduling(KernelCore& kernel) {
349 if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
350 ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
351 scheduler->GetCurrentThread()->DisableDispatch();
352 }
353}
354
355void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
356 Core::EmuThreadHandle global_thread) {
357 if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
358 scheduler->GetCurrentThread()->EnableDispatch();
359 }
360 RescheduleCores(kernel, cores_needing_scheduling, global_thread);
361}
362
363u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
364 if (IsSchedulerUpdateNeeded(kernel)) {
365 return UpdateHighestPriorityThreadsImpl(kernel);
366 } else {
367 return 0;
368 }
369}
370
371KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
372 return kernel.GlobalSchedulerContext().priority_queue;
373}
374
375void KScheduler::YieldWithoutCoreMigration() {
376 auto& kernel = system.Kernel();
377
378 // Validate preconditions.
379 ASSERT(CanSchedule(kernel));
380 ASSERT(kernel.CurrentProcess() != nullptr);
381
382 // Get the current thread and process.
383 Thread& cur_thread = *GetCurrentThread();
384 Process& cur_process = *kernel.CurrentProcess();
385
386 // If the thread's yield count matches, there's nothing for us to do.
387 if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
388 return;
389 }
390
391 // Get a reference to the priority queue.
392 auto& priority_queue = GetPriorityQueue(kernel);
393
394 // Perform the yield.
395 {
396 KScopedSchedulerLock lock(kernel);
397
398 const auto cur_state = cur_thread.scheduling_state;
399 if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
400 // Put the current thread at the back of the queue.
401 Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
402 IncrementScheduledCount(std::addressof(cur_thread));
403
404 // If the next thread is different, we have an update to perform.
405 if (next_thread != std::addressof(cur_thread)) {
406 SetSchedulerUpdateNeeded(kernel);
407 } else {
408 // Otherwise, set the thread's yield count so that we won't waste work until the
409 // process is scheduled again.
410 cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
411 }
412 }
413 }
414}
415
416void KScheduler::YieldWithCoreMigration() {
417 auto& kernel = system.Kernel();
418
419 // Validate preconditions.
420 ASSERT(CanSchedule(kernel));
421 ASSERT(kernel.CurrentProcess() != nullptr);
422
423 // Get the current thread and process.
424 Thread& cur_thread = *GetCurrentThread();
425 Process& cur_process = *kernel.CurrentProcess();
426
427 // If the thread's yield count matches, there's nothing for us to do.
428 if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
429 return;
430 }
431
432 // Get a reference to the priority queue.
433 auto& priority_queue = GetPriorityQueue(kernel);
434
435 // Perform the yield.
436 {
437 KScopedSchedulerLock lock(kernel);
438
439 const auto cur_state = cur_thread.scheduling_state;
440 if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
441 // Get the current active core.
442 const s32 core_id = cur_thread.GetActiveCore();
443
444 // Put the current thread at the back of the queue.
445 Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
446 IncrementScheduledCount(std::addressof(cur_thread));
447
448 // While we have a suggested thread, try to migrate it!
449 bool recheck = false;
450 Thread* suggested = priority_queue.GetSuggestedFront(core_id);
451 while (suggested != nullptr) {
452 // Check if the suggested thread is the thread running on its core.
453 const s32 suggested_core = suggested->GetActiveCore();
454
455 if (Thread* running_on_suggested_core =
456 (suggested_core >= 0)
457 ? kernel.Scheduler(suggested_core).state.highest_priority_thread
458 : nullptr;
459 running_on_suggested_core != suggested) {
460 // If the current thread's priority is higher than our suggestion's we prefer
461 // the next thread to the suggestion. We also prefer the next thread when the
462 // current thread's priority is equal to the suggestions, but the next thread
463 // has been waiting longer.
464 if ((suggested->GetPriority() > cur_thread.GetPriority()) ||
465 (suggested->GetPriority() == cur_thread.GetPriority() &&
466 next_thread != std::addressof(cur_thread) &&
467 next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick())) {
468 suggested = nullptr;
469 break;
470 }
471
472 // If we're allowed to do a migration, do one.
473 // NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the
474 // suggestion to the front of the queue.
475 if (running_on_suggested_core == nullptr ||
476 running_on_suggested_core->GetPriority() >=
477 HighestCoreMigrationAllowedPriority) {
478 suggested->SetActiveCore(core_id);
479 priority_queue.ChangeCore(suggested_core, suggested, true);
480 IncrementScheduledCount(suggested);
481 break;
482 } else {
483 // We couldn't perform a migration, but we should check again on a future
484 // yield.
485 recheck = true;
486 }
487 }
488
489 // Get the next suggestion.
490 suggested = priority_queue.GetSuggestedNext(core_id, suggested);
491 }
492
493 // If we still have a suggestion or the next thread is different, we have an update to
494 // perform.
495 if (suggested != nullptr || next_thread != std::addressof(cur_thread)) {
496 SetSchedulerUpdateNeeded(kernel);
497 } else if (!recheck) {
498 // Otherwise if we don't need to re-check, set the thread's yield count so that we
499 // won't waste work until the process is scheduled again.
500 cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
501 }
502 }
503 }
504}
505
506void KScheduler::YieldToAnyThread() {
507 auto& kernel = system.Kernel();
508
509 // Validate preconditions.
510 ASSERT(CanSchedule(kernel));
511 ASSERT(kernel.CurrentProcess() != nullptr);
512
513 // Get the current thread and process.
514 Thread& cur_thread = *GetCurrentThread();
515 Process& cur_process = *kernel.CurrentProcess();
516
517 // If the thread's yield count matches, there's nothing for us to do.
518 if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
519 return;
520 }
521
522 // Get a reference to the priority queue.
523 auto& priority_queue = GetPriorityQueue(kernel);
524
525 // Perform the yield.
526 {
527 KScopedSchedulerLock lock(kernel);
528
529 const auto cur_state = cur_thread.scheduling_state;
530 if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
531 // Get the current active core.
532 const s32 core_id = cur_thread.GetActiveCore();
533
534 // Migrate the current thread to core -1.
535 cur_thread.SetActiveCore(-1);
536 priority_queue.ChangeCore(core_id, std::addressof(cur_thread));
537 IncrementScheduledCount(std::addressof(cur_thread));
538
539 // If there's nothing scheduled, we can try to perform a migration.
540 if (priority_queue.GetScheduledFront(core_id) == nullptr) {
541 // While we have a suggested thread, try to migrate it!
542 Thread* suggested = priority_queue.GetSuggestedFront(core_id);
543 while (suggested != nullptr) {
544 // Check if the suggested thread is the top thread on its core.
545 const s32 suggested_core = suggested->GetActiveCore();
546 if (Thread* top_on_suggested_core =
547 (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
548 : nullptr;
549 top_on_suggested_core != suggested) {
550 // If we're allowed to do a migration, do one.
551 if (top_on_suggested_core == nullptr ||
552 top_on_suggested_core->GetPriority() >=
553 HighestCoreMigrationAllowedPriority) {
554 suggested->SetActiveCore(core_id);
555 priority_queue.ChangeCore(suggested_core, suggested);
556 IncrementScheduledCount(suggested);
557 }
558
559 // Regardless of whether we migrated, we had a candidate, so we're done.
560 break;
561 }
562
563 // Get the next suggestion.
564 suggested = priority_queue.GetSuggestedNext(core_id, suggested);
565 }
566
567 // If the suggestion is different from the current thread, we need to perform an
568 // update.
569 if (suggested != std::addressof(cur_thread)) {
570 SetSchedulerUpdateNeeded(kernel);
571 } else {
572 // Otherwise, set the thread's yield count so that we won't waste work until the
573 // process is scheduled again.
574 cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
575 }
576 } else {
577 // Otherwise, we have an update to perform.
578 SetSchedulerUpdateNeeded(kernel);
579 }
580 }
581 }
582}
583
584KScheduler::KScheduler(Core::System& system, std::size_t core_id)
585 : system(system), core_id(core_id) {
586 switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
587 this->state.needs_scheduling = true;
588 this->state.interrupt_task_thread_runnable = false;
589 this->state.should_count_idle = false;
590 this->state.idle_count = 0;
591 this->state.idle_thread_stack = nullptr;
592 this->state.highest_priority_thread = nullptr;
593}
594
595KScheduler::~KScheduler() = default;
596
597Thread* KScheduler::GetCurrentThread() const {
598 if (current_thread) {
599 return current_thread;
600 }
601 return idle_thread;
602}
603
604u64 KScheduler::GetLastContextSwitchTicks() const {
605 return last_context_switch_time;
606}
607
608void KScheduler::RescheduleCurrentCore() {
609 ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
610
611 auto& phys_core = system.Kernel().PhysicalCore(core_id);
612 if (phys_core.IsInterrupted()) {
613 phys_core.ClearInterrupt();
614 }
615 guard.lock();
616 if (this->state.needs_scheduling) {
617 Schedule();
618 } else {
619 guard.unlock();
620 }
621}
622
623void KScheduler::OnThreadStart() {
624 SwitchContextStep2();
625}
626
627void KScheduler::Unload(Thread* thread) {
628 if (thread) {
629 thread->SetIsRunning(false);
630 if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) {
631 system.ArmInterface(core_id).ExceptionalExit();
632 thread->SetContinuousOnSVC(false);
633 }
634 if (!thread->IsHLEThread() && !thread->HasExited()) {
635 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
636 cpu_core.SaveContext(thread->GetContext32());
637 cpu_core.SaveContext(thread->GetContext64());
638 // Save the TPIDR_EL0 system register in case it was modified.
639 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
640 cpu_core.ClearExclusiveState();
641 }
642 thread->context_guard.unlock();
643 }
644}
645
646void KScheduler::Reload(Thread* thread) {
647 if (thread) {
648 ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
649 "Thread must be runnable.");
650
651 // Cancel any outstanding wakeup events for this thread
652 thread->SetIsRunning(true);
653 thread->SetWasRunning(false);
654
655 auto* const thread_owner_process = thread->GetOwnerProcess();
656 if (thread_owner_process != nullptr) {
657 system.Kernel().MakeCurrentProcess(thread_owner_process);
658 }
659 if (!thread->IsHLEThread()) {
660 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
661 cpu_core.LoadContext(thread->GetContext32());
662 cpu_core.LoadContext(thread->GetContext64());
663 cpu_core.SetTlsAddress(thread->GetTLSAddress());
664 cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
665 cpu_core.ClearExclusiveState();
666 }
667 }
668}
669
670void KScheduler::SwitchContextStep2() {
671 // Load context of new thread
672 Reload(current_thread);
673
674 RescheduleCurrentCore();
675}
676
677void KScheduler::ScheduleImpl() {
678 Thread* previous_thread = current_thread;
679 current_thread = state.highest_priority_thread;
680
681 this->state.needs_scheduling = false;
682
683 if (current_thread == previous_thread) {
684 guard.unlock();
685 return;
686 }
687
688 Process* const previous_process = system.Kernel().CurrentProcess();
689
690 UpdateLastContextSwitchTime(previous_thread, previous_process);
691
692 // Save context for previous thread
693 Unload(previous_thread);
694
695 std::shared_ptr<Common::Fiber>* old_context;
696 if (previous_thread != nullptr) {
697 old_context = &previous_thread->GetHostContext();
698 } else {
699 old_context = &idle_thread->GetHostContext();
700 }
701 guard.unlock();
702
703 Common::Fiber::YieldTo(*old_context, switch_fiber);
704 // When a thread wakes up, the scheduler may have changed to that of another core.
705 auto& next_scheduler = *system.Kernel().CurrentScheduler();
706 next_scheduler.SwitchContextStep2();
707}
708
709void KScheduler::OnSwitch(void* this_scheduler) {
710 KScheduler* sched = static_cast<KScheduler*>(this_scheduler);
711 sched->SwitchToCurrent();
712}
713
714void KScheduler::SwitchToCurrent() {
715 while (true) {
716 {
717 std::scoped_lock lock{guard};
718 current_thread = state.highest_priority_thread;
719 this->state.needs_scheduling = false;
720 }
721 const auto is_switch_pending = [this] {
722 std::scoped_lock lock{guard};
723 return state.needs_scheduling.load(std::memory_order_relaxed);
724 };
725 do {
726 if (current_thread != nullptr && !current_thread->IsHLEThread()) {
727 current_thread->context_guard.lock();
728 if (!current_thread->IsRunnable()) {
729 current_thread->context_guard.unlock();
730 break;
731 }
732 if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
733 current_thread->context_guard.unlock();
734 break;
735 }
736 }
737 std::shared_ptr<Common::Fiber>* next_context;
738 if (current_thread != nullptr) {
739 next_context = &current_thread->GetHostContext();
740 } else {
741 next_context = &idle_thread->GetHostContext();
742 }
743 Common::Fiber::YieldTo(switch_fiber, *next_context);
744 } while (!is_switch_pending());
745 }
746}
747
748void KScheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
749 const u64 prev_switch_ticks = last_context_switch_time;
750 const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
751 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
752
753 if (thread != nullptr) {
754 thread->UpdateCPUTimeTicks(update_ticks);
755 }
756
757 if (process != nullptr) {
758 process->UpdateCPUTimeTicks(update_ticks);
759 }
760
761 last_context_switch_time = most_recent_switch_ticks;
762}
763
764void KScheduler::Initialize() {
765 std::string name = "Idle Thread Id:" + std::to_string(core_id);
766 std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
767 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
768 ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
769 auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
770 nullptr, std::move(init_func), init_func_parameter);
771 idle_thread = thread_res.Unwrap().get();
772
773 {
774 KScopedSchedulerLock lock{system.Kernel()};
775 idle_thread->SetStatus(ThreadStatus::Ready);
776 }
777}
778
779KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
780 : KScopedLock(kernel.GlobalSchedulerContext().SchedulerLock()) {}
781
782KScopedSchedulerLock::~KScopedSchedulerLock() = default;
783
784} // namespace Kernel
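For context, a minimal sketch of how a yield SVC might dispatch to the three yield variants implemented above. The YieldType values follow the documented Horizon svcSleepThread convention (0, -1, -2) and the helper itself is hypothetical; it is not code from this change's svc.cpp.

    // Hypothetical dispatch helper; the constants are an assumption about the Horizon
    // SleepThread yield convention, not identifiers taken from the emulator.
    enum class YieldType : s64 {
        WithoutCoreMigration = 0,
        WithCoreMigration = -1,
        ToAnyThread = -2,
    };

    void DispatchYield(Kernel::KScheduler& scheduler, s64 nanoseconds) {
        switch (static_cast<YieldType>(nanoseconds)) {
        case YieldType::WithoutCoreMigration:
            scheduler.YieldWithoutCoreMigration(); // requeue at the back of the same core's queue
            break;
        case YieldType::WithCoreMigration:
            scheduler.YieldWithCoreMigration();    // may pull a suggested thread over from another core
            break;
        case YieldType::ToAnyThread:
            scheduler.YieldToAnyThread();          // detach from the core so any core can pick it up
            break;
        default:
            break; // a positive value would be a real sleep, handled elsewhere
        }
    }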
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
new file mode 100644
index 000000000..e84abc84c
--- /dev/null
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -0,0 +1,201 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphere, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
7
8#pragma once
9
10#include <atomic>
11
12#include "common/common_types.h"
13#include "common/spin_lock.h"
14#include "core/hle/kernel/global_scheduler_context.h"
15#include "core/hle/kernel/k_priority_queue.h"
16#include "core/hle/kernel/k_scheduler_lock.h"
17#include "core/hle/kernel/k_scoped_lock.h"
18
19namespace Common {
20class Fiber;
21}
22
23namespace Core {
24class System;
25}
26
27namespace Kernel {
28
29class KernelCore;
30class Process;
31class SchedulerLock;
32class Thread;
33
34class KScheduler final {
35public:
36 explicit KScheduler(Core::System& system, std::size_t core_id);
37 ~KScheduler();
38
39 /// Reschedules to the next available thread (call after current thread is suspended)
40 void RescheduleCurrentCore();
41
42 /// Reschedules cores pending reschedule, to be called on EnableScheduling.
43 static void RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
44 Core::EmuThreadHandle global_thread);
45
46 /// The next two are for single-core mode only.
47 /// Unload current thread before preempting core.
48 void Unload(Thread* thread);
49
50 /// Reload current thread after core preemption.
51 void Reload(Thread* thread);
52
53 /// Gets the current running thread
54 [[nodiscard]] Thread* GetCurrentThread() const;
55
56 /// Gets the timestamp for the last context switch in ticks.
57 [[nodiscard]] u64 GetLastContextSwitchTicks() const;
58
59 [[nodiscard]] bool ContextSwitchPending() const {
60 return state.needs_scheduling.load(std::memory_order_relaxed);
61 }
62
63 void Initialize();
64
65 void OnThreadStart();
66
67 [[nodiscard]] std::shared_ptr<Common::Fiber>& ControlContext() {
68 return switch_fiber;
69 }
70
71 [[nodiscard]] const std::shared_ptr<Common::Fiber>& ControlContext() const {
72 return switch_fiber;
73 }
74
75 [[nodiscard]] u64 UpdateHighestPriorityThread(Thread* highest_thread);
76
77 /**
78 * Takes a thread and moves it to the back of its priority list.
79 *
80 * @note This operation can be redundant; in that case, no scheduling change occurs.
81 */
82 void YieldWithoutCoreMigration();
83
84 /**
85 * Takes a thread and moves it to the back of its priority list.
86 * Afterwards, tries to pick a suggested thread from the suggested queue that has a worse time or
87 * a better priority than the next thread in the core.
88 *
89 * @note This operation can be redundant; in that case, no scheduling change occurs.
90 */
91 void YieldWithCoreMigration();
92
93 /**
94 * Takes a thread and moves it out of the scheduling queue
95 * and into the suggested queue. If no thread can be scheduled on that core afterwards,
96 * a suggested thread is obtained instead.
97 *
98 * @note This operation can be redundant; in that case, no scheduling change occurs.
99 */
100 void YieldToAnyThread();
101
102 /// Notify the scheduler a thread's status has changed.
103 static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state);
104
105 /// Notify the scheduler a thread's priority has changed.
106 static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread,
107 u32 old_priority);
108
109 /// Notify the scheduler a thread's core and/or affinity mask has changed.
110 static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
111 const KAffinityMask& old_affinity, s32 old_core);
112
113 static bool CanSchedule(KernelCore& kernel);
114 static bool IsSchedulerUpdateNeeded(const KernelCore& kernel);
115 static void SetSchedulerUpdateNeeded(KernelCore& kernel);
116 static void ClearSchedulerUpdateNeeded(KernelCore& kernel);
117 static void DisableScheduling(KernelCore& kernel);
118 static void EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
119 Core::EmuThreadHandle global_thread);
120 [[nodiscard]] static u64 UpdateHighestPriorityThreads(KernelCore& kernel);
121
122private:
123 friend class GlobalSchedulerContext;
124
125 /**
126 * Takes care of selecting the new scheduled threads in three steps:
127 *
128 * 1. First, a thread is selected from the top of the priority queue. If no thread
129 * is obtained, we move to step two; otherwise, we are done.
130 *
131 * 2. Second, we try to get a suggested thread that's not assigned to any core, or
132 * that is not the top thread on its core.
133 *
134 * 3. Third, if no suggested thread is found, we do a second pass and pick a running
135 * thread on another core and swap it with this core's current thread.
136 *
137 * @return The cores needing scheduling.
138 */
139 [[nodiscard]] static u64 UpdateHighestPriorityThreadsImpl(KernelCore& kernel);
140
141 [[nodiscard]] static KSchedulerPriorityQueue& GetPriorityQueue(KernelCore& kernel);
142
143 void RotateScheduledQueue(s32 core_id, s32 priority);
144
145 void Schedule() {
146 ASSERT(GetCurrentThread()->GetDisableDispatchCount() == 1);
147 this->ScheduleImpl();
148 }
149
150 /// Switches the CPU's active thread context to that of the specified thread
151 void ScheduleImpl();
152
153 /// When a thread wakes up, it must run this through its new scheduler
154 void SwitchContextStep2();
155
156 /**
157 * Called on every context switch to update the internal timestamp
158 * This also updates the running time ticks for the given thread and
159 * process using the following difference:
160 *
161 * ticks += most_recent_ticks - last_context_switch_ticks
162 *
163 * The internal tick timestamp for the scheduler is simply the
164 * most recent tick count retrieved. No special arithmetic is
165 * applied to it.
166 */
167 void UpdateLastContextSwitchTime(Thread* thread, Process* process);
168
169 static void OnSwitch(void* this_scheduler);
170 void SwitchToCurrent();
171
172 Thread* current_thread{};
173 Thread* idle_thread{};
174
175 std::shared_ptr<Common::Fiber> switch_fiber{};
176
177 struct SchedulingState {
178 std::atomic<bool> needs_scheduling;
179 bool interrupt_task_thread_runnable{};
180 bool should_count_idle{};
181 u64 idle_count{};
182 Thread* highest_priority_thread{};
183 void* idle_thread_stack{};
184 };
185
186 SchedulingState state;
187
188 Core::System& system;
189 u64 last_context_switch_time{};
190 const std::size_t core_id;
191
192 Common::SpinLock guard{};
193};
194
195class KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {
196public:
197 explicit KScopedSchedulerLock(KernelCore& kernel);
198 ~KScopedSchedulerLock();
199};
200
201} // namespace Kernel
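As a usage illustration of the interface above (not code from this change): scheduler state is always mutated under KScopedSchedulerLock, and the static notification hooks are then used to requeue the thread and mark cores for rescheduling. SetPriority is an assumed Thread mutator used only for the example.

    // Hypothetical caller; the priority-setting details are elided.
    void ChangePriority(Kernel::KernelCore& kernel, Kernel::Thread* thread, u32 new_priority) {
        Kernel::KScopedSchedulerLock lock(kernel); // all scheduler state changes happen under this lock

        const u32 old_priority = thread->GetPriority(); // read before mutating
        thread->SetPriority(new_priority);              // assumed mutator, for illustration only

        // Tell the scheduler so it can requeue the thread and flag cores that need rescheduling.
        // Called from a guest thread, so CurrentScheduler() is non-null here.
        Kernel::KScheduler::OnThreadPriorityChanged(kernel, thread,
                                                    kernel.CurrentScheduler()->GetCurrentThread(),
                                                    old_priority);
    }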
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
new file mode 100644
index 000000000..2f1c1f691
--- /dev/null
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -0,0 +1,75 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphere, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
7
8#pragma once
9
10#include "common/assert.h"
11#include "common/spin_lock.h"
12#include "core/hardware_properties.h"
13#include "core/hle/kernel/kernel.h"
14
15namespace Kernel {
16
17class KernelCore;
18
19template <typename SchedulerType>
20class KAbstractSchedulerLock {
21public:
22 explicit KAbstractSchedulerLock(KernelCore& kernel) : kernel{kernel} {}
23
24 bool IsLockedByCurrentThread() const {
25 return this->owner_thread == kernel.GetCurrentEmuThreadID();
26 }
27
28 void Lock() {
29 if (this->IsLockedByCurrentThread()) {
30 // If we already own the lock, we can just increment the count.
31 ASSERT(this->lock_count > 0);
32 this->lock_count++;
33 } else {
34 // Otherwise, we want to disable scheduling and acquire the spinlock.
35 SchedulerType::DisableScheduling(kernel);
36 this->spin_lock.lock();
37
38 // For debug, ensure that our state is valid.
39 ASSERT(this->lock_count == 0);
40 ASSERT(this->owner_thread == Core::EmuThreadHandle::InvalidHandle());
41
42 // Increment count, take ownership.
43 this->lock_count = 1;
44 this->owner_thread = kernel.GetCurrentEmuThreadID();
45 }
46 }
47
48 void Unlock() {
49 ASSERT(this->IsLockedByCurrentThread());
50 ASSERT(this->lock_count > 0);
51
52 // Release an instance of the lock.
53 if ((--this->lock_count) == 0) {
54 // We're no longer going to hold the lock. Take note of what cores need scheduling.
55 const u64 cores_needing_scheduling =
56 SchedulerType::UpdateHighestPriorityThreads(kernel);
57 Core::EmuThreadHandle leaving_thread = owner_thread;
58
59 // Note that we no longer hold the lock, and unlock the spinlock.
60 this->owner_thread = Core::EmuThreadHandle::InvalidHandle();
61 this->spin_lock.unlock();
62
63 // Enable scheduling, and perform a rescheduling operation.
64 SchedulerType::EnableScheduling(kernel, cores_needing_scheduling, leaving_thread);
65 }
66 }
67
68private:
69 KernelCore& kernel;
70 Common::SpinLock spin_lock{};
71 s32 lock_count{};
72 Core::EmuThreadHandle owner_thread{Core::EmuThreadHandle::InvalidHandle()};
73};
74
75} // namespace Kernel
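To make the reentrancy explicit: a nested Lock() from the owning thread only bumps the count, and rescheduling happens only when the outermost Unlock() runs. A toy, self-contained model of just that counting behaviour (the spinlock, owner tracking, and scheduler callbacks of the real class are deliberately omitted):

    #include <cassert>

    // Toy model; not part of the yuzu code base.
    struct ToyLock {
        int lock_count = 0;
        bool rescheduled = false;

        void Lock() { ++lock_count; }
        void Unlock() {
            assert(lock_count > 0);
            if (--lock_count == 0) {
                rescheduled = true; // UpdateHighestPriorityThreads/EnableScheduling would run here
            }
        }
    };

    int main() {
        ToyLock lock;
        lock.Lock();
        lock.Lock();               // nested acquire only increments the count
        lock.Unlock();
        assert(!lock.rescheduled); // still held, no rescheduling yet
        lock.Unlock();
        assert(lock.rescheduled);  // rescheduling only on the outermost release
    }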
diff --git a/src/core/hle/kernel/k_scoped_lock.h b/src/core/hle/kernel/k_scoped_lock.h
new file mode 100644
index 000000000..d7cc557b2
--- /dev/null
+++ b/src/core/hle/kernel/k_scoped_lock.h
@@ -0,0 +1,41 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphere, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
7
8#pragma once
9
10#include "common/common_types.h"
11
12namespace Kernel {
13
14template <typename T>
15concept KLockable = !std::is_reference_v<T> && requires(T & t) {
16 { t.Lock() }
17 ->std::same_as<void>;
18 { t.Unlock() }
19 ->std::same_as<void>;
20};
21
22template <typename T>
23requires KLockable<T> class KScopedLock {
24public:
25 explicit KScopedLock(T* l) : lock_ptr(l) {
26 this->lock_ptr->Lock();
27 }
28 explicit KScopedLock(T& l) : KScopedLock(std::addressof(l)) { /* ... */
29 }
30 ~KScopedLock() {
31 this->lock_ptr->Unlock();
32 }
33
34 KScopedLock(const KScopedLock&) = delete;
35 KScopedLock(KScopedLock&&) = delete;
36
37private:
38 T* lock_ptr;
39};
40
41} // namespace Kernel
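For illustration, any type exposing void Lock()/Unlock() satisfies the KLockable concept and can be guarded this way; DebugLock below is a made-up type, not something in the tree.

    // Illustrative only, assuming k_scoped_lock.h is included.
    struct DebugLock {
        void Lock() { /* acquire something */ }
        void Unlock() { /* release it */ }
    };

    void Example() {
        DebugLock debug_lock;
        {
            Kernel::KScopedLock<DebugLock> lk(debug_lock); // Lock() in the constructor
            // ... critical section ...
        }                                                  // Unlock() in the destructor
    }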
diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
new file mode 100644
index 000000000..2bb3817fa
--- /dev/null
+++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
@@ -0,0 +1,50 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5// This file references various implementation details from Atmosphere, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
7
8#pragma once
9
10#include "common/common_types.h"
11#include "core/hle/kernel/handle_table.h"
12#include "core/hle/kernel/kernel.h"
13#include "core/hle/kernel/thread.h"
14#include "core/hle/kernel/time_manager.h"
15
16namespace Kernel {
17
18class KScopedSchedulerLockAndSleep {
19public:
20 explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* t,
21 s64 timeout)
22 : kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) {
23 event_handle = InvalidHandle;
24
25 // Lock the scheduler.
26 kernel.GlobalSchedulerContext().scheduler_lock.Lock();
27 }
28
29 ~KScopedSchedulerLockAndSleep() {
30 // Register the sleep.
31 if (this->timeout_tick > 0) {
32 kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick);
33 }
34
35 // Unlock the scheduler.
36 kernel.GlobalSchedulerContext().scheduler_lock.Unlock();
37 }
38
39 void CancelSleep() {
40 this->timeout_tick = 0;
41 }
42
43private:
44 KernelCore& kernel;
45 Handle& event_handle;
46 Thread* thread{};
47 s64 timeout_tick{};
48};
49
50} // namespace Kernel
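A rough sketch of the wait-with-timeout shape this helper supports; the real callers live in address_arbiter.cpp and the SVC handlers, and the function, its parameters, and the "already signalled" condition here are placeholders, not code from this change.

    // Hypothetical caller, assuming the constructor signature above.
    ResultCode WaitExample(Core::System& system, Kernel::Thread* current_thread,
                           bool already_signaled, s64 timeout) {
        auto& kernel = system.Kernel();
        Handle timer_handle = InvalidHandle;
        {
            Kernel::KScopedSchedulerLockAndSleep lock(kernel, timer_handle, current_thread, timeout);

            if (already_signaled) {
                lock.CancelSleep();  // the destructor will then skip registering a time event
                return RESULT_SUCCESS;
            }
            // Mark the thread as waiting here (details elided).
        } // destructor registers the timeout (if any) and unlocks the scheduler

        // The thread is now parked until it is signalled or the time event fires.
        return RESULT_SUCCESS;
    }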
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index f2b0fe2fd..e8ece8164 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -7,15 +7,15 @@
7#include <bitset> 7#include <bitset>
8#include <functional> 8#include <functional>
9#include <memory> 9#include <memory>
10#include <mutex>
11#include <thread> 10#include <thread>
12#include <unordered_map> 11#include <unordered_set>
13#include <utility> 12#include <utility>
14 13
15#include "common/assert.h" 14#include "common/assert.h"
16#include "common/logging/log.h" 15#include "common/logging/log.h"
17#include "common/microprofile.h" 16#include "common/microprofile.h"
18#include "common/thread.h" 17#include "common/thread.h"
18#include "common/thread_worker.h"
19#include "core/arm/arm_interface.h" 19#include "core/arm/arm_interface.h"
20#include "core/arm/cpu_interrupt_handler.h" 20#include "core/arm/cpu_interrupt_handler.h"
21#include "core/arm/exclusive_monitor.h" 21#include "core/arm/exclusive_monitor.h"
@@ -28,6 +28,7 @@
28#include "core/hle/kernel/client_port.h" 28#include "core/hle/kernel/client_port.h"
29#include "core/hle/kernel/errors.h" 29#include "core/hle/kernel/errors.h"
30#include "core/hle/kernel/handle_table.h" 30#include "core/hle/kernel/handle_table.h"
31#include "core/hle/kernel/k_scheduler.h"
31#include "core/hle/kernel/kernel.h" 32#include "core/hle/kernel/kernel.h"
32#include "core/hle/kernel/memory/memory_layout.h" 33#include "core/hle/kernel/memory/memory_layout.h"
33#include "core/hle/kernel/memory/memory_manager.h" 34#include "core/hle/kernel/memory/memory_manager.h"
@@ -35,7 +36,7 @@
35#include "core/hle/kernel/physical_core.h" 36#include "core/hle/kernel/physical_core.h"
36#include "core/hle/kernel/process.h" 37#include "core/hle/kernel/process.h"
37#include "core/hle/kernel/resource_limit.h" 38#include "core/hle/kernel/resource_limit.h"
38#include "core/hle/kernel/scheduler.h" 39#include "core/hle/kernel/service_thread.h"
39#include "core/hle/kernel/shared_memory.h" 40#include "core/hle/kernel/shared_memory.h"
40#include "core/hle/kernel/synchronization.h" 41#include "core/hle/kernel/synchronization.h"
41#include "core/hle/kernel/thread.h" 42#include "core/hle/kernel/thread.h"
@@ -50,17 +51,20 @@ namespace Kernel {
50 51
51struct KernelCore::Impl { 52struct KernelCore::Impl {
52 explicit Impl(Core::System& system, KernelCore& kernel) 53 explicit Impl(Core::System& system, KernelCore& kernel)
53 : global_scheduler{kernel}, synchronization{system}, time_manager{system}, 54 : synchronization{system}, time_manager{system}, global_handle_table{kernel}, system{
54 global_handle_table{kernel}, system{system} {} 55 system} {}
55 56
56 void SetMulticore(bool is_multicore) { 57 void SetMulticore(bool is_multicore) {
57 this->is_multicore = is_multicore; 58 this->is_multicore = is_multicore;
58 } 59 }
59 60
60 void Initialize(KernelCore& kernel) { 61 void Initialize(KernelCore& kernel) {
61 Shutdown();
62 RegisterHostThread(); 62 RegisterHostThread();
63 63
64 global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
65 service_thread_manager =
66 std::make_unique<Common::ThreadWorker>(1, "yuzu:ServiceThreadManager");
67
64 InitializePhysicalCores(); 68 InitializePhysicalCores();
65 InitializeSystemResourceLimit(kernel); 69 InitializeSystemResourceLimit(kernel);
66 InitializeMemoryLayout(); 70 InitializeMemoryLayout();
@@ -69,7 +73,19 @@ struct KernelCore::Impl {
69 InitializeSuspendThreads(); 73 InitializeSuspendThreads();
70 } 74 }
71 75
76 void InitializeCores() {
77 for (auto& core : cores) {
78 core.Initialize(current_process->Is64BitProcess());
79 }
80 }
81
72 void Shutdown() { 82 void Shutdown() {
83 process_list.clear();
84
85 // Ensures all service threads gracefully shutdown
86 service_thread_manager.reset();
87 service_threads.clear();
88
73 next_object_id = 0; 89 next_object_id = 0;
74 next_kernel_process_id = Process::InitialKIPIDMin; 90 next_kernel_process_id = Process::InitialKIPIDMin;
75 next_user_process_id = Process::ProcessIDMin; 91 next_user_process_id = Process::ProcessIDMin;
@@ -81,41 +97,30 @@ struct KernelCore::Impl {
81 } 97 }
82 } 98 }
83 99
84 for (std::size_t i = 0; i < cores.size(); i++) {
85 cores[i].Shutdown();
86 schedulers[i].reset();
87 }
88 cores.clear(); 100 cores.clear();
89 101
90 registered_core_threads.reset();
91
92 process_list.clear();
93 current_process = nullptr; 102 current_process = nullptr;
94 103
95 system_resource_limit = nullptr; 104 system_resource_limit = nullptr;
96 105
97 global_handle_table.Clear(); 106 global_handle_table.Clear();
98 preemption_event = nullptr;
99 107
100 global_scheduler.Shutdown(); 108 preemption_event = nullptr;
101 109
102 named_ports.clear(); 110 named_ports.clear();
103 111
104 for (auto& core : cores) {
105 core.Shutdown();
106 }
107 cores.clear();
108
109 exclusive_monitor.reset(); 112 exclusive_monitor.reset();
110 host_thread_ids.clear(); 113
114 // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others
115 next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
111 } 116 }
112 117
113 void InitializePhysicalCores() { 118 void InitializePhysicalCores() {
114 exclusive_monitor = 119 exclusive_monitor =
115 Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES); 120 Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
116 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 121 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
117 schedulers[i] = std::make_unique<Kernel::Scheduler>(system, i); 122 schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i);
118 cores.emplace_back(system, i, *schedulers[i], interrupts[i]); 123 cores.emplace_back(i, system, *schedulers[i], interrupts);
119 } 124 }
120 } 125 }
121 126
@@ -147,8 +152,8 @@ struct KernelCore::Impl {
147 preemption_event = Core::Timing::CreateEvent( 152 preemption_event = Core::Timing::CreateEvent(
148 "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) { 153 "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
149 { 154 {
150 SchedulerLock lock(kernel); 155 KScopedSchedulerLock lock(kernel);
151 global_scheduler.PreemptThreads(); 156 global_scheduler_context->PreemptThreads();
152 } 157 }
153 const auto time_interval = std::chrono::nanoseconds{ 158 const auto time_interval = std::chrono::nanoseconds{
154 Core::Timing::msToCycles(std::chrono::milliseconds(10))}; 159 Core::Timing::msToCycles(std::chrono::milliseconds(10))};
@@ -177,63 +182,62 @@ struct KernelCore::Impl {
177 182
178 void MakeCurrentProcess(Process* process) { 183 void MakeCurrentProcess(Process* process) {
179 current_process = process; 184 current_process = process;
180
181 if (process == nullptr) { 185 if (process == nullptr) {
182 return; 186 return;
183 } 187 }
184 188
185 u32 core_id = GetCurrentHostThreadID(); 189 const u32 core_id = GetCurrentHostThreadID();
186 if (core_id < Core::Hardware::NUM_CPU_CORES) { 190 if (core_id < Core::Hardware::NUM_CPU_CORES) {
187 system.Memory().SetCurrentPageTable(*process, core_id); 191 system.Memory().SetCurrentPageTable(*process, core_id);
188 } 192 }
189 } 193 }
190 194
195 /// Creates a new host thread ID, should only be called by GetHostThreadId
196 u32 AllocateHostThreadId(std::optional<std::size_t> core_id) {
197 if (core_id) {
198 // The first four slots are reserved for CPU core threads
199 ASSERT(*core_id < Core::Hardware::NUM_CPU_CORES);
200 return static_cast<u32>(*core_id);
201 } else {
202 return next_host_thread_id++;
203 }
204 }
205
206 /// Gets the host thread ID for the caller, allocating a new one if this is the first time
207 u32 GetHostThreadId(std::optional<std::size_t> core_id = std::nullopt) {
208 const thread_local auto host_thread_id{AllocateHostThreadId(core_id)};
209 return host_thread_id;
210 }
211
212 /// Registers a CPU core thread by allocating a host thread ID for it
191 void RegisterCoreThread(std::size_t core_id) { 213 void RegisterCoreThread(std::size_t core_id) {
192 std::unique_lock lock{register_thread_mutex}; 214 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
215 const auto this_id = GetHostThreadId(core_id);
193 if (!is_multicore) { 216 if (!is_multicore) {
194 single_core_thread_id = std::this_thread::get_id(); 217 single_core_thread_id = this_id;
195 } 218 }
196 const std::thread::id this_id = std::this_thread::get_id();
197 const auto it = host_thread_ids.find(this_id);
198 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
199 ASSERT(it == host_thread_ids.end());
200 ASSERT(!registered_core_threads[core_id]);
201 host_thread_ids[this_id] = static_cast<u32>(core_id);
202 registered_core_threads.set(core_id);
203 } 219 }
204 220
221 /// Registers a new host thread by allocating a host thread ID for it
205 void RegisterHostThread() { 222 void RegisterHostThread() {
206 std::unique_lock lock{register_thread_mutex}; 223 [[maybe_unused]] const auto this_id = GetHostThreadId();
207 const std::thread::id this_id = std::this_thread::get_id();
208 const auto it = host_thread_ids.find(this_id);
209 if (it != host_thread_ids.end()) {
210 return;
211 }
212 host_thread_ids[this_id] = registered_thread_ids++;
213 } 224 }
214 225
215 u32 GetCurrentHostThreadID() const { 226 [[nodiscard]] u32 GetCurrentHostThreadID() {
216 const std::thread::id this_id = std::this_thread::get_id(); 227 const auto this_id = GetHostThreadId();
217 if (!is_multicore) { 228 if (!is_multicore && single_core_thread_id == this_id) {
218 if (single_core_thread_id == this_id) { 229 return static_cast<u32>(system.GetCpuManager().CurrentCore());
219 return static_cast<u32>(system.GetCpuManager().CurrentCore());
220 }
221 }
222 std::unique_lock lock{register_thread_mutex};
223 const auto it = host_thread_ids.find(this_id);
224 if (it == host_thread_ids.end()) {
225 return Core::INVALID_HOST_THREAD_ID;
226 } 230 }
227 return it->second; 231 return this_id;
228 } 232 }
229 233
230 Core::EmuThreadHandle GetCurrentEmuThreadID() const { 234 [[nodiscard]] Core::EmuThreadHandle GetCurrentEmuThreadID() {
231 Core::EmuThreadHandle result = Core::EmuThreadHandle::InvalidHandle(); 235 Core::EmuThreadHandle result = Core::EmuThreadHandle::InvalidHandle();
232 result.host_handle = GetCurrentHostThreadID(); 236 result.host_handle = GetCurrentHostThreadID();
233 if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) { 237 if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) {
234 return result; 238 return result;
235 } 239 }
236 const Kernel::Scheduler& sched = cores[result.host_handle].Scheduler(); 240 const Kernel::KScheduler& sched = cores[result.host_handle].Scheduler();
237 const Kernel::Thread* current = sched.GetCurrentThread(); 241 const Kernel::Thread* current = sched.GetCurrentThread();
238 if (current != nullptr && !current->IsPhantomMode()) { 242 if (current != nullptr && !current->IsPhantomMode()) {
239 result.guest_handle = current->GetGlobalHandle(); 243 result.guest_handle = current->GetGlobalHandle();
@@ -302,7 +306,7 @@ struct KernelCore::Impl {
302 // Lists all processes that exist in the current session. 306 // Lists all processes that exist in the current session.
303 std::vector<std::shared_ptr<Process>> process_list; 307 std::vector<std::shared_ptr<Process>> process_list;
304 Process* current_process = nullptr; 308 Process* current_process = nullptr;
305 Kernel::GlobalScheduler global_scheduler; 309 std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
306 Kernel::Synchronization synchronization; 310 Kernel::Synchronization synchronization;
307 Kernel::TimeManager time_manager; 311 Kernel::TimeManager time_manager;
308 312
@@ -321,11 +325,8 @@ struct KernelCore::Impl {
321 std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor; 325 std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
322 std::vector<Kernel::PhysicalCore> cores; 326 std::vector<Kernel::PhysicalCore> cores;
323 327
324 // 0-3 IDs represent core threads, >3 represent others 328 // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others
325 std::unordered_map<std::thread::id, u32> host_thread_ids; 329 std::atomic<u32> next_host_thread_id{Core::Hardware::NUM_CPU_CORES};
326 u32 registered_thread_ids{Core::Hardware::NUM_CPU_CORES};
327 std::bitset<Core::Hardware::NUM_CPU_CORES> registered_core_threads;
328 mutable std::mutex register_thread_mutex;
329 330
330 // Kernel memory management 331 // Kernel memory management
331 std::unique_ptr<Memory::MemoryManager> memory_manager; 332 std::unique_ptr<Memory::MemoryManager> memory_manager;
@@ -337,12 +338,19 @@ struct KernelCore::Impl {
337 std::shared_ptr<Kernel::SharedMemory> irs_shared_mem; 338 std::shared_ptr<Kernel::SharedMemory> irs_shared_mem;
338 std::shared_ptr<Kernel::SharedMemory> time_shared_mem; 339 std::shared_ptr<Kernel::SharedMemory> time_shared_mem;
339 340
341 // Threads used for services
342 std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
343
344 // Service threads are managed by a worker thread, so that a calling service thread can queue up
345 // the release of itself
346 std::unique_ptr<Common::ThreadWorker> service_thread_manager;
347
340 std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{}; 348 std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
341 std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{}; 349 std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
342 std::array<std::unique_ptr<Kernel::Scheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{}; 350 std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
343 351
344 bool is_multicore{}; 352 bool is_multicore{};
345 std::thread::id single_core_thread_id{}; 353 u32 single_core_thread_id{};
346 354
347 std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{}; 355 std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{};
348 356
@@ -363,6 +371,10 @@ void KernelCore::Initialize() {
363 impl->Initialize(*this); 371 impl->Initialize(*this);
364} 372}
365 373
374void KernelCore::InitializeCores() {
375 impl->InitializeCores();
376}
377
366void KernelCore::Shutdown() { 378void KernelCore::Shutdown() {
367 impl->Shutdown(); 379 impl->Shutdown();
368} 380}
@@ -395,19 +407,19 @@ const std::vector<std::shared_ptr<Process>>& KernelCore::GetProcessList() const
395 return impl->process_list; 407 return impl->process_list;
396} 408}
397 409
398Kernel::GlobalScheduler& KernelCore::GlobalScheduler() { 410Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() {
399 return impl->global_scheduler; 411 return *impl->global_scheduler_context;
400} 412}
401 413
402const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const { 414const Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() const {
403 return impl->global_scheduler; 415 return *impl->global_scheduler_context;
404} 416}
405 417
406Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) { 418Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) {
407 return *impl->schedulers[id]; 419 return *impl->schedulers[id];
408} 420}
409 421
410const Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) const { 422const Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) const {
411 return *impl->schedulers[id]; 423 return *impl->schedulers[id];
412} 424}
413 425
@@ -431,16 +443,13 @@ const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
431 return impl->cores[core_id]; 443 return impl->cores[core_id];
432} 444}
433 445
434Kernel::Scheduler& KernelCore::CurrentScheduler() { 446Kernel::KScheduler* KernelCore::CurrentScheduler() {
435 u32 core_id = impl->GetCurrentHostThreadID(); 447 u32 core_id = impl->GetCurrentHostThreadID();
436 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); 448 if (core_id >= Core::Hardware::NUM_CPU_CORES) {
437 return *impl->schedulers[core_id]; 449 // This is expected when called from a non-guest thread
438} 450 return {};
439 451 }
440const Kernel::Scheduler& KernelCore::CurrentScheduler() const { 452 return impl->schedulers[core_id].get();
441 u32 core_id = impl->GetCurrentHostThreadID();
442 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
443 return *impl->schedulers[core_id];
444} 453}
445 454
446std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() { 455std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
@@ -477,12 +486,17 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
477} 486}
478 487
479void KernelCore::InvalidateAllInstructionCaches() { 488void KernelCore::InvalidateAllInstructionCaches() {
480 auto& threads = GlobalScheduler().GetThreadList(); 489 for (auto& physical_core : impl->cores) {
481 for (auto& thread : threads) { 490 physical_core.ArmInterface().ClearInstructionCache();
482 if (!thread->IsHLEThread()) { 491 }
483 auto& arm_interface = thread->ArmInterface(); 492}
484 arm_interface.ClearInstructionCache(); 493
494void KernelCore::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
495 for (auto& physical_core : impl->cores) {
496 if (!physical_core.IsInitialized()) {
497 continue;
485 } 498 }
499 physical_core.ArmInterface().InvalidateCacheRange(addr, size);
486 } 500 }
487} 501}
488 502
@@ -598,7 +612,7 @@ const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const {
598void KernelCore::Suspend(bool in_suspention) { 612void KernelCore::Suspend(bool in_suspention) {
599 const bool should_suspend = exception_exited || in_suspention; 613 const bool should_suspend = exception_exited || in_suspention;
600 { 614 {
601 SchedulerLock lock(*this); 615 KScopedSchedulerLock lock(*this);
602 ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep; 616 ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep;
603 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 617 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
604 impl->suspend_threads[i]->SetStatus(status); 618 impl->suspend_threads[i]->SetStatus(status);
@@ -625,4 +639,19 @@ void KernelCore::ExitSVCProfile() {
625 MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]); 639 MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
626} 640}
627 641
642std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
643 auto service_thread = std::make_shared<Kernel::ServiceThread>(*this, 1, name);
644 impl->service_thread_manager->QueueWork(
645 [this, service_thread] { impl->service_threads.emplace(service_thread); });
646 return service_thread;
647}
648
649void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) {
650 impl->service_thread_manager->QueueWork([this, service_thread] {
651 if (auto strong_ptr = service_thread.lock()) {
652 impl->service_threads.erase(strong_ptr);
653 }
654 });
655}
656
628} // namespace Kernel 657} // namespace Kernel
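As a usage illustration of the new service-thread API (the class below is invented for the example; in the tree the owner is ServerSession, which stores the weak_ptr and releases it on destruction):

    // Hypothetical owner of a service thread.
    class SessionLike {
    public:
        explicit SessionLike(Kernel::KernelCore& kernel)
            : kernel{kernel}, service_thread{kernel.CreateServiceThread("ExampleService")} {}

        ~SessionLike() {
            // Queued on the manager worker, so a service thread can safely release itself.
            kernel.ReleaseServiceThread(service_thread);
        }

    private:
        Kernel::KernelCore& kernel;
        std::weak_ptr<Kernel::ServiceThread> service_thread;
    };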
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 16285c3f0..e3169f5a7 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -35,13 +35,14 @@ class SlabHeap;
35 35
36class AddressArbiter; 36class AddressArbiter;
37class ClientPort; 37class ClientPort;
38class GlobalScheduler; 38class GlobalSchedulerContext;
39class HandleTable; 39class HandleTable;
40class PhysicalCore; 40class PhysicalCore;
41class Process; 41class Process;
42class ResourceLimit; 42class ResourceLimit;
43class Scheduler; 43class KScheduler;
44class SharedMemory; 44class SharedMemory;
45class ServiceThread;
45class Synchronization; 46class Synchronization;
46class Thread; 47class Thread;
47class TimeManager; 48class TimeManager;
@@ -74,6 +75,9 @@ public:
74 /// Resets the kernel to a clean slate for use. 75 /// Resets the kernel to a clean slate for use.
75 void Initialize(); 76 void Initialize();
76 77
78 /// Initializes the CPU cores.
79 void InitializeCores();
80
77 /// Clears all resources in use by the kernel instance. 81 /// Clears all resources in use by the kernel instance.
78 void Shutdown(); 82 void Shutdown();
79 83
@@ -99,16 +103,16 @@ public:
99 const std::vector<std::shared_ptr<Process>>& GetProcessList() const; 103 const std::vector<std::shared_ptr<Process>>& GetProcessList() const;
100 104
101 /// Gets the sole instance of the global scheduler 105 /// Gets the sole instance of the global scheduler
102 Kernel::GlobalScheduler& GlobalScheduler(); 106 Kernel::GlobalSchedulerContext& GlobalSchedulerContext();
103 107
104 /// Gets the sole instance of the global scheduler 108 /// Gets the sole instance of the global scheduler
105 const Kernel::GlobalScheduler& GlobalScheduler() const; 109 const Kernel::GlobalSchedulerContext& GlobalSchedulerContext() const;
106 110
107 /// Gets the sole instance of the Scheduler assoviated with cpu core 'id' 111 /// Gets the sole instance of the Scheduler assoviated with cpu core 'id'
108 Kernel::Scheduler& Scheduler(std::size_t id); 112 Kernel::KScheduler& Scheduler(std::size_t id);
109 113
110 /// Gets the sole instance of the Scheduler assoviated with cpu core 'id' 114 /// Gets the sole instance of the Scheduler assoviated with cpu core 'id'
111 const Kernel::Scheduler& Scheduler(std::size_t id) const; 115 const Kernel::KScheduler& Scheduler(std::size_t id) const;
112 116
113 /// Gets the an instance of the respective physical CPU core. 117 /// Gets the an instance of the respective physical CPU core.
114 Kernel::PhysicalCore& PhysicalCore(std::size_t id); 118 Kernel::PhysicalCore& PhysicalCore(std::size_t id);
@@ -117,10 +121,7 @@ public:
117 const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const; 121 const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
118 122
119 /// Gets the sole instance of the Scheduler at the current running core. 123 /// Gets the sole instance of the Scheduler at the current running core.
120 Kernel::Scheduler& CurrentScheduler(); 124 Kernel::KScheduler* CurrentScheduler();
121
122 /// Gets the sole instance of the Scheduler at the current running core.
123 const Kernel::Scheduler& CurrentScheduler() const;
124 125
125 /// Gets the an instance of the current physical CPU core. 126 /// Gets the an instance of the current physical CPU core.
126 Kernel::PhysicalCore& CurrentPhysicalCore(); 127 Kernel::PhysicalCore& CurrentPhysicalCore();
@@ -153,6 +154,8 @@ public:
153 154
154 void InvalidateAllInstructionCaches(); 155 void InvalidateAllInstructionCaches();
155 156
157 void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
158
156 /// Adds a port to the named port table 159 /// Adds a port to the named port table
157 void AddNamedPort(std::string name, std::shared_ptr<ClientPort> port); 160 void AddNamedPort(std::string name, std::shared_ptr<ClientPort> port);
158 161
@@ -225,6 +228,22 @@ public:
225 228
226 void ExitSVCProfile(); 229 void ExitSVCProfile();
227 230
231 /**
232 * Creates an HLE service thread, which is used to execute service routines asynchronously.
233 * While these are allocated per ServerSession, these need to be owned and managed outside of
234 * ServerSession to avoid a circular dependency.
235 * @param name String name for the ServerSession creating this thread, used for debug purposes.
236 * @returns A weak pointer to the newly created service thread.
237 */
238 std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name);
239
240 /**
241 * Releases an HLE service thread, instructing KernelCore to free it. This should be called when
242 * the ServerSession associated with the thread is destroyed.
243 * @param service_thread Service thread to release.
244 */
245 void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread);
246
228private: 247private:
229 friend class Object; 248 friend class Object;
230 friend class Process; 249 friend class Process;
diff --git a/src/core/hle/kernel/memory/address_space_info.cpp b/src/core/hle/kernel/memory/address_space_info.cpp
index e4288cab4..6cf43ba24 100644
--- a/src/core/hle/kernel/memory/address_space_info.cpp
+++ b/src/core/hle/kernel/memory/address_space_info.cpp
@@ -96,6 +96,7 @@ u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
96 return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address; 96 return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
97 } 97 }
98 UNREACHABLE(); 98 UNREACHABLE();
99 return 0;
99} 100}
100 101
101std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) { 102std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
@@ -112,6 +113,7 @@ std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
112 return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size; 113 return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
113 } 114 }
114 UNREACHABLE(); 115 UNREACHABLE();
116 return 0;
115} 117}
116 118
117} // namespace Kernel::Memory 119} // namespace Kernel::Memory
diff --git a/src/core/hle/kernel/memory/memory_block.h b/src/core/hle/kernel/memory/memory_block.h
index 9d7839d08..83acece1e 100644
--- a/src/core/hle/kernel/memory/memory_block.h
+++ b/src/core/hle/kernel/memory/memory_block.h
@@ -73,12 +73,12 @@ enum class MemoryState : u32 {
73 ThreadLocal = 73 ThreadLocal =
74 static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted, 74 static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted,
75 75
76 Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc | 76 Transferred = static_cast<u32>(Svc::MemoryState::Transferred) | FlagsMisc |
77 FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc | 77 FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
78 FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, 78 FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
79 79
80 SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc | 80 SharedTransferred = static_cast<u32>(Svc::MemoryState::SharedTransferred) | FlagsMisc |
81 FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, 81 FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
82 82
83 SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped | 83 SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped |
84 FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, 84 FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
@@ -111,8 +111,8 @@ static_assert(static_cast<u32>(MemoryState::AliasCodeData) == 0x03FFBD09);
111static_assert(static_cast<u32>(MemoryState::Ipc) == 0x005C3C0A); 111static_assert(static_cast<u32>(MemoryState::Ipc) == 0x005C3C0A);
112static_assert(static_cast<u32>(MemoryState::Stack) == 0x005C3C0B); 112static_assert(static_cast<u32>(MemoryState::Stack) == 0x005C3C0B);
113static_assert(static_cast<u32>(MemoryState::ThreadLocal) == 0x0040200C); 113static_assert(static_cast<u32>(MemoryState::ThreadLocal) == 0x0040200C);
114static_assert(static_cast<u32>(MemoryState::Transfered) == 0x015C3C0D); 114static_assert(static_cast<u32>(MemoryState::Transferred) == 0x015C3C0D);
115static_assert(static_cast<u32>(MemoryState::SharedTransfered) == 0x005C380E); 115static_assert(static_cast<u32>(MemoryState::SharedTransferred) == 0x005C380E);
116static_assert(static_cast<u32>(MemoryState::SharedCode) == 0x0040380F); 116static_assert(static_cast<u32>(MemoryState::SharedCode) == 0x0040380F);
117static_assert(static_cast<u32>(MemoryState::Inaccessible) == 0x00000010); 117static_assert(static_cast<u32>(MemoryState::Inaccessible) == 0x00000010);
118static_assert(static_cast<u32>(MemoryState::NonSecureIpc) == 0x005C3811); 118static_assert(static_cast<u32>(MemoryState::NonSecureIpc) == 0x005C3811);
@@ -222,9 +222,9 @@ public:
222 222
223public: 223public:
224 constexpr MemoryBlock() = default; 224 constexpr MemoryBlock() = default;
225 constexpr MemoryBlock(VAddr addr, std::size_t num_pages, MemoryState state, 225 constexpr MemoryBlock(VAddr addr_, std::size_t num_pages_, MemoryState state_,
226 MemoryPermission perm, MemoryAttribute attribute) 226 MemoryPermission perm_, MemoryAttribute attribute_)
227 : addr{addr}, num_pages(num_pages), state{state}, perm{perm}, attribute{attribute} {} 227 : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}
228 228
229 constexpr VAddr GetAddress() const { 229 constexpr VAddr GetAddress() const {
230 return addr; 230 return addr;
diff --git a/src/core/hle/kernel/memory/memory_block_manager.h b/src/core/hle/kernel/memory/memory_block_manager.h
index 6e1d41075..f57d1bbcc 100644
--- a/src/core/hle/kernel/memory/memory_block_manager.h
+++ b/src/core/hle/kernel/memory/memory_block_manager.h
@@ -57,8 +57,8 @@ public:
57private: 57private:
58 void MergeAdjacent(iterator it, iterator& next_it); 58 void MergeAdjacent(iterator it, iterator& next_it);
59 59
60 const VAddr start_addr; 60 [[maybe_unused]] const VAddr start_addr;
61 const VAddr end_addr; 61 [[maybe_unused]] const VAddr end_addr;
62 62
63 MemoryBlockTree memory_block_tree; 63 MemoryBlockTree memory_block_tree;
64}; 64};
diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
index a3fadb533..080886554 100644
--- a/src/core/hle/kernel/memory/page_table.cpp
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -265,7 +265,7 @@ ResultCode PageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_t
265 physical_memory_usage = 0; 265 physical_memory_usage = 0;
266 memory_pool = pool; 266 memory_pool = pool;
267 267
268 page_table_impl.Resize(address_space_width, PageBits, true); 268 page_table_impl.Resize(address_space_width, PageBits);
269 269
270 return InitializeMemoryLayout(start, end); 270 return InitializeMemoryLayout(start, end);
271} 271}
@@ -670,6 +670,11 @@ ResultCode PageTable::SetCodeMemoryPermission(VAddr addr, std::size_t size, Memo
670 return RESULT_SUCCESS; 670 return RESULT_SUCCESS;
671 } 671 }
672 672
673 if ((prev_perm & MemoryPermission::Execute) != (perm & MemoryPermission::Execute)) {
674 // Memory execution state is changing, invalidate CPU cache range
675 system.InvalidateCpuInstructionCacheRange(addr, size);
676 }
677
673 const std::size_t num_pages{size / PageSize}; 678 const std::size_t num_pages{size / PageSize};
674 const OperationType operation{(perm & MemoryPermission::Execute) != MemoryPermission::None 679 const OperationType operation{(perm & MemoryPermission::Execute) != MemoryPermission::None
675 ? OperationType::ChangePermissionsAndRefresh 680 ? OperationType::ChangePermissionsAndRefresh
@@ -1002,8 +1007,8 @@ constexpr VAddr PageTable::GetRegionAddress(MemoryState state) const {
1002 case MemoryState::Shared: 1007 case MemoryState::Shared:
1003 case MemoryState::AliasCode: 1008 case MemoryState::AliasCode:
1004 case MemoryState::AliasCodeData: 1009 case MemoryState::AliasCodeData:
1005 case MemoryState::Transfered: 1010 case MemoryState::Transferred:
1006 case MemoryState::SharedTransfered: 1011 case MemoryState::SharedTransferred:
1007 case MemoryState::SharedCode: 1012 case MemoryState::SharedCode:
1008 case MemoryState::GeneratedCode: 1013 case MemoryState::GeneratedCode:
1009 case MemoryState::CodeOut: 1014 case MemoryState::CodeOut:
@@ -1037,8 +1042,8 @@ constexpr std::size_t PageTable::GetRegionSize(MemoryState state) const {
1037 case MemoryState::Shared: 1042 case MemoryState::Shared:
1038 case MemoryState::AliasCode: 1043 case MemoryState::AliasCode:
1039 case MemoryState::AliasCodeData: 1044 case MemoryState::AliasCodeData:
1040 case MemoryState::Transfered: 1045 case MemoryState::Transferred:
1041 case MemoryState::SharedTransfered: 1046 case MemoryState::SharedTransferred:
1042 case MemoryState::SharedCode: 1047 case MemoryState::SharedCode:
1043 case MemoryState::GeneratedCode: 1048 case MemoryState::GeneratedCode:
1044 case MemoryState::CodeOut: 1049 case MemoryState::CodeOut:
@@ -1075,8 +1080,8 @@ constexpr bool PageTable::CanContain(VAddr addr, std::size_t size, MemoryState s
1075 case MemoryState::AliasCodeData: 1080 case MemoryState::AliasCodeData:
1076 case MemoryState::Stack: 1081 case MemoryState::Stack:
1077 case MemoryState::ThreadLocal: 1082 case MemoryState::ThreadLocal:
1078 case MemoryState::Transfered: 1083 case MemoryState::Transferred:
1079 case MemoryState::SharedTransfered: 1084 case MemoryState::SharedTransferred:
1080 case MemoryState::SharedCode: 1085 case MemoryState::SharedCode:
1081 case MemoryState::GeneratedCode: 1086 case MemoryState::GeneratedCode:
1082 case MemoryState::CodeOut: 1087 case MemoryState::CodeOut:
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index 8f6c944d1..4f8075e0e 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -11,11 +11,11 @@
11#include "core/core.h" 11#include "core/core.h"
12#include "core/hle/kernel/errors.h" 12#include "core/hle/kernel/errors.h"
13#include "core/hle/kernel/handle_table.h" 13#include "core/hle/kernel/handle_table.h"
14#include "core/hle/kernel/k_scheduler.h"
14#include "core/hle/kernel/kernel.h" 15#include "core/hle/kernel/kernel.h"
15#include "core/hle/kernel/mutex.h" 16#include "core/hle/kernel/mutex.h"
16#include "core/hle/kernel/object.h" 17#include "core/hle/kernel/object.h"
17#include "core/hle/kernel/process.h" 18#include "core/hle/kernel/process.h"
18#include "core/hle/kernel/scheduler.h"
19#include "core/hle/kernel/thread.h" 19#include "core/hle/kernel/thread.h"
20#include "core/hle/result.h" 20#include "core/hle/result.h"
21#include "core/memory.h" 21#include "core/memory.h"
@@ -73,9 +73,9 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
73 73
74 auto& kernel = system.Kernel(); 74 auto& kernel = system.Kernel();
75 std::shared_ptr<Thread> current_thread = 75 std::shared_ptr<Thread> current_thread =
76 SharedFrom(kernel.CurrentScheduler().GetCurrentThread()); 76 SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
77 { 77 {
78 SchedulerLock lock(kernel); 78 KScopedSchedulerLock lock(kernel);
79 // The mutex address must be 4-byte aligned 79 // The mutex address must be 4-byte aligned
80 if ((address % sizeof(u32)) != 0) { 80 if ((address % sizeof(u32)) != 0) {
81 return ERR_INVALID_ADDRESS; 81 return ERR_INVALID_ADDRESS;
@@ -114,7 +114,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
114 } 114 }
115 115
116 { 116 {
117 SchedulerLock lock(kernel); 117 KScopedSchedulerLock lock(kernel);
118 auto* owner = current_thread->GetLockOwner(); 118 auto* owner = current_thread->GetLockOwner();
119 if (owner != nullptr) { 119 if (owner != nullptr) {
120 owner->RemoveMutexWaiter(current_thread); 120 owner->RemoveMutexWaiter(current_thread);
@@ -153,10 +153,10 @@ std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thr
153 153
154ResultCode Mutex::Release(VAddr address) { 154ResultCode Mutex::Release(VAddr address) {
155 auto& kernel = system.Kernel(); 155 auto& kernel = system.Kernel();
156 SchedulerLock lock(kernel); 156 KScopedSchedulerLock lock(kernel);
157 157
158 std::shared_ptr<Thread> current_thread = 158 std::shared_ptr<Thread> current_thread =
159 SharedFrom(kernel.CurrentScheduler().GetCurrentThread()); 159 SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
160 160
161 auto [result, new_owner] = Unlock(current_thread, address); 161 auto [result, new_owner] = Unlock(current_thread, address);
162 162
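The three SchedulerLock call sites above become KScopedSchedulerLock, and the guard remains a plain RAII scope. A minimal sketch of that idiom, assuming the new class keeps the acquire-in-constructor/release-in-destructor shape of the SchedulerLock it replaces (the removed implementation appears further down in this diff; the accessor names here are illustrative):

    // Sketch only: method names are assumptions for illustration.
    class KScopedSchedulerLock {
    public:
        explicit KScopedSchedulerLock(KernelCore& kernel) : kernel{kernel} {
            kernel.GlobalSchedulerContext().Lock();   // lock when the scope opens
        }
        ~KScopedSchedulerLock() {
            kernel.GlobalSchedulerContext().Unlock(); // unlock (and reschedule) when it closes
        }
    private:
        KernelCore& kernel;
    };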
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index c6bbdb080..7fea45f96 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -2,54 +2,60 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include "common/assert.h"
6#include "common/logging/log.h"
7#include "common/spin_lock.h" 5#include "common/spin_lock.h"
8#include "core/arm/arm_interface.h" 6#include "core/arm/cpu_interrupt_handler.h"
9#ifdef ARCHITECTURE_x86_64
10#include "core/arm/dynarmic/arm_dynarmic_32.h" 7#include "core/arm/dynarmic/arm_dynarmic_32.h"
11#include "core/arm/dynarmic/arm_dynarmic_64.h" 8#include "core/arm/dynarmic/arm_dynarmic_64.h"
12#endif
13#include "core/arm/cpu_interrupt_handler.h"
14#include "core/arm/exclusive_monitor.h"
15#include "core/arm/unicorn/arm_unicorn.h"
16#include "core/core.h" 9#include "core/core.h"
10#include "core/hle/kernel/k_scheduler.h"
11#include "core/hle/kernel/kernel.h"
17#include "core/hle/kernel/physical_core.h" 12#include "core/hle/kernel/physical_core.h"
18#include "core/hle/kernel/scheduler.h"
19#include "core/hle/kernel/thread.h"
20 13
21namespace Kernel { 14namespace Kernel {
22 15
23PhysicalCore::PhysicalCore(Core::System& system, std::size_t id, Kernel::Scheduler& scheduler, 16PhysicalCore::PhysicalCore(std::size_t core_index, Core::System& system,
24 Core::CPUInterruptHandler& interrupt_handler) 17 Kernel::KScheduler& scheduler, Core::CPUInterrupts& interrupts)
25 : interrupt_handler{interrupt_handler}, core_index{id}, scheduler{scheduler} { 18 : core_index{core_index}, system{system}, scheduler{scheduler},
26 19 interrupts{interrupts}, guard{std::make_unique<Common::SpinLock>()} {}
27 guard = std::make_unique<Common::SpinLock>();
28}
29 20
30PhysicalCore::~PhysicalCore() = default; 21PhysicalCore::~PhysicalCore() = default;
31 22
32void PhysicalCore::Idle() { 23void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
33 interrupt_handler.AwaitInterrupt(); 24#ifdef ARCHITECTURE_x86_64
25 auto& kernel = system.Kernel();
26 if (is_64_bit) {
27 arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
28 system, interrupts, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
29 } else {
30 arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
31 system, interrupts, kernel.IsMulticore(), kernel.GetExclusiveMonitor(), core_index);
32 }
33#else
34#error Platform not supported yet.
35#endif
34} 36}
35 37
36void PhysicalCore::Shutdown() { 38void PhysicalCore::Run() {
37 scheduler.Shutdown(); 39 arm_interface->Run();
40}
41
42void PhysicalCore::Idle() {
43 interrupts[core_index].AwaitInterrupt();
38} 44}
39 45
40bool PhysicalCore::IsInterrupted() const { 46bool PhysicalCore::IsInterrupted() const {
41 return interrupt_handler.IsInterrupted(); 47 return interrupts[core_index].IsInterrupted();
42} 48}
43 49
44void PhysicalCore::Interrupt() { 50void PhysicalCore::Interrupt() {
45 guard->lock(); 51 guard->lock();
46 interrupt_handler.SetInterrupt(true); 52 interrupts[core_index].SetInterrupt(true);
47 guard->unlock(); 53 guard->unlock();
48} 54}
49 55
50void PhysicalCore::ClearInterrupt() { 56void PhysicalCore::ClearInterrupt() {
51 guard->lock(); 57 guard->lock();
52 interrupt_handler.SetInterrupt(false); 58 interrupts[core_index].SetInterrupt(false);
53 guard->unlock(); 59 guard->unlock();
54} 60}
55 61
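PhysicalCore now indexes a shared CPUInterrupts container with its own core_index instead of holding a single handler reference. A small sketch of what that plumbing allows, assuming CPUInterrupts is an array of CPUInterruptHandler sized to Core::Hardware::NUM_CPU_CORES (only SetInterrupt/AwaitInterrupt from the hunk above are used):

    #include <cstddef>

    // Hedged sketch: wake every core except the caller's own.
    void InterruptOtherCores(Core::CPUInterrupts& interrupts, std::size_t current_core) {
        for (std::size_t core = 0; core < interrupts.size(); ++core) {
            if (core != current_core) {
                interrupts[core].SetInterrupt(true); // unblocks AwaitInterrupt() on that core
            }
        }
    }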
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index d7a7a951c..f2b0911aa 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -4,19 +4,21 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <array>
7#include <cstddef> 8#include <cstddef>
8#include <memory> 9#include <memory>
9 10
11#include "core/arm/arm_interface.h"
12
10namespace Common { 13namespace Common {
11class SpinLock; 14class SpinLock;
12} 15}
13 16
14namespace Kernel { 17namespace Kernel {
15class Scheduler; 18class KScheduler;
16} // namespace Kernel 19} // namespace Kernel
17 20
18namespace Core { 21namespace Core {
19class ARM_Interface;
20class CPUInterruptHandler; 22class CPUInterruptHandler;
21class ExclusiveMonitor; 23class ExclusiveMonitor;
22class System; 24class System;
@@ -26,17 +28,24 @@ namespace Kernel {
26 28
27class PhysicalCore { 29class PhysicalCore {
28public: 30public:
29 PhysicalCore(Core::System& system, std::size_t id, Kernel::Scheduler& scheduler, 31 PhysicalCore(std::size_t core_index, Core::System& system, Kernel::KScheduler& scheduler,
30 Core::CPUInterruptHandler& interrupt_handler); 32 Core::CPUInterrupts& interrupts);
31 ~PhysicalCore(); 33 ~PhysicalCore();
32 34
33 PhysicalCore(const PhysicalCore&) = delete; 35 PhysicalCore(const PhysicalCore&) = delete;
34 PhysicalCore& operator=(const PhysicalCore&) = delete; 36 PhysicalCore& operator=(const PhysicalCore&) = delete;
35 37
36 PhysicalCore(PhysicalCore&&) = default; 38 PhysicalCore(PhysicalCore&&) = default;
37 PhysicalCore& operator=(PhysicalCore&&) = default; 39 PhysicalCore& operator=(PhysicalCore&&) = delete;
40
41 /// Initialize the core for the specified parameters.
42 void Initialize(bool is_64_bit);
43
44 /// Execute current jit state
45 void Run();
38 46
39 void Idle(); 47 void Idle();
48
40 /// Interrupt this physical core. 49 /// Interrupt this physical core.
41 void Interrupt(); 50 void Interrupt();
42 51
@@ -46,8 +55,17 @@ public:
46 /// Check if this core is interrupted 55 /// Check if this core is interrupted
47 bool IsInterrupted() const; 56 bool IsInterrupted() const;
48 57
49 // Shutdown this physical core. 58 bool IsInitialized() const {
50 void Shutdown(); 59 return arm_interface != nullptr;
60 }
61
62 Core::ARM_Interface& ArmInterface() {
63 return *arm_interface;
64 }
65
66 const Core::ARM_Interface& ArmInterface() const {
67 return *arm_interface;
68 }
51 69
52 bool IsMainCore() const { 70 bool IsMainCore() const {
53 return core_index == 0; 71 return core_index == 0;
@@ -61,19 +79,21 @@ public:
61 return core_index; 79 return core_index;
62 } 80 }
63 81
64 Kernel::Scheduler& Scheduler() { 82 Kernel::KScheduler& Scheduler() {
65 return scheduler; 83 return scheduler;
66 } 84 }
67 85
68 const Kernel::Scheduler& Scheduler() const { 86 const Kernel::KScheduler& Scheduler() const {
69 return scheduler; 87 return scheduler;
70 } 88 }
71 89
72private: 90private:
73 Core::CPUInterruptHandler& interrupt_handler; 91 const std::size_t core_index;
74 std::size_t core_index; 92 Core::System& system;
75 Kernel::Scheduler& scheduler; 93 Kernel::KScheduler& scheduler;
94 Core::CPUInterrupts& interrupts;
76 std::unique_ptr<Common::SpinLock> guard; 95 std::unique_ptr<Common::SpinLock> guard;
96 std::unique_ptr<Core::ARM_Interface> arm_interface;
77}; 97};
78 98
79} // namespace Kernel 99} // namespace Kernel
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index ff9d9248b..b905b486a 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -4,6 +4,7 @@
4 4
5#include <algorithm> 5#include <algorithm>
6#include <bitset> 6#include <bitset>
7#include <ctime>
7#include <memory> 8#include <memory>
8#include <random> 9#include <random>
9#include "common/alignment.h" 10#include "common/alignment.h"
@@ -14,13 +15,13 @@
14#include "core/file_sys/program_metadata.h" 15#include "core/file_sys/program_metadata.h"
15#include "core/hle/kernel/code_set.h" 16#include "core/hle/kernel/code_set.h"
16#include "core/hle/kernel/errors.h" 17#include "core/hle/kernel/errors.h"
18#include "core/hle/kernel/k_scheduler.h"
17#include "core/hle/kernel/kernel.h" 19#include "core/hle/kernel/kernel.h"
18#include "core/hle/kernel/memory/memory_block_manager.h" 20#include "core/hle/kernel/memory/memory_block_manager.h"
19#include "core/hle/kernel/memory/page_table.h" 21#include "core/hle/kernel/memory/page_table.h"
20#include "core/hle/kernel/memory/slab_heap.h" 22#include "core/hle/kernel/memory/slab_heap.h"
21#include "core/hle/kernel/process.h" 23#include "core/hle/kernel/process.h"
22#include "core/hle/kernel/resource_limit.h" 24#include "core/hle/kernel/resource_limit.h"
23#include "core/hle/kernel/scheduler.h"
24#include "core/hle/kernel/thread.h" 25#include "core/hle/kernel/thread.h"
25#include "core/hle/lock.h" 26#include "core/hle/lock.h"
26#include "core/memory.h" 27#include "core/memory.h"
@@ -53,7 +54,7 @@ void SetupMainThread(Core::System& system, Process& owner_process, u32 priority,
53 auto& kernel = system.Kernel(); 54 auto& kernel = system.Kernel();
54 // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires 55 // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
55 { 56 {
56 SchedulerLock lock{kernel}; 57 KScopedSchedulerLock lock{kernel};
57 thread->SetStatus(ThreadStatus::Ready); 58 thread->SetStatus(ThreadStatus::Ready);
58 } 59 }
59} 60}
@@ -123,7 +124,7 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name,
123 : kernel.CreateNewUserProcessID(); 124 : kernel.CreateNewUserProcessID();
124 process->capabilities.InitializeForMetadatalessProcess(); 125 process->capabilities.InitializeForMetadatalessProcess();
125 126
126 std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(0)); 127 std::mt19937 rng(Settings::values.rng_seed.GetValue().value_or(std::time(nullptr)));
127 std::uniform_int_distribution<u64> distribution; 128 std::uniform_int_distribution<u64> distribution;
128 std::generate(process->random_entropy.begin(), process->random_entropy.end(), 129 std::generate(process->random_entropy.begin(), process->random_entropy.end(),
129 [&] { return distribution(rng); }); 130 [&] { return distribution(rng); });
@@ -212,7 +213,7 @@ void Process::UnregisterThread(const Thread* thread) {
212} 213}
213 214
214ResultCode Process::ClearSignalState() { 215ResultCode Process::ClearSignalState() {
215 SchedulerLock lock(system.Kernel()); 216 KScopedSchedulerLock lock(system.Kernel());
216 if (status == ProcessStatus::Exited) { 217 if (status == ProcessStatus::Exited) {
217 LOG_ERROR(Kernel, "called on a terminated process instance."); 218 LOG_ERROR(Kernel, "called on a terminated process instance.");
218 return ERR_INVALID_STATE; 219 return ERR_INVALID_STATE;
@@ -313,7 +314,7 @@ void Process::PrepareForTermination() {
313 if (thread->GetOwnerProcess() != this) 314 if (thread->GetOwnerProcess() != this)
314 continue; 315 continue;
315 316
316 if (thread.get() == system.CurrentScheduler().GetCurrentThread()) 317 if (thread.get() == kernel.CurrentScheduler()->GetCurrentThread())
317 continue; 318 continue;
318 319
319 // TODO(Subv): When are the other running/ready threads terminated? 320 // TODO(Subv): When are the other running/ready threads terminated?
@@ -324,7 +325,7 @@ void Process::PrepareForTermination() {
324 } 325 }
325 }; 326 };
326 327
327 stop_threads(system.GlobalScheduler().GetThreadList()); 328 stop_threads(system.GlobalSchedulerContext().GetThreadList());
328 329
329 FreeTLSRegion(tls_region_address); 330 FreeTLSRegion(tls_region_address);
330 tls_region_address = 0; 331 tls_region_address = 0;
@@ -346,7 +347,7 @@ static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
346} 347}
347 348
348VAddr Process::CreateTLSRegion() { 349VAddr Process::CreateTLSRegion() {
349 SchedulerLock lock(system.Kernel()); 350 KScopedSchedulerLock lock(system.Kernel());
350 if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)}; 351 if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
351 tls_page_iter != tls_pages.cend()) { 352 tls_page_iter != tls_pages.cend()) {
352 return *tls_page_iter->ReserveSlot(); 353 return *tls_page_iter->ReserveSlot();
@@ -377,7 +378,7 @@ VAddr Process::CreateTLSRegion() {
377} 378}
378 379
379void Process::FreeTLSRegion(VAddr tls_address) { 380void Process::FreeTLSRegion(VAddr tls_address) {
380 SchedulerLock lock(system.Kernel()); 381 KScopedSchedulerLock lock(system.Kernel());
381 const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE); 382 const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
382 auto iter = 383 auto iter =
383 std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) { 384 std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
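The entropy seeding change above falls back to the current wall-clock time when no fixed rng_seed is configured, so RandomEntropy differs between runs while a configured seed still gives deterministic output. A self-contained sketch of the same idea (the array size and standard types are illustrative):

    #include <array>
    #include <cstdint>
    #include <ctime>
    #include <optional>
    #include <random>

    std::array<std::uint64_t, 4> GenerateRandomEntropy(std::optional<std::uint64_t> rng_seed) {
        // Use the configured seed when present, otherwise the current time.
        std::mt19937 rng(static_cast<std::mt19937::result_type>(rng_seed.value_or(std::time(nullptr))));
        std::uniform_int_distribution<std::uint64_t> distribution;
        std::array<std::uint64_t, 4> entropy{};
        for (auto& word : entropy) {
            word = distribution(rng);
        }
        return entropy;
    }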
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index f45cb5674..e412e58aa 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -216,6 +216,16 @@ public:
216 total_process_running_time_ticks += ticks; 216 total_process_running_time_ticks += ticks;
217 } 217 }
218 218
219 /// Gets the process schedule count, used for thread yielding.
220 s64 GetScheduledCount() const {
221 return schedule_count;
222 }
223
224 /// Increments the process schedule count, used for thread yielding.
225 void IncrementScheduledCount() {
226 ++schedule_count;
227 }
228
219 /// Gets 8 bytes of random data for svcGetInfo RandomEntropy 229 /// Gets 8 bytes of random data for svcGetInfo RandomEntropy
220 u64 GetRandomEntropy(std::size_t index) const { 230 u64 GetRandomEntropy(std::size_t index) const {
221 return random_entropy.at(index); 231 return random_entropy.at(index);
@@ -397,6 +407,9 @@ private:
397 /// Name of this process 407 /// Name of this process
398 std::string name; 408 std::string name;
399 409
410 /// Schedule count of this process
411 s64 schedule_count{};
412
400 /// System context 413 /// System context
401 Core::System& system; 414 Core::System& system;
402}; 415};
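GetScheduledCount/IncrementScheduledCount give the new scheduler a per-process counter to tell whether anything was actually rescheduled since a thread yielded. A hypothetical check built only on the accessor above (the comparison logic is an assumption for illustration, not the scheduler's actual test):

    // If the process-wide schedule count has not advanced since this thread
    // recorded it, the yield did not let any other thread run.
    bool WasYieldRedundant(const Kernel::Process& process, s64 count_at_yield) {
        return process.GetScheduledCount() == count_at_yield;
    }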
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
index 63880f13d..0f128c586 100644
--- a/src/core/hle/kernel/process_capability.cpp
+++ b/src/core/hle/kernel/process_capability.cpp
@@ -199,7 +199,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
199 break; 199 break;
200 } 200 }
201 201
202 LOG_ERROR(Kernel, "Invalid capability type! type={}", static_cast<u32>(type)); 202 LOG_ERROR(Kernel, "Invalid capability type! type={}", type);
203 return ERR_INVALID_CAPABILITY_DESCRIPTOR; 203 return ERR_INVALID_CAPABILITY_DESCRIPTOR;
204} 204}
205 205
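Dropping the static_cast in the LOG_ERROR call relies on the logging layer knowing how to format the enum directly; the project wires that up generically. The underlying fmt pattern looks roughly like this sketch (the enum body and formatter are illustrative, not the project's actual code):

    #include <cstdint>
    #include <fmt/format.h>

    enum class CapabilityType : std::uint32_t { Unset = 0 /* ... */ };

    template <>
    struct fmt::formatter<CapabilityType> {
        constexpr auto parse(fmt::format_parse_context& ctx) { return ctx.begin(); }
        template <typename FormatContext>
        auto format(CapabilityType type, FormatContext& ctx) const {
            // Print the underlying numeric value, matching the old static_cast behaviour.
            return fmt::format_to(ctx.out(), "{}", static_cast<std::uint32_t>(type));
        }
    };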
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index 6e286419e..cea262ce0 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -6,10 +6,10 @@
6#include "common/assert.h" 6#include "common/assert.h"
7#include "common/logging/log.h" 7#include "common/logging/log.h"
8#include "core/hle/kernel/errors.h" 8#include "core/hle/kernel/errors.h"
9#include "core/hle/kernel/k_scheduler.h"
9#include "core/hle/kernel/kernel.h" 10#include "core/hle/kernel/kernel.h"
10#include "core/hle/kernel/object.h" 11#include "core/hle/kernel/object.h"
11#include "core/hle/kernel/readable_event.h" 12#include "core/hle/kernel/readable_event.h"
12#include "core/hle/kernel/scheduler.h"
13#include "core/hle/kernel/thread.h" 13#include "core/hle/kernel/thread.h"
14 14
15namespace Kernel { 15namespace Kernel {
@@ -39,7 +39,7 @@ void ReadableEvent::Clear() {
39} 39}
40 40
41ResultCode ReadableEvent::Reset() { 41ResultCode ReadableEvent::Reset() {
42 SchedulerLock lock(kernel); 42 KScopedSchedulerLock lock(kernel);
43 if (!is_signaled) { 43 if (!is_signaled) {
44 LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}", 44 LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
45 GetObjectId(), GetTypeName(), GetName()); 45 GetObjectId(), GetTypeName(), GetName());
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
index 212e442f4..7bf50339d 100644
--- a/src/core/hle/kernel/resource_limit.cpp
+++ b/src/core/hle/kernel/resource_limit.cpp
@@ -65,8 +65,8 @@ ResultCode ResourceLimit::SetLimitValue(ResourceType resource, s64 value) {
65 limit[index] = value; 65 limit[index] = value;
66 return RESULT_SUCCESS; 66 return RESULT_SUCCESS;
67 } else { 67 } else {
68 LOG_ERROR(Kernel, "Limit value is too large! resource={}, value={}, index={}", 68 LOG_ERROR(Kernel, "Limit value is too large! resource={}, value={}, index={}", resource,
69 static_cast<u32>(resource), value, index); 69 value, index);
70 return ERR_INVALID_STATE; 70 return ERR_INVALID_STATE;
71 } 71 }
72} 72}
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
deleted file mode 100644
index 5cbd3b912..000000000
--- a/src/core/hle/kernel/scheduler.cpp
+++ /dev/null
@@ -1,849 +0,0 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4//
5// SelectThreads, Yield functions originally by TuxSH.
6// licensed under GPLv2 or later under exception provided by the author.
7
8#include <algorithm>
9#include <mutex>
10#include <set>
11#include <unordered_set>
12#include <utility>
13
14#include "common/assert.h"
15#include "common/bit_util.h"
16#include "common/fiber.h"
17#include "common/logging/log.h"
18#include "core/arm/arm_interface.h"
19#include "core/core.h"
20#include "core/core_timing.h"
21#include "core/cpu_manager.h"
22#include "core/hle/kernel/kernel.h"
23#include "core/hle/kernel/physical_core.h"
24#include "core/hle/kernel/process.h"
25#include "core/hle/kernel/scheduler.h"
26#include "core/hle/kernel/time_manager.h"
27
28namespace Kernel {
29
30GlobalScheduler::GlobalScheduler(KernelCore& kernel) : kernel{kernel} {}
31
32GlobalScheduler::~GlobalScheduler() = default;
33
34void GlobalScheduler::AddThread(std::shared_ptr<Thread> thread) {
35 std::scoped_lock lock{global_list_guard};
36 thread_list.push_back(std::move(thread));
37}
38
39void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
40 std::scoped_lock lock{global_list_guard};
41 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
42 thread_list.end());
43}
44
45u32 GlobalScheduler::SelectThreads() {
46 ASSERT(is_locked);
47 const auto update_thread = [](Thread* thread, Scheduler& sched) {
48 std::scoped_lock lock{sched.guard};
49 if (thread != sched.selected_thread_set.get()) {
50 if (thread == nullptr) {
51 ++sched.idle_selection_count;
52 }
53 sched.selected_thread_set = SharedFrom(thread);
54 }
55 const bool reschedule_pending =
56 sched.is_context_switch_pending || (sched.selected_thread_set != sched.current_thread);
57 sched.is_context_switch_pending = reschedule_pending;
58 std::atomic_thread_fence(std::memory_order_seq_cst);
59 return reschedule_pending;
60 };
61 if (!is_reselection_pending.load()) {
62 return 0;
63 }
64 std::array<Thread*, Core::Hardware::NUM_CPU_CORES> top_threads{};
65
66 u32 idle_cores{};
67
68 // Step 1: Get top thread in schedule queue.
69 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
70 Thread* top_thread =
71 scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
72 if (top_thread != nullptr) {
73 // TODO(Blinkhawk): Implement Thread Pinning
74 } else {
75 idle_cores |= (1ul << core);
76 }
77 top_threads[core] = top_thread;
78 }
79
80 while (idle_cores != 0) {
81 u32 core_id = Common::CountTrailingZeroes32(idle_cores);
82
83 if (!suggested_queue[core_id].empty()) {
84 std::array<s32, Core::Hardware::NUM_CPU_CORES> migration_candidates{};
85 std::size_t num_candidates = 0;
86 auto iter = suggested_queue[core_id].begin();
87 Thread* suggested = nullptr;
88 // Step 2: Try selecting a suggested thread.
89 while (iter != suggested_queue[core_id].end()) {
90 suggested = *iter;
91 iter++;
92 s32 suggested_core_id = suggested->GetProcessorID();
93 Thread* top_thread =
94 suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr;
95 if (top_thread != suggested) {
96 if (top_thread != nullptr &&
97 top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
98 suggested = nullptr;
99 break;
100 // There's a too high thread to do core migration, cancel
101 }
102 TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested);
103 break;
104 }
105 suggested = nullptr;
106 migration_candidates[num_candidates++] = suggested_core_id;
107 }
108 // Step 3: Select a suggested thread from another core
109 if (suggested == nullptr) {
110 for (std::size_t i = 0; i < num_candidates; i++) {
111 s32 candidate_core = migration_candidates[i];
112 suggested = top_threads[candidate_core];
113 auto it = scheduled_queue[candidate_core].begin();
114 it++;
115 Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
116 if (next != nullptr) {
117 TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
118 suggested);
119 top_threads[candidate_core] = next;
120 break;
121 } else {
122 suggested = nullptr;
123 }
124 }
125 }
126 top_threads[core_id] = suggested;
127 }
128
129 idle_cores &= ~(1ul << core_id);
130 }
131 u32 cores_needing_context_switch{};
132 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
133 Scheduler& sched = kernel.Scheduler(core);
134 ASSERT(top_threads[core] == nullptr ||
135 static_cast<u32>(top_threads[core]->GetProcessorID()) == core);
136 if (update_thread(top_threads[core], sched)) {
137 cores_needing_context_switch |= (1ul << core);
138 }
139 }
140 return cores_needing_context_switch;
141}
142
143bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
144 ASSERT(is_locked);
145 // Note: caller should use critical section, etc.
146 if (!yielding_thread->IsRunnable()) {
147 // Normally this case shouldn't happen except for SetThreadActivity.
148 is_reselection_pending.store(true, std::memory_order_release);
149 return false;
150 }
151 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
152 const u32 priority = yielding_thread->GetPriority();
153
154 // Yield the thread
155 Reschedule(priority, core_id, yielding_thread);
156 const Thread* const winner = scheduled_queue[core_id].front();
157 if (kernel.GetCurrentHostThreadID() != core_id) {
158 is_reselection_pending.store(true, std::memory_order_release);
159 }
160
161 return AskForReselectionOrMarkRedundant(yielding_thread, winner);
162}
163
164bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
165 ASSERT(is_locked);
166 // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
167 // etc.
168 if (!yielding_thread->IsRunnable()) {
169 // Normally this case shouldn't happen except for SetThreadActivity.
170 is_reselection_pending.store(true, std::memory_order_release);
171 return false;
172 }
173 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
174 const u32 priority = yielding_thread->GetPriority();
175
176 // Yield the thread
177 Reschedule(priority, core_id, yielding_thread);
178
179 std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
180 for (std::size_t i = 0; i < current_threads.size(); i++) {
181 current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
182 }
183
184 Thread* next_thread = scheduled_queue[core_id].front(priority);
185 Thread* winner = nullptr;
186 for (auto& thread : suggested_queue[core_id]) {
187 const s32 source_core = thread->GetProcessorID();
188 if (source_core >= 0) {
189 if (current_threads[source_core] != nullptr) {
190 if (thread == current_threads[source_core] ||
191 current_threads[source_core]->GetPriority() < min_regular_priority) {
192 continue;
193 }
194 }
195 }
196 if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
197 next_thread->GetPriority() < thread->GetPriority()) {
198 if (thread->GetPriority() <= priority) {
199 winner = thread;
200 break;
201 }
202 }
203 }
204
205 if (winner != nullptr) {
206 if (winner != yielding_thread) {
207 TransferToCore(winner->GetPriority(), s32(core_id), winner);
208 }
209 } else {
210 winner = next_thread;
211 }
212
213 if (kernel.GetCurrentHostThreadID() != core_id) {
214 is_reselection_pending.store(true, std::memory_order_release);
215 }
216
217 return AskForReselectionOrMarkRedundant(yielding_thread, winner);
218}
219
220bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
221 ASSERT(is_locked);
222 // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
223 // etc.
224 if (!yielding_thread->IsRunnable()) {
225 // Normally this case shouldn't happen except for SetThreadActivity.
226 is_reselection_pending.store(true, std::memory_order_release);
227 return false;
228 }
229 Thread* winner = nullptr;
230 const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
231
232 // Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one instead
233 TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread);
234
235 // If the core is idle, perform load balancing, excluding the threads that have just used this
236 // function...
237 if (scheduled_queue[core_id].empty()) {
238 // Here, "current_threads" is calculated after the yield, unlike the yield -1 case
239 std::array<Thread*, Core::Hardware::NUM_CPU_CORES> current_threads;
240 for (std::size_t i = 0; i < current_threads.size(); i++) {
241 current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
242 }
243 for (auto& thread : suggested_queue[core_id]) {
244 const s32 source_core = thread->GetProcessorID();
245 if (source_core < 0 || thread == current_threads[source_core]) {
246 continue;
247 }
248 if (current_threads[source_core] == nullptr ||
249 current_threads[source_core]->GetPriority() >= min_regular_priority) {
250 winner = thread;
251 }
252 break;
253 }
254 if (winner != nullptr) {
255 if (winner != yielding_thread) {
256 TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
257 }
258 } else {
259 winner = yielding_thread;
260 }
261 } else {
262 winner = scheduled_queue[core_id].front();
263 }
264
265 if (kernel.GetCurrentHostThreadID() != core_id) {
266 is_reselection_pending.store(true, std::memory_order_release);
267 }
268
269 return AskForReselectionOrMarkRedundant(yielding_thread, winner);
270}
271
272void GlobalScheduler::PreemptThreads() {
273 ASSERT(is_locked);
274 for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
275 const u32 priority = preemption_priorities[core_id];
276
277 if (scheduled_queue[core_id].size(priority) > 0) {
278 if (scheduled_queue[core_id].size(priority) > 1) {
279 scheduled_queue[core_id].front(priority)->IncrementYieldCount();
280 }
281 scheduled_queue[core_id].yield(priority);
282 if (scheduled_queue[core_id].size(priority) > 1) {
283 scheduled_queue[core_id].front(priority)->IncrementYieldCount();
284 }
285 }
286
287 Thread* current_thread =
288 scheduled_queue[core_id].empty() ? nullptr : scheduled_queue[core_id].front();
289 Thread* winner = nullptr;
290 for (auto& thread : suggested_queue[core_id]) {
291 const s32 source_core = thread->GetProcessorID();
292 if (thread->GetPriority() != priority) {
293 continue;
294 }
295 if (source_core >= 0) {
296 Thread* next_thread = scheduled_queue[source_core].empty()
297 ? nullptr
298 : scheduled_queue[source_core].front();
299 if (next_thread != nullptr && next_thread->GetPriority() < 2) {
300 break;
301 }
302 if (next_thread == thread) {
303 continue;
304 }
305 }
306 if (current_thread != nullptr &&
307 current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
308 winner = thread;
309 break;
310 }
311 }
312
313 if (winner != nullptr) {
314 TransferToCore(winner->GetPriority(), s32(core_id), winner);
315 current_thread =
316 winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
317 }
318
319 if (current_thread != nullptr && current_thread->GetPriority() > priority) {
320 for (auto& thread : suggested_queue[core_id]) {
321 const s32 source_core = thread->GetProcessorID();
322 if (thread->GetPriority() < priority) {
323 continue;
324 }
325 if (source_core >= 0) {
326 Thread* next_thread = scheduled_queue[source_core].empty()
327 ? nullptr
328 : scheduled_queue[source_core].front();
329 if (next_thread != nullptr && next_thread->GetPriority() < 2) {
330 break;
331 }
332 if (next_thread == thread) {
333 continue;
334 }
335 }
336 if (current_thread != nullptr &&
337 current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
338 winner = thread;
339 break;
340 }
341 }
342
343 if (winner != nullptr) {
344 TransferToCore(winner->GetPriority(), s32(core_id), winner);
345 current_thread = winner;
346 }
347 }
348
349 is_reselection_pending.store(true, std::memory_order_release);
350 }
351}
352
353void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
354 Core::EmuThreadHandle global_thread) {
355 u32 current_core = global_thread.host_handle;
356 bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
357 (current_core < Core::Hardware::NUM_CPU_CORES);
358 while (cores_pending_reschedule != 0) {
359 u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
360 ASSERT(core < Core::Hardware::NUM_CPU_CORES);
361 if (!must_context_switch || core != current_core) {
362 auto& phys_core = kernel.PhysicalCore(core);
363 phys_core.Interrupt();
364 } else {
365 must_context_switch = true;
366 }
367 cores_pending_reschedule &= ~(1ul << core);
368 }
369 if (must_context_switch) {
370 auto& core_scheduler = kernel.CurrentScheduler();
371 kernel.ExitSVCProfile();
372 core_scheduler.TryDoContextSwitch();
373 kernel.EnterSVCProfile();
374 }
375}
376
377void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) {
378 ASSERT(is_locked);
379 suggested_queue[core].add(thread, priority);
380}
381
382void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) {
383 ASSERT(is_locked);
384 suggested_queue[core].remove(thread, priority);
385}
386
387void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) {
388 ASSERT(is_locked);
389 ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
390 scheduled_queue[core].add(thread, priority);
391}
392
393void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) {
394 ASSERT(is_locked);
395 ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
396 scheduled_queue[core].add(thread, priority, false);
397}
398
399void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) {
400 ASSERT(is_locked);
401 scheduled_queue[core].remove(thread, priority);
402 scheduled_queue[core].add(thread, priority);
403}
404
405void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) {
406 ASSERT(is_locked);
407 scheduled_queue[core].remove(thread, priority);
408}
409
410void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
411 ASSERT(is_locked);
412 const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
413 const s32 source_core = thread->GetProcessorID();
414 if (source_core == destination_core || !schedulable) {
415 return;
416 }
417 thread->SetProcessorID(destination_core);
418 if (source_core >= 0) {
419 Unschedule(priority, static_cast<u32>(source_core), thread);
420 }
421 if (destination_core >= 0) {
422 Unsuggest(priority, static_cast<u32>(destination_core), thread);
423 Schedule(priority, static_cast<u32>(destination_core), thread);
424 }
425 if (source_core >= 0) {
426 Suggest(priority, static_cast<u32>(source_core), thread);
427 }
428}
429
430bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread,
431 const Thread* winner) {
432 if (current_thread == winner) {
433 current_thread->IncrementYieldCount();
434 return true;
435 } else {
436 is_reselection_pending.store(true, std::memory_order_release);
437 return false;
438 }
439}
440
441void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
442 if (old_flags == thread->scheduling_state) {
443 return;
444 }
445 ASSERT(is_locked);
446
447 if (old_flags == static_cast<u32>(ThreadSchedStatus::Runnable)) {
448 // In this case the thread was running, now it's pausing/exiting
449 if (thread->processor_id >= 0) {
450 Unschedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
451 }
452
453 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
454 if (core != static_cast<u32>(thread->processor_id) &&
455 ((thread->affinity_mask >> core) & 1) != 0) {
456 Unsuggest(thread->current_priority, core, thread);
457 }
458 }
459 } else if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
460 // The thread is now set to running from being stopped
461 if (thread->processor_id >= 0) {
462 Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
463 }
464
465 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
466 if (core != static_cast<u32>(thread->processor_id) &&
467 ((thread->affinity_mask >> core) & 1) != 0) {
468 Suggest(thread->current_priority, core, thread);
469 }
470 }
471 }
472
473 SetReselectionPending();
474}
475
476void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priority) {
477 if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable)) {
478 return;
479 }
480 ASSERT(is_locked);
481 if (thread->processor_id >= 0) {
482 Unschedule(old_priority, static_cast<u32>(thread->processor_id), thread);
483 }
484
485 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
486 if (core != static_cast<u32>(thread->processor_id) &&
487 ((thread->affinity_mask >> core) & 1) != 0) {
488 Unsuggest(old_priority, core, thread);
489 }
490 }
491
492 if (thread->processor_id >= 0) {
493 if (thread == kernel.CurrentScheduler().GetCurrentThread()) {
494 SchedulePrepend(thread->current_priority, static_cast<u32>(thread->processor_id),
495 thread);
496 } else {
497 Schedule(thread->current_priority, static_cast<u32>(thread->processor_id), thread);
498 }
499 }
500
501 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
502 if (core != static_cast<u32>(thread->processor_id) &&
503 ((thread->affinity_mask >> core) & 1) != 0) {
504 Suggest(thread->current_priority, core, thread);
505 }
506 }
507 thread->IncrementYieldCount();
508 SetReselectionPending();
509}
510
511void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask,
512 s32 old_core) {
513 if (thread->scheduling_state != static_cast<u32>(ThreadSchedStatus::Runnable) ||
514 thread->current_priority >= THREADPRIO_COUNT) {
515 return;
516 }
517 ASSERT(is_locked);
518
519 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
520 if (((old_affinity_mask >> core) & 1) != 0) {
521 if (core == static_cast<u32>(old_core)) {
522 Unschedule(thread->current_priority, core, thread);
523 } else {
524 Unsuggest(thread->current_priority, core, thread);
525 }
526 }
527 }
528
529 for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
530 if (((thread->affinity_mask >> core) & 1) != 0) {
531 if (core == static_cast<u32>(thread->processor_id)) {
532 Schedule(thread->current_priority, core, thread);
533 } else {
534 Suggest(thread->current_priority, core, thread);
535 }
536 }
537 }
538
539 thread->IncrementYieldCount();
540 SetReselectionPending();
541}
542
543void GlobalScheduler::Shutdown() {
544 for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
545 scheduled_queue[core].clear();
546 suggested_queue[core].clear();
547 }
548 thread_list.clear();
549}
550
551void GlobalScheduler::Lock() {
552 Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID();
553 ASSERT(!current_thread.IsInvalid());
554 if (current_thread == current_owner) {
555 ++scope_lock;
556 } else {
557 inner_lock.lock();
558 is_locked = true;
559 current_owner = current_thread;
560 ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle());
561 scope_lock = 1;
562 }
563}
564
565void GlobalScheduler::Unlock() {
566 if (--scope_lock != 0) {
567 ASSERT(scope_lock > 0);
568 return;
569 }
570 u32 cores_pending_reschedule = SelectThreads();
571 Core::EmuThreadHandle leaving_thread = current_owner;
572 current_owner = Core::EmuThreadHandle::InvalidHandle();
573 scope_lock = 1;
574 is_locked = false;
575 inner_lock.unlock();
576 EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread);
577}
578
579Scheduler::Scheduler(Core::System& system, std::size_t core_id) : system(system), core_id(core_id) {
580 switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this);
581}
582
583Scheduler::~Scheduler() = default;
584
585bool Scheduler::HaveReadyThreads() const {
586 return system.GlobalScheduler().HaveReadyThreads(core_id);
587}
588
589Thread* Scheduler::GetCurrentThread() const {
590 if (current_thread) {
591 return current_thread.get();
592 }
593 return idle_thread.get();
594}
595
596Thread* Scheduler::GetSelectedThread() const {
597 return selected_thread.get();
598}
599
600u64 Scheduler::GetLastContextSwitchTicks() const {
601 return last_context_switch_time;
602}
603
604void Scheduler::TryDoContextSwitch() {
605 auto& phys_core = system.Kernel().CurrentPhysicalCore();
606 if (phys_core.IsInterrupted()) {
607 phys_core.ClearInterrupt();
608 }
609 guard.lock();
610 if (is_context_switch_pending) {
611 SwitchContext();
612 } else {
613 guard.unlock();
614 }
615}
616
617void Scheduler::OnThreadStart() {
618 SwitchContextStep2();
619}
620
621void Scheduler::Unload() {
622 Thread* thread = current_thread.get();
623 if (thread) {
624 thread->SetContinuousOnSVC(false);
625 thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
626 thread->SetIsRunning(false);
627 if (!thread->IsHLEThread() && !thread->HasExited()) {
628 Core::ARM_Interface& cpu_core = thread->ArmInterface();
629 cpu_core.SaveContext(thread->GetContext32());
630 cpu_core.SaveContext(thread->GetContext64());
631 // Save the TPIDR_EL0 system register in case it was modified.
632 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
633 cpu_core.ClearExclusiveState();
634 }
635 thread->context_guard.unlock();
636 }
637}
638
639void Scheduler::Reload() {
640 Thread* thread = current_thread.get();
641 if (thread) {
642 ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
643 "Thread must be runnable.");
644
645 // Cancel any outstanding wakeup events for this thread
646 thread->SetIsRunning(true);
647 thread->SetWasRunning(false);
648 thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
649
650 auto* const thread_owner_process = thread->GetOwnerProcess();
651 if (thread_owner_process != nullptr) {
652 system.Kernel().MakeCurrentProcess(thread_owner_process);
653 }
654 if (!thread->IsHLEThread()) {
655 Core::ARM_Interface& cpu_core = thread->ArmInterface();
656 cpu_core.LoadContext(thread->GetContext32());
657 cpu_core.LoadContext(thread->GetContext64());
658 cpu_core.SetTlsAddress(thread->GetTLSAddress());
659 cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
660 cpu_core.ChangeProcessorID(this->core_id);
661 cpu_core.ClearExclusiveState();
662 }
663 }
664}
665
666void Scheduler::SwitchContextStep2() {
667 // Load context of new thread
668 if (selected_thread) {
669 ASSERT_MSG(selected_thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
670 "Thread must be runnable.");
671
672 // Cancel any outstanding wakeup events for this thread
673 selected_thread->SetIsRunning(true);
674 selected_thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
675 selected_thread->SetWasRunning(false);
676
677 auto* const thread_owner_process = current_thread->GetOwnerProcess();
678 if (thread_owner_process != nullptr) {
679 system.Kernel().MakeCurrentProcess(thread_owner_process);
680 }
681 if (!selected_thread->IsHLEThread()) {
682 Core::ARM_Interface& cpu_core = selected_thread->ArmInterface();
683 cpu_core.LoadContext(selected_thread->GetContext32());
684 cpu_core.LoadContext(selected_thread->GetContext64());
685 cpu_core.SetTlsAddress(selected_thread->GetTLSAddress());
686 cpu_core.SetTPIDR_EL0(selected_thread->GetTPIDR_EL0());
687 cpu_core.ChangeProcessorID(this->core_id);
688 cpu_core.ClearExclusiveState();
689 }
690 }
691
692 TryDoContextSwitch();
693}
694
695void Scheduler::SwitchContext() {
696 current_thread_prev = current_thread;
697 selected_thread = selected_thread_set;
698 Thread* previous_thread = current_thread_prev.get();
699 Thread* new_thread = selected_thread.get();
700 current_thread = selected_thread;
701
702 is_context_switch_pending = false;
703
704 if (new_thread == previous_thread) {
705 guard.unlock();
706 return;
707 }
708
709 Process* const previous_process = system.Kernel().CurrentProcess();
710
711 UpdateLastContextSwitchTime(previous_thread, previous_process);
712
713 // Save context for previous thread
714 if (previous_thread) {
715 if (new_thread != nullptr && new_thread->IsSuspendThread()) {
716 previous_thread->SetWasRunning(true);
717 }
718 previous_thread->SetContinuousOnSVC(false);
719 previous_thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
720 previous_thread->SetIsRunning(false);
721 if (!previous_thread->IsHLEThread() && !previous_thread->HasExited()) {
722 Core::ARM_Interface& cpu_core = previous_thread->ArmInterface();
723 cpu_core.SaveContext(previous_thread->GetContext32());
724 cpu_core.SaveContext(previous_thread->GetContext64());
725 // Save the TPIDR_EL0 system register in case it was modified.
726 previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
727 cpu_core.ClearExclusiveState();
728 }
729 previous_thread->context_guard.unlock();
730 }
731
732 std::shared_ptr<Common::Fiber>* old_context;
733 if (previous_thread != nullptr) {
734 old_context = &previous_thread->GetHostContext();
735 } else {
736 old_context = &idle_thread->GetHostContext();
737 }
738 guard.unlock();
739
740 Common::Fiber::YieldTo(*old_context, switch_fiber);
741 /// When a thread wakes up, the current scheduler may have changed to that of another core.
742 auto& next_scheduler = system.Kernel().CurrentScheduler();
743 next_scheduler.SwitchContextStep2();
744}
745
746void Scheduler::OnSwitch(void* this_scheduler) {
747 Scheduler* sched = static_cast<Scheduler*>(this_scheduler);
748 sched->SwitchToCurrent();
749}
750
751void Scheduler::SwitchToCurrent() {
752 while (true) {
753 {
754 std::scoped_lock lock{guard};
755 selected_thread = selected_thread_set;
756 current_thread = selected_thread;
757 is_context_switch_pending = false;
758 }
759 const auto is_switch_pending = [this] {
760 std::scoped_lock lock{guard};
761 return is_context_switch_pending;
762 };
763 do {
764 if (current_thread != nullptr && !current_thread->IsHLEThread()) {
765 current_thread->context_guard.lock();
766 if (!current_thread->IsRunnable()) {
767 current_thread->context_guard.unlock();
768 break;
769 }
770 if (current_thread->GetProcessorID() != core_id) {
771 current_thread->context_guard.unlock();
772 break;
773 }
774 }
775 std::shared_ptr<Common::Fiber>* next_context;
776 if (current_thread != nullptr) {
777 next_context = &current_thread->GetHostContext();
778 } else {
779 next_context = &idle_thread->GetHostContext();
780 }
781 Common::Fiber::YieldTo(switch_fiber, *next_context);
782 } while (!is_switch_pending());
783 }
784}
785
786void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
787 const u64 prev_switch_ticks = last_context_switch_time;
788 const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
789 const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
790
791 if (thread != nullptr) {
792 thread->UpdateCPUTimeTicks(update_ticks);
793 }
794
795 if (process != nullptr) {
796 process->UpdateCPUTimeTicks(update_ticks);
797 }
798
799 last_context_switch_time = most_recent_switch_ticks;
800}
801
802void Scheduler::Initialize() {
803 std::string name = "Idle Thread Id:" + std::to_string(core_id);
804 std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
805 void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
806 ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
807 auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
808 nullptr, std::move(init_func), init_func_parameter);
809 idle_thread = std::move(thread_res).Unwrap();
810}
811
812void Scheduler::Shutdown() {
813 current_thread = nullptr;
814 selected_thread = nullptr;
815}
816
817SchedulerLock::SchedulerLock(KernelCore& kernel) : kernel{kernel} {
818 kernel.GlobalScheduler().Lock();
819}
820
821SchedulerLock::~SchedulerLock() {
822 kernel.GlobalScheduler().Unlock();
823}
824
825SchedulerLockAndSleep::SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle,
826 Thread* time_task, s64 nanoseconds)
827 : SchedulerLock{kernel}, event_handle{event_handle}, time_task{time_task}, nanoseconds{
828 nanoseconds} {
829 event_handle = InvalidHandle;
830}
831
832SchedulerLockAndSleep::~SchedulerLockAndSleep() {
833 if (sleep_cancelled) {
834 return;
835 }
836 auto& time_manager = kernel.TimeManager();
837 time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
838}
839
840void SchedulerLockAndSleep::Release() {
841 if (sleep_cancelled) {
842 return;
843 }
844 auto& time_manager = kernel.TimeManager();
845 time_manager.ScheduleTimeEvent(event_handle, time_task, nanoseconds);
846 sleep_cancelled = true;
847}
848
849} // namespace Kernel
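The Lock/Unlock pair deleted above is the re-entrant, owner-tracked critical section that k_scheduler_lock.h now provides in generic form. Condensing just that mechanism from the removed code (member names are illustrative, and the thread-selection/interrupt step that Unlock triggers after releasing is elided):

    #include <mutex>

    class ReentrantSchedulerLock {
    public:
        void Lock(Core::EmuThreadHandle current_thread) {
            if (current_thread == owner) {
                ++depth;          // same owner re-entering: just count the nesting
                return;
            }
            inner_lock.lock();    // new owner: take the inner lock
            owner = current_thread;
            depth = 1;
        }
        void Unlock() {
            if (--depth != 0) {
                return;           // still nested; keep holding the lock
            }
            owner = Core::EmuThreadHandle::InvalidHandle();
            inner_lock.unlock();  // outermost release: rescheduling would happen here
        }
    private:
        std::mutex inner_lock;
        Core::EmuThreadHandle owner{Core::EmuThreadHandle::InvalidHandle()};
        s64 depth{};
    };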
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
deleted file mode 100644
index b6f04dcea..000000000
--- a/src/core/hle/kernel/scheduler.h
+++ /dev/null
@@ -1,318 +0,0 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <atomic>
8#include <memory>
9#include <mutex>
10#include <vector>
11
12#include "common/common_types.h"
13#include "common/multi_level_queue.h"
14#include "common/spin_lock.h"
15#include "core/hardware_properties.h"
16#include "core/hle/kernel/thread.h"
17
18namespace Common {
19class Fiber;
20}
21
22namespace Core {
23class ARM_Interface;
24class System;
25} // namespace Core
26
27namespace Kernel {
28
29class KernelCore;
30class Process;
31class SchedulerLock;
32
33class GlobalScheduler final {
34public:
35 explicit GlobalScheduler(KernelCore& kernel);
36 ~GlobalScheduler();
37
38 /// Adds a new thread to the scheduler
39 void AddThread(std::shared_ptr<Thread> thread);
40
41 /// Removes a thread from the scheduler
42 void RemoveThread(std::shared_ptr<Thread> thread);
43
44 /// Returns a list of all threads managed by the scheduler
45 const std::vector<std::shared_ptr<Thread>>& GetThreadList() const {
46 return thread_list;
47 }
48
49 /// Notify the scheduler a thread's status has changed.
50 void AdjustSchedulingOnStatus(Thread* thread, u32 old_flags);
51
52 /// Notify the scheduler a thread's priority has changed.
53 void AdjustSchedulingOnPriority(Thread* thread, u32 old_priority);
54
55 /// Notify the scheduler a thread's core and/or affinity mask has changed.
56 void AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinity_mask, s32 old_core);
57
58 /**
59 * Takes care of selecting the new scheduled threads in three steps:
60 *
61 * 1. First a thread is selected from the top of the priority queue. If no thread
62 * is obtained then we move to step two, else we are done.
63 *
64 * 2. Second we try to get a suggested thread that's not assigned to any core or
65 * that is not the top thread in that core.
66 *
67 * 3. Third, if no suggested thread is found, we do a second pass and pick a running
68 * thread in another core and swap it with its current thread.
69 *
70 * returns the cores needing scheduling.
71 */
72 u32 SelectThreads();
73
74 bool HaveReadyThreads(std::size_t core_id) const {
75 return !scheduled_queue[core_id].empty();
76 }
77
78 /**
79 * Takes a thread and moves it to the back of its priority list.
80 *
81 * @note This operation can be redundant and no scheduling is changed if marked as so.
82 */
83 bool YieldThread(Thread* thread);
84
85 /**
86 * Takes a thread and moves it to the back of its priority list.
87 * Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or
88 * a better priority than the next thread in the core.
89 *
90 * @note This operation can be redundant and no scheduling is changed if marked as so.
91 */
92 bool YieldThreadAndBalanceLoad(Thread* thread);
93
94 /**
95 * Takes a thread and moves it out of the scheduling queue
96 * and into the suggested queue. If no thread can be scheduled afterwards on that core,
97 * a suggested thread is obtained instead.
98 *
99 * @note This operation can be redundant and no scheduling is changed if marked as so.
100 */
101 bool YieldThreadAndWaitForLoadBalancing(Thread* thread);
102
103 /**
104 * Rotates the scheduling queues of threads at a preemption priority and then does
105 * some core rebalancing. Preemption priorities can be found in the array
106 * 'preemption_priorities'.
107 *
108 * @note This operation happens every 10ms.
109 */
110 void PreemptThreads();
111
112 u32 CpuCoresCount() const {
113 return Core::Hardware::NUM_CPU_CORES;
114 }
115
116 void SetReselectionPending() {
117 is_reselection_pending.store(true, std::memory_order_release);
118 }
119
120 bool IsReselectionPending() const {
121 return is_reselection_pending.load(std::memory_order_acquire);
122 }
123
124 void Shutdown();
125
126private:
127 friend class SchedulerLock;
128
129 /// Lock the scheduler to the current thread.
130 void Lock();
131
132 /// Unlocks the scheduler, reselects threads, interrupts cores for rescheduling
133 /// and reschedules current core if needed.
134 void Unlock();
135
136 void EnableInterruptAndSchedule(u32 cores_pending_reschedule,
137 Core::EmuThreadHandle global_thread);
138
139 /**
140 * Add a thread to the suggested queue of a cpu core. Suggested threads may be
141 * picked if no thread is scheduled to run on the core.
142 */
143 void Suggest(u32 priority, std::size_t core, Thread* thread);
144
145 /**
146 * Remove a thread from the suggested queue of a cpu core. Suggested threads may be
147 * picked if no thread is scheduled to run on the core.
148 */
149 void Unsuggest(u32 priority, std::size_t core, Thread* thread);
150
151 /**
152 * Add a thread to the scheduling queue of a cpu core. The thread is added at the
153 * back of the queue in its priority level.
154 */
155 void Schedule(u32 priority, std::size_t core, Thread* thread);
156
157 /**
158 * Add a thread to the scheduling queue of a cpu core. The thread is added at the
159 * front of the queue in its priority level.
160 */
161 void SchedulePrepend(u32 priority, std::size_t core, Thread* thread);
162
163 /// Reschedule an already scheduled thread based on a new priority
164 void Reschedule(u32 priority, std::size_t core, Thread* thread);
165
166 /// Unschedules a thread.
167 void Unschedule(u32 priority, std::size_t core, Thread* thread);
168
169 /**
170 * Transfers a thread into a specific core. If the destination_core is -1,
171 * it will be unscheduled from its source core and added into its suggested
172 * queue.
173 */
174 void TransferToCore(u32 priority, s32 destination_core, Thread* thread);
175
176 bool AskForReselectionOrMarkRedundant(Thread* current_thread, const Thread* winner);
177
178 static constexpr u32 min_regular_priority = 2;
179 std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
180 scheduled_queue;
181 std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, Core::Hardware::NUM_CPU_CORES>
182 suggested_queue;
183 std::atomic<bool> is_reselection_pending{false};
184
185 // The priority levels at which the global scheduler preempts threads every 10 ms. They are
186 // ordered from Core 0 to Core 3.
187 std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
188
189 /// Scheduler lock mechanisms.
190 bool is_locked{};
191 std::mutex inner_lock;
192 std::atomic<s64> scope_lock{};
193 Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()};
194
195 Common::SpinLock global_list_guard{};
196
197 /// Lists all thread ids that aren't deleted/etc.
198 std::vector<std::shared_ptr<Thread>> thread_list;
199 KernelCore& kernel;
200};
201
202class Scheduler final {
203public:
204 explicit Scheduler(Core::System& system, std::size_t core_id);
205 ~Scheduler();
206
207 /// Returns whether there are any threads that are ready to run.
208 bool HaveReadyThreads() const;
209
210 /// Reschedules to the next available thread (call after current thread is suspended)
211 void TryDoContextSwitch();
212
213 /// The next two are for SingleCore Only.
214 /// Unload current thread before preempting core.
215 void Unload();
216 /// Reload current thread after core preemption.
217 void Reload();
218
219 /// Gets the current running thread
220 Thread* GetCurrentThread() const;
221
222 /// Gets the currently selected thread from the top of the multilevel queue
223 Thread* GetSelectedThread() const;
224
225 /// Gets the timestamp for the last context switch in ticks.
226 u64 GetLastContextSwitchTicks() const;
227
228 bool ContextSwitchPending() const {
229 return is_context_switch_pending;
230 }
231
232 void Initialize();
233
234 /// Shuts down the scheduler.
235 void Shutdown();
236
237 void OnThreadStart();
238
239 std::shared_ptr<Common::Fiber>& ControlContext() {
240 return switch_fiber;
241 }
242
243 const std::shared_ptr<Common::Fiber>& ControlContext() const {
244 return switch_fiber;
245 }
246
247private:
248 friend class GlobalScheduler;
249
250 /// Switches the CPU's active thread context to that of the specified thread
251 void SwitchContext();
252
253 /// When a thread wakes up, it must run this through its new scheduler
254 void SwitchContextStep2();
255
256 /**
257 * Called on every context switch to update the internal timestamp
258 * This also updates the running time ticks for the given thread and
259 * process using the following difference:
260 *
261 * ticks += most_recent_ticks - last_context_switch_ticks
262 *
263 * The internal tick timestamp for the scheduler is simply the
264 * most recent tick count retrieved. No special arithmetic is
265 * applied to it.
266 */
267 void UpdateLastContextSwitchTime(Thread* thread, Process* process);
268
269 static void OnSwitch(void* this_scheduler);
270 void SwitchToCurrent();
271
272 std::shared_ptr<Thread> current_thread = nullptr;
273 std::shared_ptr<Thread> selected_thread = nullptr;
274 std::shared_ptr<Thread> current_thread_prev = nullptr;
275 std::shared_ptr<Thread> selected_thread_set = nullptr;
276 std::shared_ptr<Thread> idle_thread = nullptr;
277
278 std::shared_ptr<Common::Fiber> switch_fiber = nullptr;
279
280 Core::System& system;
281 u64 last_context_switch_time = 0;
282 u64 idle_selection_count = 0;
283 const std::size_t core_id;
284
285 Common::SpinLock guard{};
286
287 bool is_context_switch_pending = false;
288};
289
290class SchedulerLock {
291public:
292 [[nodiscard]] explicit SchedulerLock(KernelCore& kernel);
293 ~SchedulerLock();
294
295protected:
296 KernelCore& kernel;
297};
298
299class SchedulerLockAndSleep : public SchedulerLock {
300public:
301 explicit SchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, Thread* time_task,
302 s64 nanoseconds);
303 ~SchedulerLockAndSleep();
304
305 void CancelSleep() {
306 sleep_cancelled = true;
307 }
308
309 void Release();
310
311private:
312 Handle& event_handle;
313 Thread* time_task;
314 s64 nanoseconds;
315 bool sleep_cancelled{};
316};
317
318} // namespace Kernel
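SchedulerLockAndSleep above is superseded by k_scoped_scheduler_lock_and_sleep.h (see the diffstat), and its usage pattern carries over: lock the scheduler, arm a timeout for the waiting thread, and let the destructor schedule that timeout unless the sleep is cancelled inside the scope. A usage sketch assuming the new class keeps the removed constructor signature and CancelSleep() (the wait condition and result codes are illustrative):

    // Hypothetical wait helper; AlreadySignalled() stands in for whatever
    // condition the caller is waiting on.
    ResultCode WaitWithTimeout(KernelCore& kernel, Thread* current_thread, s64 timeout_ns) {
        Handle event_handle{};
        {
            KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout_ns);
            if (AlreadySignalled()) {
                lock.CancelSleep();  // destructor will not schedule the time event
                return RESULT_SUCCESS;
            }
            // ... park current_thread; the timeout event is scheduled when the scope closes ...
        }
        return RESULT_TIMEOUT;       // illustrative result after the timeout fires
    }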
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index 8c19f2534..b40fe3916 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -14,9 +14,9 @@
14#include "core/hle/kernel/client_session.h" 14#include "core/hle/kernel/client_session.h"
15#include "core/hle/kernel/handle_table.h" 15#include "core/hle/kernel/handle_table.h"
16#include "core/hle/kernel/hle_ipc.h" 16#include "core/hle/kernel/hle_ipc.h"
17#include "core/hle/kernel/k_scheduler.h"
17#include "core/hle/kernel/kernel.h" 18#include "core/hle/kernel/kernel.h"
18#include "core/hle/kernel/process.h" 19#include "core/hle/kernel/process.h"
19#include "core/hle/kernel/scheduler.h"
20#include "core/hle/kernel/server_session.h" 20#include "core/hle/kernel/server_session.h"
21#include "core/hle/kernel/session.h" 21#include "core/hle/kernel/session.h"
22#include "core/hle/kernel/thread.h" 22#include "core/hle/kernel/thread.h"
@@ -25,19 +25,19 @@
25namespace Kernel { 25namespace Kernel {
26 26
27ServerSession::ServerSession(KernelCore& kernel) : SynchronizationObject{kernel} {} 27ServerSession::ServerSession(KernelCore& kernel) : SynchronizationObject{kernel} {}
28ServerSession::~ServerSession() = default; 28
29ServerSession::~ServerSession() {
30 kernel.ReleaseServiceThread(service_thread);
31}
29 32
30ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kernel, 33ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kernel,
31 std::shared_ptr<Session> parent, 34 std::shared_ptr<Session> parent,
32 std::string name) { 35 std::string name) {
33 std::shared_ptr<ServerSession> session{std::make_shared<ServerSession>(kernel)}; 36 std::shared_ptr<ServerSession> session{std::make_shared<ServerSession>(kernel)};
34 37
35 session->request_event =
36 Core::Timing::CreateEvent(name, [session](std::uintptr_t, std::chrono::nanoseconds) {
37 session->CompleteSyncRequest();
38 });
39 session->name = std::move(name); 38 session->name = std::move(name);
40 session->parent = std::move(parent); 39 session->parent = std::move(parent);
40 session->service_thread = kernel.CreateServiceThread(session->name);
41 41
42 return MakeResult(std::move(session)); 42 return MakeResult(std::move(session));
43} 43}
@@ -130,8 +130,7 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con
130 } 130 }
131 } 131 }
132 132
133 LOG_CRITICAL(IPC, "Unknown domain command={}", 133 LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value());
134 static_cast<int>(domain_message_header.command.Value()));
135 ASSERT(false); 134 ASSERT(false);
136 return RESULT_SUCCESS; 135 return RESULT_SUCCESS;
137} 136}
@@ -143,16 +142,16 @@ ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<Thread> thread,
143 std::make_shared<HLERequestContext>(kernel, memory, SharedFrom(this), std::move(thread)); 142 std::make_shared<HLERequestContext>(kernel, memory, SharedFrom(this), std::move(thread));
144 143
145 context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); 144 context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
146 request_queue.Push(std::move(context)); 145
146 if (auto strong_ptr = service_thread.lock()) {
147 strong_ptr->QueueSyncRequest(*this, std::move(context));
148 return RESULT_SUCCESS;
149 }
147 150
148 return RESULT_SUCCESS; 151 return RESULT_SUCCESS;
149} 152}
150 153
151ResultCode ServerSession::CompleteSyncRequest() { 154ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) {
152 ASSERT(!request_queue.Empty());
153
154 auto& context = *request_queue.Front();
155
156 ResultCode result = RESULT_SUCCESS; 155 ResultCode result = RESULT_SUCCESS;
157 // If the session has been converted to a domain, handle the domain request 156 // If the session has been converted to a domain, handle the domain request
158 if (IsDomain() && context.HasDomainMessageHeader()) { 157 if (IsDomain() && context.HasDomainMessageHeader()) {
@@ -171,25 +170,20 @@ ResultCode ServerSession::CompleteSyncRequest() {
171 170
172 // Some service requests require the thread to block 171 // Some service requests require the thread to block
173 { 172 {
174 SchedulerLock lock(kernel); 173 KScopedSchedulerLock lock(kernel);
175 if (!context.IsThreadWaiting()) { 174 if (!context.IsThreadWaiting()) {
176 context.GetThread().ResumeFromWait(); 175 context.GetThread().ResumeFromWait();
177 context.GetThread().SetSynchronizationResults(nullptr, result); 176 context.GetThread().SetSynchronizationResults(nullptr, result);
178 } 177 }
179 } 178 }
180 179
181 request_queue.Pop();
182
183 return result; 180 return result;
184} 181}
185 182
186ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread, 183ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread,
187 Core::Memory::Memory& memory, 184 Core::Memory::Memory& memory,
188 Core::Timing::CoreTiming& core_timing) { 185 Core::Timing::CoreTiming& core_timing) {
189 const ResultCode result = QueueSyncRequest(std::move(thread), memory); 186 return QueueSyncRequest(std::move(thread), memory);
190 const auto delay = std::chrono::nanoseconds{kernel.IsMulticore() ? 0 : 20000};
191 core_timing.ScheduleEvent(delay, request_event, {});
192 return result;
193} 187}
194 188
195} // namespace Kernel 189} // namespace Kernel
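
QueueSyncRequest above holds only a weak_ptr to the service thread and promotes it with lock() before queueing, so a dying ServerSession never keeps the worker alive and a dead worker simply drops the request. A minimal sketch of that lock-then-post pattern, with hypothetical Worker/Session types rather than the real kernel classes:

#include <functional>
#include <memory>
#include <utility>

struct Worker {
    // A real worker would enqueue the task for another thread; running it
    // inline keeps this sketch self-contained.
    void Post(std::function<void()> task) {
        task();
    }
};

struct Session {
    std::weak_ptr<Worker> worker;

    bool Queue(std::function<void()> task) {
        if (auto strong = worker.lock()) { // worker still alive?
            strong->Post(std::move(task));
            return true;
        }
        return false; // worker already destroyed; drop the request
    }
};

int main() {
    auto owner = std::make_shared<Worker>(); // e.g. the kernel holds the strong reference
    Session session{owner};
    session.Queue([] { /* service the request */ }); // dispatched
    owner.reset();                                   // worker torn down
    session.Queue([] {});                            // silently dropped
}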
diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h
index d23e9ec68..e8d1d99ea 100644
--- a/src/core/hle/kernel/server_session.h
+++ b/src/core/hle/kernel/server_session.h
@@ -10,6 +10,7 @@
10#include <vector> 10#include <vector>
11 11
12#include "common/threadsafe_queue.h" 12#include "common/threadsafe_queue.h"
13#include "core/hle/kernel/service_thread.h"
13#include "core/hle/kernel/synchronization_object.h" 14#include "core/hle/kernel/synchronization_object.h"
14#include "core/hle/result.h" 15#include "core/hle/result.h"
15 16
@@ -43,6 +44,8 @@ class Thread;
43 * TLS buffer and control is transferred back to it. 44 * TLS buffer and control is transferred back to it.
44 */ 45 */
45class ServerSession final : public SynchronizationObject { 46class ServerSession final : public SynchronizationObject {
47 friend class ServiceThread;
48
46public: 49public:
47 explicit ServerSession(KernelCore& kernel); 50 explicit ServerSession(KernelCore& kernel);
48 ~ServerSession() override; 51 ~ServerSession() override;
@@ -132,7 +135,7 @@ private:
132 ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory); 135 ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory);
133 136
134 /// Completes a sync request from the emulated application. 137 /// Completes a sync request from the emulated application.
135 ResultCode CompleteSyncRequest(); 138 ResultCode CompleteSyncRequest(HLERequestContext& context);
136 139
137 /// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an 140 /// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an
138 /// object handle. 141 /// object handle.
@@ -163,11 +166,8 @@ private:
163 /// The name of this session (optional) 166 /// The name of this session (optional)
164 std::string name; 167 std::string name;
165 168
166 /// Core timing event used to schedule the service request at some point in the future 169 /// Thread to dispatch service requests
167 std::shared_ptr<Core::Timing::EventType> request_event; 170 std::weak_ptr<ServiceThread> service_thread;
168
169 /// Queue of scheduled service requests
170 Common::MPSCQueue<std::shared_ptr<Kernel::HLERequestContext>> request_queue;
171}; 171};
172 172
173} // namespace Kernel 173} // namespace Kernel
diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp
new file mode 100644
index 000000000..ee46f3e21
--- /dev/null
+++ b/src/core/hle/kernel/service_thread.cpp
@@ -0,0 +1,110 @@
1// Copyright 2020 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <condition_variable>
6#include <functional>
7#include <mutex>
8#include <thread>
9#include <vector>
10#include <queue>
11
12#include "common/assert.h"
13#include "common/scope_exit.h"
14#include "common/thread.h"
15#include "core/core.h"
16#include "core/hle/kernel/kernel.h"
17#include "core/hle/kernel/server_session.h"
18#include "core/hle/kernel/service_thread.h"
19#include "core/hle/lock.h"
20#include "video_core/renderer_base.h"
21
22namespace Kernel {
23
24class ServiceThread::Impl final {
25public:
26 explicit Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name);
27 ~Impl();
28
29 void QueueSyncRequest(ServerSession& session, std::shared_ptr<HLERequestContext>&& context);
30
31private:
32 std::vector<std::thread> threads;
33 std::queue<std::function<void()>> requests;
34 std::mutex queue_mutex;
35 std::condition_variable condition;
36 const std::string service_name;
37 bool stop{};
38};
39
40ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name)
41 : service_name{name} {
42 for (std::size_t i = 0; i < num_threads; ++i)
43 threads.emplace_back([this, &kernel] {
44 Common::SetCurrentThreadName(std::string{"yuzu:HleService:" + service_name}.c_str());
45
46 // Wait for first request before trying to acquire a render context
47 {
48 std::unique_lock lock{queue_mutex};
49 condition.wait(lock, [this] { return stop || !requests.empty(); });
50 }
51
52 kernel.RegisterHostThread();
53
54 while (true) {
55 std::function<void()> task;
56
57 {
58 std::unique_lock lock{queue_mutex};
59 condition.wait(lock, [this] { return stop || !requests.empty(); });
60 if (stop || requests.empty()) {
61 return;
62 }
63 task = std::move(requests.front());
64 requests.pop();
65 }
66
67 task();
68 }
69 });
70}
71
72void ServiceThread::Impl::QueueSyncRequest(ServerSession& session,
73 std::shared_ptr<HLERequestContext>&& context) {
74 {
75 std::unique_lock lock{queue_mutex};
76
77 // ServerSession owns the service thread, so we cannot capture a strong pointer here in the
78 // event that the ServerSession is terminated.
79 std::weak_ptr<ServerSession> weak_ptr{SharedFrom(&session)};
80 requests.emplace([weak_ptr, context{std::move(context)}]() {
81 if (auto strong_ptr = weak_ptr.lock()) {
82 strong_ptr->CompleteSyncRequest(*context);
83 }
84 });
85 }
86 condition.notify_one();
87}
88
89ServiceThread::Impl::~Impl() {
90 {
91 std::unique_lock lock{queue_mutex};
92 stop = true;
93 }
94 condition.notify_all();
95 for (std::thread& thread : threads) {
96 thread.join();
97 }
98}
99
100ServiceThread::ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name)
101 : impl{std::make_unique<Impl>(kernel, num_threads, name)} {}
102
103ServiceThread::~ServiceThread() = default;
104
105void ServiceThread::QueueSyncRequest(ServerSession& session,
106 std::shared_ptr<HLERequestContext>&& context) {
107 impl->QueueSyncRequest(session, std::move(context));
108}
109
110} // namespace Kernel
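
ServiceThread::Impl above is essentially a small worker pool: a task queue, a mutex, a condition variable, and a stop flag that the destructor sets before joining. The standalone sketch below shows the same shape with a hypothetical WorkerPool, not the yuzu class; unlike the loop above it drains any remaining tasks before exiting.

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

class WorkerPool {
public:
    explicit WorkerPool(std::size_t num_threads) {
        for (std::size_t i = 0; i < num_threads; ++i) {
            threads.emplace_back([this] {
                while (true) {
                    std::function<void()> task;
                    {
                        std::unique_lock lock{mutex};
                        // Sleep until there is work or shutdown was requested.
                        condition.wait(lock, [this] { return stop || !tasks.empty(); });
                        if (stop && tasks.empty()) {
                            return;
                        }
                        task = std::move(tasks.front());
                        tasks.pop();
                    }
                    task(); // run outside the lock
                }
            });
        }
    }

    ~WorkerPool() {
        {
            std::unique_lock lock{mutex};
            stop = true;
        }
        condition.notify_all();
        for (auto& thread : threads) {
            thread.join();
        }
    }

    void Post(std::function<void()> task) {
        {
            std::unique_lock lock{mutex};
            tasks.emplace(std::move(task));
        }
        condition.notify_one();
    }

private:
    std::vector<std::thread> threads;
    std::queue<std::function<void()>> tasks;
    std::mutex mutex;
    std::condition_variable condition;
    bool stop{};
};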
diff --git a/src/core/hle/kernel/service_thread.h b/src/core/hle/kernel/service_thread.h
new file mode 100644
index 000000000..025ab8fb5
--- /dev/null
+++ b/src/core/hle/kernel/service_thread.h
@@ -0,0 +1,28 @@
1// Copyright 2020 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <string>
9
10namespace Kernel {
11
12class HLERequestContext;
13class KernelCore;
14class ServerSession;
15
16class ServiceThread final {
17public:
18 explicit ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name);
19 ~ServiceThread();
20
21 void QueueSyncRequest(ServerSession& session, std::shared_ptr<HLERequestContext>&& context);
22
23private:
24 class Impl;
25 std::unique_ptr<Impl> impl;
26};
27
28} // namespace Kernel
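
The header above is a textbook pimpl: clients see only a forward-declared Impl behind a unique_ptr, so including service_thread.h does not drag in <thread>, <queue>, or the condition-variable machinery. A minimal sketch of the idiom with hypothetical Widget names; the key detail is that the destructor is declared in the header but defined (even as = default) in the .cpp, where Impl is complete, exactly as ServiceThread does above.

// widget.h (hypothetical)
#pragma once
#include <memory>

class Widget {
public:
    Widget();
    ~Widget();              // defined out of line, where Impl is complete
    void DoWork();

private:
    class Impl;             // only forward-declared for clients
    std::unique_ptr<Impl> impl;
};

// widget.cpp (hypothetical)
class Widget::Impl {
public:
    void DoWork() { /* heavy members (threads, queues, ...) live here */ }
};

Widget::Widget() : impl{std::make_unique<Impl>()} {}
Widget::~Widget() = default; // unique_ptr's deleter now sees the full Impl
void Widget::DoWork() {
    impl->DoWork();
}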
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index bafd1ced7..de3ed25da 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -24,6 +24,8 @@
24#include "core/hle/kernel/client_session.h" 24#include "core/hle/kernel/client_session.h"
25#include "core/hle/kernel/errors.h" 25#include "core/hle/kernel/errors.h"
26#include "core/hle/kernel/handle_table.h" 26#include "core/hle/kernel/handle_table.h"
27#include "core/hle/kernel/k_scheduler.h"
28#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
27#include "core/hle/kernel/kernel.h" 29#include "core/hle/kernel/kernel.h"
28#include "core/hle/kernel/memory/memory_block.h" 30#include "core/hle/kernel/memory/memory_block.h"
29#include "core/hle/kernel/memory/page_table.h" 31#include "core/hle/kernel/memory/page_table.h"
@@ -32,7 +34,6 @@
32#include "core/hle/kernel/process.h" 34#include "core/hle/kernel/process.h"
33#include "core/hle/kernel/readable_event.h" 35#include "core/hle/kernel/readable_event.h"
34#include "core/hle/kernel/resource_limit.h" 36#include "core/hle/kernel/resource_limit.h"
35#include "core/hle/kernel/scheduler.h"
36#include "core/hle/kernel/shared_memory.h" 37#include "core/hle/kernel/shared_memory.h"
37#include "core/hle/kernel/svc.h" 38#include "core/hle/kernel/svc.h"
38#include "core/hle/kernel/svc_types.h" 39#include "core/hle/kernel/svc_types.h"
@@ -234,8 +235,7 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si
234 235
235static ResultCode SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask, 236static ResultCode SetMemoryAttribute32(Core::System& system, u32 address, u32 size, u32 mask,
236 u32 attribute) { 237 u32 attribute) {
237 return SetMemoryAttribute(system, static_cast<VAddr>(address), static_cast<std::size_t>(size), 238 return SetMemoryAttribute(system, address, size, mask, attribute);
238 mask, attribute);
239} 239}
240 240
241/// Maps a memory range into a different range. 241/// Maps a memory range into a different range.
@@ -255,8 +255,7 @@ static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr
255} 255}
256 256
257static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) { 257static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
258 return MapMemory(system, static_cast<VAddr>(dst_addr), static_cast<VAddr>(src_addr), 258 return MapMemory(system, dst_addr, src_addr, size);
259 static_cast<std::size_t>(size));
260} 259}
261 260
262/// Unmaps a region that was previously mapped with svcMapMemory 261/// Unmaps a region that was previously mapped with svcMapMemory
@@ -276,8 +275,7 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad
276} 275}
277 276
278static ResultCode UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) { 277static ResultCode UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u32 size) {
279 return UnmapMemory(system, static_cast<VAddr>(dst_addr), static_cast<VAddr>(src_addr), 278 return UnmapMemory(system, dst_addr, src_addr, size);
280 static_cast<std::size_t>(size));
281} 279}
282 280
283/// Connect to an OS service given the port name, returns the handle to the port to out 281/// Connect to an OS service given the port name, returns the handle to the port to out
@@ -332,7 +330,8 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
332 330
333/// Makes a blocking IPC call to an OS service. 331/// Makes a blocking IPC call to an OS service.
334static ResultCode SendSyncRequest(Core::System& system, Handle handle) { 332static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
335 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); 333 auto& kernel = system.Kernel();
334 const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
336 std::shared_ptr<ClientSession> session = handle_table.Get<ClientSession>(handle); 335 std::shared_ptr<ClientSession> session = handle_table.Get<ClientSession>(handle);
337 if (!session) { 336 if (!session) {
338 LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle); 337 LOG_ERROR(Kernel_SVC, "called with invalid handle=0x{:08X}", handle);
@@ -341,9 +340,9 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
341 340
342 LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName()); 341 LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
343 342
344 auto thread = system.CurrentScheduler().GetCurrentThread(); 343 auto thread = kernel.CurrentScheduler()->GetCurrentThread();
345 { 344 {
346 SchedulerLock lock(system.Kernel()); 345 KScopedSchedulerLock lock(kernel);
347 thread->InvalidateHLECallback(); 346 thread->InvalidateHLECallback();
348 thread->SetStatus(ThreadStatus::WaitIPC); 347 thread->SetStatus(ThreadStatus::WaitIPC);
349 session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming()); 348 session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
@@ -352,12 +351,12 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
352 if (thread->HasHLECallback()) { 351 if (thread->HasHLECallback()) {
353 Handle event_handle = thread->GetHLETimeEvent(); 352 Handle event_handle = thread->GetHLETimeEvent();
354 if (event_handle != InvalidHandle) { 353 if (event_handle != InvalidHandle) {
355 auto& time_manager = system.Kernel().TimeManager(); 354 auto& time_manager = kernel.TimeManager();
356 time_manager.UnscheduleTimeEvent(event_handle); 355 time_manager.UnscheduleTimeEvent(event_handle);
357 } 356 }
358 357
359 { 358 {
360 SchedulerLock lock(system.Kernel()); 359 KScopedSchedulerLock lock(kernel);
361 auto* sync_object = thread->GetHLESyncObject(); 360 auto* sync_object = thread->GetHLESyncObject();
362 sync_object->RemoveWaitingThread(SharedFrom(thread)); 361 sync_object->RemoveWaitingThread(SharedFrom(thread));
363 } 362 }
@@ -531,8 +530,7 @@ static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_hand
531 530
532static ResultCode ArbitrateLock32(Core::System& system, Handle holding_thread_handle, 531static ResultCode ArbitrateLock32(Core::System& system, Handle holding_thread_handle,
533 u32 mutex_addr, Handle requesting_thread_handle) { 532 u32 mutex_addr, Handle requesting_thread_handle) {
534 return ArbitrateLock(system, holding_thread_handle, static_cast<VAddr>(mutex_addr), 533 return ArbitrateLock(system, holding_thread_handle, mutex_addr, requesting_thread_handle);
535 requesting_thread_handle);
536} 534}
537 535
538/// Unlock a mutex 536/// Unlock a mutex
@@ -555,7 +553,7 @@ static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) {
555} 553}
556 554
557static ResultCode ArbitrateUnlock32(Core::System& system, u32 mutex_addr) { 555static ResultCode ArbitrateUnlock32(Core::System& system, u32 mutex_addr) {
558 return ArbitrateUnlock(system, static_cast<VAddr>(mutex_addr)); 556 return ArbitrateUnlock(system, mutex_addr);
559} 557}
560 558
561enum class BreakType : u32 { 559enum class BreakType : u32 {
@@ -658,7 +656,6 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
658 info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt); 656 info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt);
659 657
660 if (!break_reason.signal_debugger) { 658 if (!break_reason.signal_debugger) {
661 SchedulerLock lock(system.Kernel());
662 LOG_CRITICAL( 659 LOG_CRITICAL(
663 Debug_Emulated, 660 Debug_Emulated,
664 "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}", 661 "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}",
@@ -666,22 +663,18 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) {
666 663
667 handle_debug_buffer(info1, info2); 664 handle_debug_buffer(info1, info2);
668 665
669 auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); 666 auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
670 const auto thread_processor_id = current_thread->GetProcessorID(); 667 const auto thread_processor_id = current_thread->GetProcessorID();
671 system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace(); 668 system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace();
672
673 // Kill the current thread
674 system.Kernel().ExceptionalExit();
675 current_thread->Stop();
676 } 669 }
677} 670}
678 671
679static void Break32(Core::System& system, u32 reason, u32 info1, u32 info2) { 672static void Break32(Core::System& system, u32 reason, u32 info1, u32 info2) {
680 Break(system, reason, static_cast<u64>(info1), static_cast<u64>(info2)); 673 Break(system, reason, info1, info2);
681} 674}
682 675
683/// Used to output a message on a debug hardware unit - does nothing on a retail unit 676/// Used to output a message on a debug hardware unit - does nothing on a retail unit
684static void OutputDebugString([[maybe_unused]] Core::System& system, VAddr address, u64 len) { 677static void OutputDebugString(Core::System& system, VAddr address, u64 len) {
685 if (len == 0) { 678 if (len == 0) {
686 return; 679 return;
687 } 680 }
@@ -922,7 +915,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
922 } 915 }
923 916
924 const auto& core_timing = system.CoreTiming(); 917 const auto& core_timing = system.CoreTiming();
925 const auto& scheduler = system.CurrentScheduler(); 918 const auto& scheduler = *system.Kernel().CurrentScheduler();
926 const auto* const current_thread = scheduler.GetCurrentThread(); 919 const auto* const current_thread = scheduler.GetCurrentThread();
927 const bool same_thread = current_thread == thread.get(); 920 const bool same_thread = current_thread == thread.get();
928 921
@@ -948,7 +941,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
948 941
949static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low, 942static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_high, u32 sub_id_low,
950 u32 info_id, u32 handle, u32 sub_id_high) { 943 u32 info_id, u32 handle, u32 sub_id_high) {
951 const u64 sub_id{static_cast<u64>(sub_id_low | (static_cast<u64>(sub_id_high) << 32))}; 944 const u64 sub_id{u64{sub_id_low} | (u64{sub_id_high} << 32)};
952 u64 res_value{}; 945 u64 res_value{};
953 946
954 const ResultCode result{GetInfo(system, &res_value, info_id, handle, sub_id)}; 947 const ResultCode result{GetInfo(system, &res_value, info_id, handle, sub_id)};
@@ -1009,7 +1002,7 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size)
1009} 1002}
1010 1003
1011static ResultCode MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) { 1004static ResultCode MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
1012 return MapPhysicalMemory(system, static_cast<VAddr>(addr), static_cast<std::size_t>(size)); 1005 return MapPhysicalMemory(system, addr, size);
1013} 1006}
1014 1007
1015/// Unmaps memory previously mapped via MapPhysicalMemory 1008/// Unmaps memory previously mapped via MapPhysicalMemory
@@ -1063,7 +1056,7 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size
1063} 1056}
1064 1057
1065static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) { 1058static ResultCode UnmapPhysicalMemory32(Core::System& system, u32 addr, u32 size) {
1066 return UnmapPhysicalMemory(system, static_cast<VAddr>(addr), static_cast<std::size_t>(size)); 1059 return UnmapPhysicalMemory(system, addr, size);
1067} 1060}
1068 1061
1069/// Sets the thread activity 1062/// Sets the thread activity
@@ -1090,7 +1083,7 @@ static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act
1090 return ERR_INVALID_HANDLE; 1083 return ERR_INVALID_HANDLE;
1091 } 1084 }
1092 1085
1093 if (thread.get() == system.CurrentScheduler().GetCurrentThread()) { 1086 if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) {
1094 LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread"); 1087 LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
1095 return ERR_BUSY; 1088 return ERR_BUSY;
1096 } 1089 }
@@ -1123,7 +1116,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H
1123 return ERR_INVALID_HANDLE; 1116 return ERR_INVALID_HANDLE;
1124 } 1117 }
1125 1118
1126 if (thread.get() == system.CurrentScheduler().GetCurrentThread()) { 1119 if (thread.get() == system.Kernel().CurrentScheduler()->GetCurrentThread()) {
1127 LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread"); 1120 LOG_ERROR(Kernel_SVC, "The thread handle specified is the current running thread");
1128 return ERR_BUSY; 1121 return ERR_BUSY;
1129 } 1122 }
@@ -1144,7 +1137,7 @@ static ResultCode GetThreadContext(Core::System& system, VAddr thread_context, H
1144} 1137}
1145 1138
1146static ResultCode GetThreadContext32(Core::System& system, u32 thread_context, Handle handle) { 1139static ResultCode GetThreadContext32(Core::System& system, u32 thread_context, Handle handle) {
1147 return GetThreadContext(system, static_cast<VAddr>(thread_context), handle); 1140 return GetThreadContext(system, thread_context, handle);
1148} 1141}
1149 1142
1150/// Gets the priority for the specified thread 1143/// Gets the priority for the specified thread
@@ -1281,8 +1274,7 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han
1281 1274
1282static ResultCode MapSharedMemory32(Core::System& system, Handle shared_memory_handle, u32 addr, 1275static ResultCode MapSharedMemory32(Core::System& system, Handle shared_memory_handle, u32 addr,
1283 u32 size, u32 permissions) { 1276 u32 size, u32 permissions) {
1284 return MapSharedMemory(system, shared_memory_handle, static_cast<VAddr>(addr), 1277 return MapSharedMemory(system, shared_memory_handle, addr, size, permissions);
1285 static_cast<std::size_t>(size), permissions);
1286} 1278}
1287 1279
1288static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address, 1280static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
@@ -1480,7 +1472,7 @@ static void ExitProcess(Core::System& system) {
1480 current_process->PrepareForTermination(); 1472 current_process->PrepareForTermination();
1481 1473
1482 // Kill the current thread 1474 // Kill the current thread
1483 system.CurrentScheduler().GetCurrentThread()->Stop(); 1475 system.Kernel().CurrentScheduler()->GetCurrentThread()->Stop();
1484} 1476}
1485 1477
1486static void ExitProcess32(Core::System& system) { 1478static void ExitProcess32(Core::System& system) {
@@ -1552,8 +1544,7 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e
1552 1544
1553static ResultCode CreateThread32(Core::System& system, Handle* out_handle, u32 priority, 1545static ResultCode CreateThread32(Core::System& system, Handle* out_handle, u32 priority,
1554 u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) { 1546 u32 entry_point, u32 arg, u32 stack_top, s32 processor_id) {
1555 return CreateThread(system, out_handle, static_cast<VAddr>(entry_point), static_cast<u64>(arg), 1547 return CreateThread(system, out_handle, entry_point, arg, stack_top, priority, processor_id);
1556 static_cast<VAddr>(stack_top), priority, processor_id);
1557} 1548}
1558 1549
1559/// Starts the thread for the provided handle 1550/// Starts the thread for the provided handle
@@ -1581,8 +1572,8 @@ static ResultCode StartThread32(Core::System& system, Handle thread_handle) {
1581static void ExitThread(Core::System& system) { 1572static void ExitThread(Core::System& system) {
1582 LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC()); 1573 LOG_DEBUG(Kernel_SVC, "called, pc=0x{:08X}", system.CurrentArmInterface().GetPC());
1583 1574
1584 auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); 1575 auto* const current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
1585 system.GlobalScheduler().RemoveThread(SharedFrom(current_thread)); 1576 system.GlobalSchedulerContext().RemoveThread(SharedFrom(current_thread));
1586 current_thread->Stop(); 1577 current_thread->Stop();
1587} 1578}
1588 1579
@@ -1592,53 +1583,39 @@ static void ExitThread32(Core::System& system) {
1592 1583
1593/// Sleep the current thread 1584/// Sleep the current thread
1594static void SleepThread(Core::System& system, s64 nanoseconds) { 1585static void SleepThread(Core::System& system, s64 nanoseconds) {
1595 LOG_DEBUG(Kernel_SVC, "called nanoseconds={}", nanoseconds); 1586 LOG_TRACE(Kernel_SVC, "called nanoseconds={}", nanoseconds);
1596 1587
1597 enum class SleepType : s64 { 1588 enum class SleepType : s64 {
1598 YieldWithoutLoadBalancing = 0, 1589 YieldWithoutCoreMigration = 0,
1599 YieldWithLoadBalancing = -1, 1590 YieldWithCoreMigration = -1,
1600 YieldAndWaitForLoadBalancing = -2, 1591 YieldAndWaitForLoadBalancing = -2,
1601 }; 1592 };
1602 1593
1603 auto& scheduler = system.CurrentScheduler(); 1594 auto& scheduler = *system.Kernel().CurrentScheduler();
1604 auto* const current_thread = scheduler.GetCurrentThread();
1605 bool is_redundant = false;
1606
1607 if (nanoseconds <= 0) { 1595 if (nanoseconds <= 0) {
1608 switch (static_cast<SleepType>(nanoseconds)) { 1596 switch (static_cast<SleepType>(nanoseconds)) {
1609 case SleepType::YieldWithoutLoadBalancing: { 1597 case SleepType::YieldWithoutCoreMigration: {
1610 auto pair = current_thread->YieldSimple(); 1598 scheduler.YieldWithoutCoreMigration();
1611 is_redundant = pair.second;
1612 break; 1599 break;
1613 } 1600 }
1614 case SleepType::YieldWithLoadBalancing: { 1601 case SleepType::YieldWithCoreMigration: {
1615 auto pair = current_thread->YieldAndBalanceLoad(); 1602 scheduler.YieldWithCoreMigration();
1616 is_redundant = pair.second;
1617 break; 1603 break;
1618 } 1604 }
1619 case SleepType::YieldAndWaitForLoadBalancing: { 1605 case SleepType::YieldAndWaitForLoadBalancing: {
1620 auto pair = current_thread->YieldAndWaitForLoadBalancing(); 1606 scheduler.YieldToAnyThread();
1621 is_redundant = pair.second;
1622 break; 1607 break;
1623 } 1608 }
1624 default: 1609 default:
1625 UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds); 1610 UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
1626 } 1611 }
1627 } else { 1612 } else {
1628 current_thread->Sleep(nanoseconds); 1613 scheduler.GetCurrentThread()->Sleep(nanoseconds);
1629 }
1630
1631 if (is_redundant && !system.Kernel().IsMulticore()) {
1632 system.Kernel().ExitSVCProfile();
1633 system.CoreTiming().AddTicks(1000U);
1634 system.GetCpuManager().PreemptSingleCore();
1635 system.Kernel().EnterSVCProfile();
1636 } 1614 }
1637} 1615}
1638 1616
1639static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanoseconds_high) { 1617static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanoseconds_high) {
1640 const s64 nanoseconds = static_cast<s64>(static_cast<u64>(nanoseconds_low) | 1618 const auto nanoseconds = static_cast<s64>(u64{nanoseconds_low} | (u64{nanoseconds_high} << 32));
1641 (static_cast<u64>(nanoseconds_high) << 32));
1642 SleepThread(system, nanoseconds); 1619 SleepThread(system, nanoseconds);
1643} 1620}
1644 1621
@@ -1668,10 +1645,10 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
1668 ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); 1645 ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
1669 auto& kernel = system.Kernel(); 1646 auto& kernel = system.Kernel();
1670 Handle event_handle; 1647 Handle event_handle;
1671 Thread* current_thread = system.CurrentScheduler().GetCurrentThread(); 1648 Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
1672 auto* const current_process = system.Kernel().CurrentProcess(); 1649 auto* const current_process = kernel.CurrentProcess();
1673 { 1650 {
1674 SchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds); 1651 KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
1675 const auto& handle_table = current_process->GetHandleTable(); 1652 const auto& handle_table = current_process->GetHandleTable();
1676 std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle); 1653 std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
1677 ASSERT(thread); 1654 ASSERT(thread);
@@ -1707,7 +1684,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
1707 } 1684 }
1708 1685
1709 { 1686 {
1710 SchedulerLock lock(kernel); 1687 KScopedSchedulerLock lock(kernel);
1711 1688
1712 auto* owner = current_thread->GetLockOwner(); 1689 auto* owner = current_thread->GetLockOwner();
1713 if (owner != nullptr) { 1690 if (owner != nullptr) {
@@ -1724,10 +1701,8 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
1724static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 mutex_addr, 1701static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 mutex_addr,
1725 u32 condition_variable_addr, Handle thread_handle, 1702 u32 condition_variable_addr, Handle thread_handle,
1726 u32 nanoseconds_low, u32 nanoseconds_high) { 1703 u32 nanoseconds_low, u32 nanoseconds_high) {
1727 const s64 nanoseconds = 1704 const auto nanoseconds = static_cast<s64>(nanoseconds_low | (u64{nanoseconds_high} << 32));
1728 static_cast<s64>(nanoseconds_low | (static_cast<u64>(nanoseconds_high) << 32)); 1705 return WaitProcessWideKeyAtomic(system, mutex_addr, condition_variable_addr, thread_handle,
1729 return WaitProcessWideKeyAtomic(system, static_cast<VAddr>(mutex_addr),
1730 static_cast<VAddr>(condition_variable_addr), thread_handle,
1731 nanoseconds); 1706 nanoseconds);
1732} 1707}
1733 1708
@@ -1740,7 +1715,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
1740 1715
1741 // Retrieve a list of all threads that are waiting for this condition variable. 1716 // Retrieve a list of all threads that are waiting for this condition variable.
1742 auto& kernel = system.Kernel(); 1717 auto& kernel = system.Kernel();
1743 SchedulerLock lock(kernel); 1718 KScopedSchedulerLock lock(kernel);
1744 auto* const current_process = kernel.CurrentProcess(); 1719 auto* const current_process = kernel.CurrentProcess();
1745 std::vector<std::shared_ptr<Thread>> waiting_threads = 1720 std::vector<std::shared_ptr<Thread>> waiting_threads =
1746 current_process->GetConditionVariableThreads(condition_variable_addr); 1721 current_process->GetConditionVariableThreads(condition_variable_addr);
@@ -1833,8 +1808,8 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type,
1833 1808
1834static ResultCode WaitForAddress32(Core::System& system, u32 address, u32 type, s32 value, 1809static ResultCode WaitForAddress32(Core::System& system, u32 address, u32 type, s32 value,
1835 u32 timeout_low, u32 timeout_high) { 1810 u32 timeout_low, u32 timeout_high) {
1836 s64 timeout = static_cast<s64>(timeout_low | (static_cast<u64>(timeout_high) << 32)); 1811 const auto timeout = static_cast<s64>(timeout_low | (u64{timeout_high} << 32));
1837 return WaitForAddress(system, static_cast<VAddr>(address), type, value, timeout); 1812 return WaitForAddress(system, address, type, value, timeout);
1838} 1813}
1839 1814
1840// Signals to an address (via Address Arbiter) 1815// Signals to an address (via Address Arbiter)
@@ -1862,7 +1837,7 @@ static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type,
1862 1837
1863static ResultCode SignalToAddress32(Core::System& system, u32 address, u32 type, s32 value, 1838static ResultCode SignalToAddress32(Core::System& system, u32 address, u32 type, s32 value,
1864 s32 num_to_wake) { 1839 s32 num_to_wake) {
1865 return SignalToAddress(system, static_cast<VAddr>(address), type, value, num_to_wake); 1840 return SignalToAddress(system, address, type, value, num_to_wake);
1866} 1841}
1867 1842
1868static void KernelDebug([[maybe_unused]] Core::System& system, 1843static void KernelDebug([[maybe_unused]] Core::System& system,
@@ -1893,7 +1868,7 @@ static u64 GetSystemTick(Core::System& system) {
1893} 1868}
1894 1869
1895static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high) { 1870static void GetSystemTick32(Core::System& system, u32* time_low, u32* time_high) {
1896 u64 time = GetSystemTick(system); 1871 const auto time = GetSystemTick(system);
1897 *time_low = static_cast<u32>(time); 1872 *time_low = static_cast<u32>(time);
1898 *time_high = static_cast<u32>(time >> 32); 1873 *time_high = static_cast<u32>(time >> 32);
1899} 1874}
@@ -1984,8 +1959,7 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd
1984 1959
1985static ResultCode CreateTransferMemory32(Core::System& system, Handle* handle, u32 addr, u32 size, 1960static ResultCode CreateTransferMemory32(Core::System& system, Handle* handle, u32 addr, u32 size,
1986 u32 permissions) { 1961 u32 permissions) {
1987 return CreateTransferMemory(system, handle, static_cast<VAddr>(addr), 1962 return CreateTransferMemory(system, handle, addr, size, permissions);
1988 static_cast<std::size_t>(size), permissions);
1989} 1963}
1990 1964
1991static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core, 1965static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core,
@@ -2003,7 +1977,7 @@ static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle,
2003 } 1977 }
2004 1978
2005 *core = thread->GetIdealCore(); 1979 *core = thread->GetIdealCore();
2006 *mask = thread->GetAffinityMask(); 1980 *mask = thread->GetAffinityMask().GetAffinityMask();
2007 1981
2008 return RESULT_SUCCESS; 1982 return RESULT_SUCCESS;
2009} 1983}
@@ -2075,8 +2049,7 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle,
2075 2049
2076static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, u32 core, 2050static ResultCode SetThreadCoreMask32(Core::System& system, Handle thread_handle, u32 core,
2077 u32 affinity_mask_low, u32 affinity_mask_high) { 2051 u32 affinity_mask_low, u32 affinity_mask_high) {
2078 const u64 affinity_mask = 2052 const auto affinity_mask = u64{affinity_mask_low} | (u64{affinity_mask_high} << 32);
2079 static_cast<u64>(affinity_mask_low) | (static_cast<u64>(affinity_mask_high) << 32);
2080 return SetThreadCoreMask(system, thread_handle, core, affinity_mask); 2053 return SetThreadCoreMask(system, thread_handle, core, affinity_mask);
2081} 2054}
2082 2055
@@ -2341,9 +2314,10 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd
2341 return RESULT_SUCCESS; 2314 return RESULT_SUCCESS;
2342} 2315}
2343 2316
2344static ResultCode FlushProcessDataCache32(Core::System& system, Handle handle, u32 address, 2317static ResultCode FlushProcessDataCache32([[maybe_unused]] Core::System& system,
2345 u32 size) { 2318 [[maybe_unused]] Handle handle,
2346 // Note(Blinkhawk): For emulation purposes of the data cache this is mostly a nope 2319 [[maybe_unused]] u32 address, [[maybe_unused]] u32 size) {
2320 // Note(Blinkhawk): For emulation purposes of the data cache this is mostly a no-op,
2347 // as all emulation is done in the same cache level in host architecture, thus data cache 2321 // as all emulation is done in the same cache level in host architecture, thus data cache
2348 // does not need flushing. 2322 // does not need flushing.
2349 LOG_DEBUG(Kernel_SVC, "called"); 2323 LOG_DEBUG(Kernel_SVC, "called");
@@ -2639,6 +2613,9 @@ void Call(Core::System& system, u32 immediate) {
2639 auto& kernel = system.Kernel(); 2613 auto& kernel = system.Kernel();
2640 kernel.EnterSVCProfile(); 2614 kernel.EnterSVCProfile();
2641 2615
2616 auto* thread = kernel.CurrentScheduler()->GetCurrentThread();
2617 thread->SetContinuousOnSVC(true);
2618
2642 const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate) 2619 const FunctionDef* info = system.CurrentProcess()->Is64BitProcess() ? GetSVCInfo64(immediate)
2643 : GetSVCInfo32(immediate); 2620 : GetSVCInfo32(immediate);
2644 if (info) { 2621 if (info) {
@@ -2652,6 +2629,12 @@ void Call(Core::System& system, u32 immediate) {
2652 } 2629 }
2653 2630
2654 kernel.ExitSVCProfile(); 2631 kernel.ExitSVCProfile();
2632
2633 if (!thread->IsContinuousOnSVC()) {
2634 auto* host_context = thread->GetHostContext().get();
2635 host_context->Rewind();
2636 }
2637
2655 system.EnterDynarmicProfile(); 2638 system.EnterDynarmicProfile();
2656} 2639}
2657 2640
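
Most of the 32-bit SVC wrappers in this diff drop their static_casts in favour of u64{low} | (u64{high} << 32): brace-initialization widens the operand before the shift and would refuse a narrowing conversion, so the high word cannot be silently truncated. A small self-contained illustration, using the usual u32/u64 aliases rather than yuzu's common_types.h:

#include <cstdint>

using u32 = std::uint32_t;
using u64 = std::uint64_t;
using s64 = std::int64_t;

constexpr u64 Compose64(u32 low, u32 high) {
    // u64{high} widens before the shift, so bits 32..63 are not lost;
    // brace-initialization would also reject any narrowing conversion.
    return u64{low} | (u64{high} << 32);
}

static_assert(Compose64(0x89ABCDEF, 0x01234567) == 0x0123456789ABCDEFULL,
              "low/high halves compose in the expected order");

// Signed quantities (timeouts, nanosecond counts) are composed the same way
// and then converted once at the end:
constexpr s64 ComposeSigned(u32 low, u32 high) {
    return static_cast<s64>(Compose64(low, high));
}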
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 986724beb..11e1d8e2d 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -23,8 +23,8 @@ enum class MemoryState : u32 {
23 Ipc = 0x0A, 23 Ipc = 0x0A,
24 Stack = 0x0B, 24 Stack = 0x0B,
25 ThreadLocal = 0x0C, 25 ThreadLocal = 0x0C,
26 Transfered = 0x0D, 26 Transferred = 0x0D,
27 SharedTransfered = 0x0E, 27 SharedTransferred = 0x0E,
28 SharedCode = 0x0F, 28 SharedCode = 0x0F,
29 Inaccessible = 0x10, 29 Inaccessible = 0x10,
30 NonSecureIpc = 0x11, 30 NonSecureIpc = 0x11,
diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp
index 8b875d853..d3f520ea2 100644
--- a/src/core/hle/kernel/synchronization.cpp
+++ b/src/core/hle/kernel/synchronization.cpp
@@ -5,8 +5,9 @@
5#include "core/core.h" 5#include "core/core.h"
6#include "core/hle/kernel/errors.h" 6#include "core/hle/kernel/errors.h"
7#include "core/hle/kernel/handle_table.h" 7#include "core/hle/kernel/handle_table.h"
8#include "core/hle/kernel/k_scheduler.h"
9#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
8#include "core/hle/kernel/kernel.h" 10#include "core/hle/kernel/kernel.h"
9#include "core/hle/kernel/scheduler.h"
10#include "core/hle/kernel/synchronization.h" 11#include "core/hle/kernel/synchronization.h"
11#include "core/hle/kernel/synchronization_object.h" 12#include "core/hle/kernel/synchronization_object.h"
12#include "core/hle/kernel/thread.h" 13#include "core/hle/kernel/thread.h"
@@ -18,7 +19,7 @@ Synchronization::Synchronization(Core::System& system) : system{system} {}
18 19
19void Synchronization::SignalObject(SynchronizationObject& obj) const { 20void Synchronization::SignalObject(SynchronizationObject& obj) const {
20 auto& kernel = system.Kernel(); 21 auto& kernel = system.Kernel();
21 SchedulerLock lock(kernel); 22 KScopedSchedulerLock lock(kernel);
22 if (obj.IsSignaled()) { 23 if (obj.IsSignaled()) {
23 for (auto thread : obj.GetWaitingThreads()) { 24 for (auto thread : obj.GetWaitingThreads()) {
24 if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) { 25 if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) {
@@ -37,10 +38,10 @@ void Synchronization::SignalObject(SynchronizationObject& obj) const {
37std::pair<ResultCode, Handle> Synchronization::WaitFor( 38std::pair<ResultCode, Handle> Synchronization::WaitFor(
38 std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) { 39 std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) {
39 auto& kernel = system.Kernel(); 40 auto& kernel = system.Kernel();
40 auto* const thread = system.CurrentScheduler().GetCurrentThread(); 41 auto* const thread = kernel.CurrentScheduler()->GetCurrentThread();
41 Handle event_handle = InvalidHandle; 42 Handle event_handle = InvalidHandle;
42 { 43 {
43 SchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds); 44 KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
44 const auto itr = 45 const auto itr =
45 std::find_if(sync_objects.begin(), sync_objects.end(), 46 std::find_if(sync_objects.begin(), sync_objects.end(),
46 [thread](const std::shared_ptr<SynchronizationObject>& object) { 47 [thread](const std::shared_ptr<SynchronizationObject>& object) {
@@ -89,7 +90,7 @@ std::pair<ResultCode, Handle> Synchronization::WaitFor(
89 } 90 }
90 91
91 { 92 {
92 SchedulerLock lock(kernel); 93 KScopedSchedulerLock lock(kernel);
93 ResultCode signaling_result = thread->GetSignalingResult(); 94 ResultCode signaling_result = thread->GetSignalingResult();
94 SynchronizationObject* signaling_object = thread->GetSignalingObject(); 95 SynchronizationObject* signaling_object = thread->GetSignalingObject();
95 thread->SetSynchronizationObjects(nullptr); 96 thread->SetSynchronizationObjects(nullptr);
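
Synchronization::WaitFor follows the same shape as the other timed waits in this patch: take a KScopedSchedulerLockAndSleep for the setup, cancel the sleep if the wait can be satisfied immediately, and otherwise let the destructor arm the timeout. A generic sketch of that helper, with a hypothetical Timer standing in for the kernel's TimeManager:

#include <cstdint>

struct Timer {
    void ScheduleWakeup(std::int64_t nanoseconds) { /* arm a timeout event */ }
};

class ScopedLockAndSleep {
public:
    ScopedLockAndSleep(Timer& timer_, std::int64_t nanoseconds_)
        : timer{timer_}, nanoseconds{nanoseconds_} {
        // acquire the scheduler lock here
    }

    void CancelSleep() {
        cancelled = true; // wait satisfied immediately; skip the timeout
    }

    ~ScopedLockAndSleep() {
        if (!cancelled && nanoseconds > 0) {
            timer.ScheduleWakeup(nanoseconds);
        }
        // release the scheduler lock here
    }

private:
    Timer& timer;
    std::int64_t nanoseconds;
    bool cancelled{};
};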
diff --git a/src/core/hle/kernel/synchronization_object.h b/src/core/hle/kernel/synchronization_object.h
index f89b24204..7408ed51f 100644
--- a/src/core/hle/kernel/synchronization_object.h
+++ b/src/core/hle/kernel/synchronization_object.h
@@ -4,6 +4,7 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <atomic>
7#include <memory> 8#include <memory>
8#include <vector> 9#include <vector>
9 10
@@ -56,7 +57,7 @@ public:
56 void ClearWaitingThreads(); 57 void ClearWaitingThreads();
57 58
58protected: 59protected:
59 bool is_signaled{}; // Tells if this sync object is signalled; 60 std::atomic_bool is_signaled{}; // Tells if this sync object is signaled
60 61
61private: 62private:
62 /// Threads waiting for this object to become available 63 /// Threads waiting for this object to become available
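
is_signaled becomes std::atomic_bool because, with service threads, an HLE request can now complete on a host worker thread while emulated cores read the flag concurrently; a plain bool would be a data race. A minimal illustration of publishing a flag across two host threads (generic std::atomic usage, not the kernel code):

#include <atomic>
#include <thread>

std::atomic_bool signaled{false};

void Producer() {
    // ... finish servicing a request ...
    signaled.store(true, std::memory_order_release);
}

void Consumer() {
    while (!signaled.load(std::memory_order_acquire)) {
        std::this_thread::yield(); // a plain bool here would be undefined behaviour
    }
    // safe to observe results written before the store above
}

int main() {
    std::thread a{Producer};
    std::thread b{Consumer};
    a.join();
    b.join();
}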
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index d132aba34..a4f9e0d97 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -12,17 +12,16 @@
12#include "common/fiber.h" 12#include "common/fiber.h"
13#include "common/logging/log.h" 13#include "common/logging/log.h"
14#include "common/thread_queue_list.h" 14#include "common/thread_queue_list.h"
15#include "core/arm/arm_interface.h"
16#include "core/arm/unicorn/arm_unicorn.h"
17#include "core/core.h" 15#include "core/core.h"
18#include "core/cpu_manager.h" 16#include "core/cpu_manager.h"
19#include "core/hardware_properties.h" 17#include "core/hardware_properties.h"
20#include "core/hle/kernel/errors.h" 18#include "core/hle/kernel/errors.h"
21#include "core/hle/kernel/handle_table.h" 19#include "core/hle/kernel/handle_table.h"
20#include "core/hle/kernel/k_scheduler.h"
21#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
22#include "core/hle/kernel/kernel.h" 22#include "core/hle/kernel/kernel.h"
23#include "core/hle/kernel/object.h" 23#include "core/hle/kernel/object.h"
24#include "core/hle/kernel/process.h" 24#include "core/hle/kernel/process.h"
25#include "core/hle/kernel/scheduler.h"
26#include "core/hle/kernel/thread.h" 25#include "core/hle/kernel/thread.h"
27#include "core/hle/kernel/time_manager.h" 26#include "core/hle/kernel/time_manager.h"
28#include "core/hle/result.h" 27#include "core/hle/result.h"
@@ -52,7 +51,7 @@ Thread::~Thread() = default;
52 51
53void Thread::Stop() { 52void Thread::Stop() {
54 { 53 {
55 SchedulerLock lock(kernel); 54 KScopedSchedulerLock lock(kernel);
56 SetStatus(ThreadStatus::Dead); 55 SetStatus(ThreadStatus::Dead);
57 Signal(); 56 Signal();
58 kernel.GlobalHandleTable().Close(global_handle); 57 kernel.GlobalHandleTable().Close(global_handle);
@@ -63,14 +62,13 @@ void Thread::Stop() {
63 // Mark the TLS slot in the thread's page as free. 62 // Mark the TLS slot in the thread's page as free.
64 owner_process->FreeTLSRegion(tls_address); 63 owner_process->FreeTLSRegion(tls_address);
65 } 64 }
66 arm_interface.reset();
67 has_exited = true; 65 has_exited = true;
68 } 66 }
69 global_handle = 0; 67 global_handle = 0;
70} 68}
71 69
72void Thread::ResumeFromWait() { 70void Thread::ResumeFromWait() {
73 SchedulerLock lock(kernel); 71 KScopedSchedulerLock lock(kernel);
74 switch (status) { 72 switch (status) {
75 case ThreadStatus::Paused: 73 case ThreadStatus::Paused:
76 case ThreadStatus::WaitSynch: 74 case ThreadStatus::WaitSynch:
@@ -91,10 +89,6 @@ void Thread::ResumeFromWait() {
91 // before actually resuming. We can ignore subsequent wakeups if the thread status has 89 // before actually resuming. We can ignore subsequent wakeups if the thread status has
92 // already been set to ThreadStatus::Ready. 90 // already been set to ThreadStatus::Ready.
93 return; 91 return;
94
95 case ThreadStatus::Running:
96 DEBUG_ASSERT_MSG(false, "Thread with object id {} has already resumed.", GetObjectId());
97 return;
98 case ThreadStatus::Dead: 92 case ThreadStatus::Dead:
99 // This should never happen, as threads must complete before being stopped. 93 // This should never happen, as threads must complete before being stopped.
100 DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.", 94 DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.",
@@ -106,19 +100,18 @@ void Thread::ResumeFromWait() {
106} 100}
107 101
108void Thread::OnWakeUp() { 102void Thread::OnWakeUp() {
109 SchedulerLock lock(kernel); 103 KScopedSchedulerLock lock(kernel);
110
111 SetStatus(ThreadStatus::Ready); 104 SetStatus(ThreadStatus::Ready);
112} 105}
113 106
114ResultCode Thread::Start() { 107ResultCode Thread::Start() {
115 SchedulerLock lock(kernel); 108 KScopedSchedulerLock lock(kernel);
116 SetStatus(ThreadStatus::Ready); 109 SetStatus(ThreadStatus::Ready);
117 return RESULT_SUCCESS; 110 return RESULT_SUCCESS;
118} 111}
119 112
120void Thread::CancelWait() { 113void Thread::CancelWait() {
121 SchedulerLock lock(kernel); 114 KScopedSchedulerLock lock(kernel);
122 if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) { 115 if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) {
123 is_sync_cancelled = true; 116 is_sync_cancelled = true;
124 return; 117 return;
@@ -193,12 +186,14 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
193 thread->status = ThreadStatus::Dormant; 186 thread->status = ThreadStatus::Dormant;
194 thread->entry_point = entry_point; 187 thread->entry_point = entry_point;
195 thread->stack_top = stack_top; 188 thread->stack_top = stack_top;
189 thread->disable_count = 1;
196 thread->tpidr_el0 = 0; 190 thread->tpidr_el0 = 0;
197 thread->nominal_priority = thread->current_priority = priority; 191 thread->nominal_priority = thread->current_priority = priority;
198 thread->last_running_ticks = 0; 192 thread->schedule_count = -1;
193 thread->last_scheduled_tick = 0;
199 thread->processor_id = processor_id; 194 thread->processor_id = processor_id;
200 thread->ideal_core = processor_id; 195 thread->ideal_core = processor_id;
201 thread->affinity_mask = 1ULL << processor_id; 196 thread->affinity_mask.SetAffinity(processor_id, true);
202 thread->wait_objects = nullptr; 197 thread->wait_objects = nullptr;
203 thread->mutex_wait_address = 0; 198 thread->mutex_wait_address = 0;
204 thread->condvar_wait_address = 0; 199 thread->condvar_wait_address = 0;
@@ -208,7 +203,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
208 thread->owner_process = owner_process; 203 thread->owner_process = owner_process;
209 thread->type = type_flags; 204 thread->type = type_flags;
210 if ((type_flags & THREADTYPE_IDLE) == 0) { 205 if ((type_flags & THREADTYPE_IDLE) == 0) {
211 auto& scheduler = kernel.GlobalScheduler(); 206 auto& scheduler = kernel.GlobalSchedulerContext();
212 scheduler.AddThread(thread); 207 scheduler.AddThread(thread);
213 } 208 }
214 if (owner_process) { 209 if (owner_process) {
@@ -217,33 +212,10 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
217 } else { 212 } else {
218 thread->tls_address = 0; 213 thread->tls_address = 0;
219 } 214 }
215
220 // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used 216 // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
221 // to initialize the context 217 // to initialize the context
222 thread->arm_interface.reset();
223 if ((type_flags & THREADTYPE_HLE) == 0) { 218 if ((type_flags & THREADTYPE_HLE) == 0) {
224#ifdef ARCHITECTURE_x86_64
225 if (owner_process && !owner_process->Is64BitProcess()) {
226 thread->arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
227 system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(),
228 processor_id);
229 } else {
230 thread->arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
231 system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(),
232 processor_id);
233 }
234
235#else
236 if (owner_process && !owner_process->Is64BitProcess()) {
237 thread->arm_interface = std::make_shared<Core::ARM_Unicorn>(
238 system, kernel.Interrupts(), kernel.IsMulticore(), ARM_Unicorn::Arch::AArch32,
239 processor_id);
240 } else {
241 thread->arm_interface = std::make_shared<Core::ARM_Unicorn>(
242 system, kernel.Interrupts(), kernel.IsMulticore(), ARM_Unicorn::Arch::AArch64,
243 processor_id);
244 }
245 LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
246#endif
247 ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top), 219 ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top),
248 static_cast<u32>(entry_point), static_cast<u32>(arg)); 220 static_cast<u32>(entry_point), static_cast<u32>(arg));
249 ResetThreadContext64(thread->context_64, stack_top, entry_point, arg); 221 ResetThreadContext64(thread->context_64, stack_top, entry_point, arg);
@@ -255,7 +227,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
255} 227}
256 228
257void Thread::SetPriority(u32 priority) { 229void Thread::SetPriority(u32 priority) {
258 SchedulerLock lock(kernel); 230 KScopedSchedulerLock lock(kernel);
259 ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST, 231 ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
260 "Invalid priority value."); 232 "Invalid priority value.");
261 nominal_priority = priority; 233 nominal_priority = priority;
@@ -279,14 +251,6 @@ VAddr Thread::GetCommandBufferAddress() const {
279 return GetTLSAddress() + command_header_offset; 251 return GetTLSAddress() + command_header_offset;
280} 252}
281 253
282Core::ARM_Interface& Thread::ArmInterface() {
283 return *arm_interface;
284}
285
286const Core::ARM_Interface& Thread::ArmInterface() const {
287 return *arm_interface;
288}
289
290void Thread::SetStatus(ThreadStatus new_status) { 254void Thread::SetStatus(ThreadStatus new_status) {
291 if (new_status == status) { 255 if (new_status == status) {
292 return; 256 return;
@@ -294,7 +258,6 @@ void Thread::SetStatus(ThreadStatus new_status) {
294 258
295 switch (new_status) { 259 switch (new_status) {
296 case ThreadStatus::Ready: 260 case ThreadStatus::Ready:
297 case ThreadStatus::Running:
298 SetSchedulingStatus(ThreadSchedStatus::Runnable); 261 SetSchedulingStatus(ThreadSchedStatus::Runnable);
299 break; 262 break;
300 case ThreadStatus::Dormant: 263 case ThreadStatus::Dormant:
@@ -401,7 +364,7 @@ bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
401} 364}
402 365
403ResultCode Thread::SetActivity(ThreadActivity value) { 366ResultCode Thread::SetActivity(ThreadActivity value) {
404 SchedulerLock lock(kernel); 367 KScopedSchedulerLock lock(kernel);
405 368
406 auto sched_status = GetSchedulingStatus(); 369 auto sched_status = GetSchedulingStatus();
407 370
@@ -430,7 +393,7 @@ ResultCode Thread::SetActivity(ThreadActivity value) {
430ResultCode Thread::Sleep(s64 nanoseconds) { 393ResultCode Thread::Sleep(s64 nanoseconds) {
431 Handle event_handle{}; 394 Handle event_handle{};
432 { 395 {
433 SchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds); 396 KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
434 SetStatus(ThreadStatus::WaitSleep); 397 SetStatus(ThreadStatus::WaitSleep);
435 } 398 }
436 399
@@ -441,39 +404,12 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
441 return RESULT_SUCCESS; 404 return RESULT_SUCCESS;
442} 405}
443 406
444std::pair<ResultCode, bool> Thread::YieldSimple() {
445 bool is_redundant = false;
446 {
447 SchedulerLock lock(kernel);
448 is_redundant = kernel.GlobalScheduler().YieldThread(this);
449 }
450 return {RESULT_SUCCESS, is_redundant};
451}
452
453std::pair<ResultCode, bool> Thread::YieldAndBalanceLoad() {
454 bool is_redundant = false;
455 {
456 SchedulerLock lock(kernel);
457 is_redundant = kernel.GlobalScheduler().YieldThreadAndBalanceLoad(this);
458 }
459 return {RESULT_SUCCESS, is_redundant};
460}
461
462std::pair<ResultCode, bool> Thread::YieldAndWaitForLoadBalancing() {
463 bool is_redundant = false;
464 {
465 SchedulerLock lock(kernel);
466 is_redundant = kernel.GlobalScheduler().YieldThreadAndWaitForLoadBalancing(this);
467 }
468 return {RESULT_SUCCESS, is_redundant};
469}
470
471void Thread::AddSchedulingFlag(ThreadSchedFlags flag) { 407void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
472 const u32 old_state = scheduling_state; 408 const u32 old_state = scheduling_state;
473 pausing_state |= static_cast<u32>(flag); 409 pausing_state |= static_cast<u32>(flag);
474 const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus()); 410 const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
475 scheduling_state = base_scheduling | pausing_state; 411 scheduling_state = base_scheduling | pausing_state;
476 kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state); 412 KScheduler::OnThreadStateChanged(kernel, this, old_state);
477} 413}
478 414
479void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) { 415void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
@@ -481,23 +417,24 @@ void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
481 pausing_state &= ~static_cast<u32>(flag); 417 pausing_state &= ~static_cast<u32>(flag);
482 const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus()); 418 const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
483 scheduling_state = base_scheduling | pausing_state; 419 scheduling_state = base_scheduling | pausing_state;
484 kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state); 420 KScheduler::OnThreadStateChanged(kernel, this, old_state);
485} 421}
486 422
487void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) { 423void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
488 const u32 old_state = scheduling_state; 424 const u32 old_state = scheduling_state;
489 scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) | 425 scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
490 static_cast<u32>(new_status); 426 static_cast<u32>(new_status);
491 kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state); 427 KScheduler::OnThreadStateChanged(kernel, this, old_state);
492} 428}
493 429
494void Thread::SetCurrentPriority(u32 new_priority) { 430void Thread::SetCurrentPriority(u32 new_priority) {
495 const u32 old_priority = std::exchange(current_priority, new_priority); 431 const u32 old_priority = std::exchange(current_priority, new_priority);
496 kernel.GlobalScheduler().AdjustSchedulingOnPriority(this, old_priority); 432 KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(),
433 old_priority);
497} 434}
498 435
499ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { 436ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
500 SchedulerLock lock(kernel); 437 KScopedSchedulerLock lock(kernel);
501 const auto HighestSetCore = [](u64 mask, u32 max_cores) { 438 const auto HighestSetCore = [](u64 mask, u32 max_cores) {
502 for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) { 439 for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
503 if (((mask >> core) & 1) != 0) { 440 if (((mask >> core) & 1) != 0) {
@@ -518,20 +455,21 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
518 } 455 }
519 if (use_override) { 456 if (use_override) {
520 ideal_core_override = new_core; 457 ideal_core_override = new_core;
521 affinity_mask_override = new_affinity_mask;
522 } else { 458 } else {
523 const u64 old_affinity_mask = std::exchange(affinity_mask, new_affinity_mask); 459 const auto old_affinity_mask = affinity_mask;
460 affinity_mask.SetAffinityMask(new_affinity_mask);
524 ideal_core = new_core; 461 ideal_core = new_core;
525 if (old_affinity_mask != new_affinity_mask) { 462 if (old_affinity_mask.GetAffinityMask() != new_affinity_mask) {
526 const s32 old_core = processor_id; 463 const s32 old_core = processor_id;
527 if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) { 464 if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) {
528 if (static_cast<s32>(ideal_core) < 0) { 465 if (static_cast<s32>(ideal_core) < 0) {
529 processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES); 466 processor_id = HighestSetCore(affinity_mask.GetAffinityMask(),
467 Core::Hardware::NUM_CPU_CORES);
530 } else { 468 } else {
531 processor_id = ideal_core; 469 processor_id = ideal_core;
532 } 470 }
533 } 471 }
534 kernel.GlobalScheduler().AdjustSchedulingOnAffinity(this, old_affinity_mask, old_core); 472 KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_affinity_mask, old_core);
535 } 473 }
536 } 474 }
537 return RESULT_SUCCESS; 475 return RESULT_SUCCESS;
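
The SetCoreAndAffinityMask hunk above swaps the raw u64 affinity mask for KAffinityMask accessors and keeps the "pick the highest allowed core" fallback. Below is a minimal, self-contained sketch of that re-selection logic; TinyAffinityMask and the hard-coded core count of 4 are illustrative stand-ins, only the GetAffinity/GetAffinityMask/SetAffinityMask calls and the HighestSetCore rule mirror the diff.

#include <cstdint>
#include <iostream>

// Illustrative stand-in for KAffinityMask: a 64-bit mask with the three
// accessors the diff uses.
class TinyAffinityMask {
public:
    void SetAffinityMask(std::uint64_t m) { mask = m; }
    std::uint64_t GetAffinityMask() const { return mask; }
    bool GetAffinity(std::int32_t core) const { return ((mask >> core) & 1) != 0; }

private:
    std::uint64_t mask{1};
};

// Same selection rule as the HighestSetCore lambda in SetCoreAndAffinityMask:
// the highest core whose bit is set, or -1 if none is.
std::int32_t HighestSetCore(std::uint64_t mask, std::uint32_t max_cores) {
    for (std::int32_t core = static_cast<std::int32_t>(max_cores) - 1; core >= 0; --core) {
        if (((mask >> core) & 1) != 0) {
            return core;
        }
    }
    return -1;
}

int main() {
    TinyAffinityMask affinity;
    affinity.SetAffinityMask(0b1010); // cores 1 and 3 allowed
    std::int32_t processor_id = 0;    // currently on core 0, which is now disallowed
    if (!affinity.GetAffinity(processor_id)) {
        processor_id = HighestSetCore(affinity.GetAffinityMask(), 4);
    }
    std::cout << "migrated to core " << processor_id << '\n'; // prints 3
}
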
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 8daf79fac..11ef29888 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -4,6 +4,7 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <array>
7#include <functional> 8#include <functional>
8#include <string> 9#include <string>
9#include <utility> 10#include <utility>
@@ -12,6 +13,7 @@
12#include "common/common_types.h" 13#include "common/common_types.h"
13#include "common/spin_lock.h" 14#include "common/spin_lock.h"
14#include "core/arm/arm_interface.h" 15#include "core/arm/arm_interface.h"
16#include "core/hle/kernel/k_affinity_mask.h"
15#include "core/hle/kernel/object.h" 17#include "core/hle/kernel/object.h"
16#include "core/hle/kernel/synchronization_object.h" 18#include "core/hle/kernel/synchronization_object.h"
17#include "core/hle/result.h" 19#include "core/hle/result.h"
@@ -27,10 +29,10 @@ class System;
27 29
28namespace Kernel { 30namespace Kernel {
29 31
30class GlobalScheduler; 32class GlobalSchedulerContext;
31class KernelCore; 33class KernelCore;
32class Process; 34class Process;
33class Scheduler; 35class KScheduler;
34 36
35enum ThreadPriority : u32 { 37enum ThreadPriority : u32 {
36 THREADPRIO_HIGHEST = 0, ///< Highest thread priority 38 THREADPRIO_HIGHEST = 0, ///< Highest thread priority
@@ -72,7 +74,6 @@ enum ThreadProcessorId : s32 {
72}; 74};
73 75
74enum class ThreadStatus { 76enum class ThreadStatus {
75 Running, ///< Currently running
76 Ready, ///< Ready to run 77 Ready, ///< Ready to run
77 Paused, ///< Paused by SetThreadActivity or debug 78 Paused, ///< Paused by SetThreadActivity or debug
78 WaitHLEEvent, ///< Waiting for hle event to finish 79 WaitHLEEvent, ///< Waiting for hle event to finish
@@ -248,10 +249,6 @@ public:
248 249
249 void SetSynchronizationResults(SynchronizationObject* object, ResultCode result); 250 void SetSynchronizationResults(SynchronizationObject* object, ResultCode result);
250 251
251 Core::ARM_Interface& ArmInterface();
252
253 const Core::ARM_Interface& ArmInterface() const;
254
255 SynchronizationObject* GetSignalingObject() const { 252 SynchronizationObject* GetSignalingObject() const {
256 return signaling_object; 253 return signaling_object;
257 } 254 }
@@ -350,8 +347,12 @@ public:
350 347
351 void SetStatus(ThreadStatus new_status); 348 void SetStatus(ThreadStatus new_status);
352 349
353 u64 GetLastRunningTicks() const { 350 s64 GetLastScheduledTick() const {
354 return last_running_ticks; 351 return this->last_scheduled_tick;
352 }
353
354 void SetLastScheduledTick(s64 tick) {
355 this->last_scheduled_tick = tick;
355 } 356 }
356 357
357 u64 GetTotalCPUTimeTicks() const { 358 u64 GetTotalCPUTimeTicks() const {
@@ -366,10 +367,18 @@ public:
366 return processor_id; 367 return processor_id;
367 } 368 }
368 369
370 s32 GetActiveCore() const {
371 return GetProcessorID();
372 }
373
369 void SetProcessorID(s32 new_core) { 374 void SetProcessorID(s32 new_core) {
370 processor_id = new_core; 375 processor_id = new_core;
371 } 376 }
372 377
378 void SetActiveCore(s32 new_core) {
379 processor_id = new_core;
380 }
381
373 Process* GetOwnerProcess() { 382 Process* GetOwnerProcess() {
374 return owner_process; 383 return owner_process;
375 } 384 }
@@ -474,7 +483,7 @@ public:
474 return ideal_core; 483 return ideal_core;
475 } 484 }
476 485
477 u64 GetAffinityMask() const { 486 const KAffinityMask& GetAffinityMask() const {
478 return affinity_mask; 487 return affinity_mask;
479 } 488 }
480 489
@@ -483,21 +492,12 @@ public:
483 /// Sleeps this thread for the given amount of nanoseconds. 492 /// Sleeps this thread for the given amount of nanoseconds.
484 ResultCode Sleep(s64 nanoseconds); 493 ResultCode Sleep(s64 nanoseconds);
485 494
486 /// Yields this thread without rebalancing loads. 495 s64 GetYieldScheduleCount() const {
487 std::pair<ResultCode, bool> YieldSimple(); 496 return this->schedule_count;
488
489 /// Yields this thread and does a load rebalancing.
490 std::pair<ResultCode, bool> YieldAndBalanceLoad();
491
492 /// Yields this thread and if the core is left idle, loads are rebalanced
493 std::pair<ResultCode, bool> YieldAndWaitForLoadBalancing();
494
495 void IncrementYieldCount() {
496 yield_count++;
497 } 497 }
498 498
499 u64 GetYieldCount() const { 499 void SetYieldScheduleCount(s64 count) {
500 return yield_count; 500 this->schedule_count = count;
501 } 501 }
502 502
503 ThreadSchedStatus GetSchedulingStatus() const { 503 ThreadSchedStatus GetSchedulingStatus() const {
@@ -573,9 +573,59 @@ public:
573 return has_exited; 573 return has_exited;
574 } 574 }
575 575
576 class QueueEntry {
577 public:
578 constexpr QueueEntry() = default;
579
580 constexpr void Initialize() {
581 this->prev = nullptr;
582 this->next = nullptr;
583 }
584
585 constexpr Thread* GetPrev() const {
586 return this->prev;
587 }
588 constexpr Thread* GetNext() const {
589 return this->next;
590 }
591 constexpr void SetPrev(Thread* thread) {
592 this->prev = thread;
593 }
594 constexpr void SetNext(Thread* thread) {
595 this->next = thread;
596 }
597
598 private:
599 Thread* prev{};
600 Thread* next{};
601 };
602
603 QueueEntry& GetPriorityQueueEntry(s32 core) {
604 return this->per_core_priority_queue_entry[core];
605 }
606
607 const QueueEntry& GetPriorityQueueEntry(s32 core) const {
608 return this->per_core_priority_queue_entry[core];
609 }
610
611 s32 GetDisableDispatchCount() const {
612 return disable_count;
613 }
614
615 void DisableDispatch() {
616 ASSERT(GetDisableDispatchCount() >= 0);
617 disable_count++;
618 }
619
620 void EnableDispatch() {
621 ASSERT(GetDisableDispatchCount() > 0);
622 disable_count--;
623 }
624
576private: 625private:
577 friend class GlobalScheduler; 626 friend class GlobalSchedulerContext;
578 friend class Scheduler; 627 friend class KScheduler;
628 friend class Process;
579 629
580 void SetSchedulingStatus(ThreadSchedStatus new_status); 630 void SetSchedulingStatus(ThreadSchedStatus new_status);
581 void AddSchedulingFlag(ThreadSchedFlags flag); 631 void AddSchedulingFlag(ThreadSchedFlags flag);
@@ -586,15 +636,16 @@ private:
586 Common::SpinLock context_guard{}; 636 Common::SpinLock context_guard{};
587 ThreadContext32 context_32{}; 637 ThreadContext32 context_32{};
588 ThreadContext64 context_64{}; 638 ThreadContext64 context_64{};
589 std::unique_ptr<Core::ARM_Interface> arm_interface{};
590 std::shared_ptr<Common::Fiber> host_context{}; 639 std::shared_ptr<Common::Fiber> host_context{};
591 640
592 u64 thread_id = 0;
593
594 ThreadStatus status = ThreadStatus::Dormant; 641 ThreadStatus status = ThreadStatus::Dormant;
642 u32 scheduling_state = 0;
643
644 u64 thread_id = 0;
595 645
596 VAddr entry_point = 0; 646 VAddr entry_point = 0;
597 VAddr stack_top = 0; 647 VAddr stack_top = 0;
648 std::atomic_int disable_count = 0;
598 649
599 ThreadType type; 650 ThreadType type;
600 651
@@ -608,9 +659,8 @@ private:
608 u32 current_priority = 0; 659 u32 current_priority = 0;
609 660
610 u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks. 661 u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
611 u64 last_running_ticks = 0; ///< CPU tick when thread was last running 662 s64 schedule_count{};
612 u64 yield_count = 0; ///< Number of redundant yields carried by this thread. 663 s64 last_scheduled_tick{};
613 ///< a redundant yield is one where no scheduling is changed
614 664
615 s32 processor_id = 0; 665 s32 processor_id = 0;
616 666
@@ -652,16 +702,16 @@ private:
652 Handle hle_time_event; 702 Handle hle_time_event;
653 SynchronizationObject* hle_object; 703 SynchronizationObject* hle_object;
654 704
655 Scheduler* scheduler = nullptr; 705 KScheduler* scheduler = nullptr;
706
707 std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
656 708
657 u32 ideal_core{0xFFFFFFFF}; 709 u32 ideal_core{0xFFFFFFFF};
658 u64 affinity_mask{0x1}; 710 KAffinityMask affinity_mask{};
659 711
660 s32 ideal_core_override = -1; 712 s32 ideal_core_override = -1;
661 u64 affinity_mask_override = 0x1;
662 u32 affinity_override_count = 0; 713 u32 affinity_override_count = 0;
663 714
664 u32 scheduling_state = 0;
665 u32 pausing_state = 0; 715 u32 pausing_state = 0;
666 bool is_running = false; 716 bool is_running = false;
667 bool is_waiting_on_sync = false; 717 bool is_waiting_on_sync = false;
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index 95f2446c9..79628e2b4 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -7,8 +7,8 @@
7#include "core/core_timing.h" 7#include "core/core_timing.h"
8#include "core/core_timing_util.h" 8#include "core/core_timing_util.h"
9#include "core/hle/kernel/handle_table.h" 9#include "core/hle/kernel/handle_table.h"
10#include "core/hle/kernel/k_scheduler.h"
10#include "core/hle/kernel/kernel.h" 11#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/scheduler.h"
12#include "core/hle/kernel/thread.h" 12#include "core/hle/kernel/thread.h"
13#include "core/hle/kernel/time_manager.h" 13#include "core/hle/kernel/time_manager.h"
14 14
@@ -18,17 +18,27 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
18 time_manager_event_type = Core::Timing::CreateEvent( 18 time_manager_event_type = Core::Timing::CreateEvent(
19 "Kernel::TimeManagerCallback", 19 "Kernel::TimeManagerCallback",
20 [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { 20 [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
21 const SchedulerLock lock(system.Kernel()); 21 const KScopedSchedulerLock lock(system.Kernel());
22 const auto proper_handle = static_cast<Handle>(thread_handle); 22 const auto proper_handle = static_cast<Handle>(thread_handle);
23 if (cancelled_events[proper_handle]) { 23
24 return; 24 std::shared_ptr<Thread> thread;
25 {
26 std::lock_guard lock{mutex};
27 if (cancelled_events[proper_handle]) {
28 return;
29 }
30 thread = system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
31 }
32
33 if (thread) {
34 // Thread can be null if process has exited
35 thread->OnWakeUp();
25 } 36 }
26 auto thread = this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
27 thread->OnWakeUp();
28 }); 37 });
29} 38}
30 39
31void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds) { 40void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64 nanoseconds) {
41 std::lock_guard lock{mutex};
32 event_handle = timetask->GetGlobalHandle(); 42 event_handle = timetask->GetGlobalHandle();
33 if (nanoseconds > 0) { 43 if (nanoseconds > 0) {
34 ASSERT(timetask); 44 ASSERT(timetask);
@@ -43,6 +53,7 @@ void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64
43} 53}
44 54
45void TimeManager::UnscheduleTimeEvent(Handle event_handle) { 55void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
56 std::lock_guard lock{mutex};
46 if (event_handle == InvalidHandle) { 57 if (event_handle == InvalidHandle) {
47 return; 58 return;
48 } 59 }
@@ -51,7 +62,8 @@ void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
51} 62}
52 63
53void TimeManager::CancelTimeEvent(Thread* time_task) { 64void TimeManager::CancelTimeEvent(Thread* time_task) {
54 Handle event_handle = time_task->GetGlobalHandle(); 65 std::lock_guard lock{mutex};
66 const Handle event_handle = time_task->GetGlobalHandle();
55 UnscheduleTimeEvent(event_handle); 67 UnscheduleTimeEvent(event_handle);
56} 68}
57 69
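
The time_manager.cpp hunk above guards cancelled_events with a std::mutex so the core-timing callback and ScheduleTimeEvent/CancelTimeEvent no longer race on the map. A cut-down sketch of that locking pattern, with an int standing in for Handle and TinyTimeManager as a hypothetical container:

#include <iostream>
#include <mutex>
#include <unordered_map>

class TinyTimeManager {
public:
    void Schedule(int handle) {
        std::lock_guard lock{mutex};
        cancelled_events[handle] = false;
    }

    void Cancel(int handle) {
        std::lock_guard lock{mutex};
        cancelled_events[handle] = true;
    }

    // Called from the timing callback; returns true if the wakeup should run.
    bool ShouldWake(int handle) {
        std::lock_guard lock{mutex};
        return !cancelled_events[handle];
    }

private:
    std::unordered_map<int, bool> cancelled_events;
    std::mutex mutex;
};

int main() {
    TinyTimeManager tm;
    tm.Schedule(1);
    tm.Cancel(1);
    std::cout << std::boolalpha << tm.ShouldWake(1) << '\n'; // false
}
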
diff --git a/src/core/hle/kernel/time_manager.h b/src/core/hle/kernel/time_manager.h
index 307a18765..f39df39a0 100644
--- a/src/core/hle/kernel/time_manager.h
+++ b/src/core/hle/kernel/time_manager.h
@@ -5,6 +5,7 @@
5#pragma once 5#pragma once
6 6
7#include <memory> 7#include <memory>
8#include <mutex>
8#include <unordered_map> 9#include <unordered_map>
9 10
10#include "core/hle/kernel/object.h" 11#include "core/hle/kernel/object.h"
@@ -42,6 +43,7 @@ private:
42 Core::System& system; 43 Core::System& system;
43 std::shared_ptr<Core::Timing::EventType> time_manager_event_type; 44 std::shared_ptr<Core::Timing::EventType> time_manager_event_type;
44 std::unordered_map<Handle, bool> cancelled_events; 45 std::unordered_map<Handle, bool> cancelled_events;
46 std::mutex mutex;
45}; 47};
46 48
47} // namespace Kernel 49} // namespace Kernel
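
Throughout this diff, SchedulerLock and SchedulerLockAndSleep become KScopedSchedulerLock and KScopedSchedulerLockAndSleep. Both follow the scoped-lock idiom sketched below: acquire in the constructor, release in the destructor. ScopedLockSketch and DemoLockable are illustrative names, not the real k_scoped_lock.h implementation.

#include <iostream>

template <typename T>
class ScopedLockSketch {
public:
    explicit ScopedLockSketch(T& lockable_) : lockable{lockable_} { lockable.Lock(); }
    ~ScopedLockSketch() { lockable.Unlock(); }

    ScopedLockSketch(const ScopedLockSketch&) = delete;
    ScopedLockSketch& operator=(const ScopedLockSketch&) = delete;

private:
    T& lockable;
};

struct DemoLockable {
    void Lock() { std::cout << "lock\n"; }
    void Unlock() { std::cout << "unlock\n"; }
};

int main() {
    DemoLockable lockable;
    {
        ScopedLockSketch lock{lockable}; // prints "lock"
    }                                    // prints "unlock" when the scope ends
}
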