path: root/src/core/hle/kernel
author     Feng Chen  2021-12-18 13:57:14 +0800
committer  GitHub     2021-12-18 13:57:14 +0800
commit     e49184e6069a9d791d2df3c1958f5c4b1187e124 (patch)
tree       b776caf722e0be0e680f67b0ad0842628162ef1c /src/core/hle/kernel
parent     Implement convert legacy to generic (diff)
parent     Merge pull request #7570 from ameerj/favorites-expanded (diff)
download   yuzu-e49184e6069a9d791d2df3c1958f5c4b1187e124.tar.gz
           yuzu-e49184e6069a9d791d2df3c1958f5c4b1187e124.tar.xz
           yuzu-e49184e6069a9d791d2df3c1958f5c4b1187e124.zip
Merge branch 'yuzu-emu:master' into convert_legacy
Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/init/init_slab_setup.cpp             |   2
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.cpp                |  92
-rw-r--r--  src/core/hle/kernel/k_auto_object.h                      |   4
-rw-r--r--  src/core/hle/kernel/k_class_token.cpp                    |   5
-rw-r--r--  src/core/hle/kernel/k_code_memory.cpp                    | 146
-rw-r--r--  src/core/hle/kernel/k_code_memory.h                      |  66
-rw-r--r--  src/core/hle/kernel/k_condition_variable.cpp             | 245
-rw-r--r--  src/core/hle/kernel/k_condition_variable.h               |   2
-rw-r--r--  src/core/hle/kernel/k_handle_table.cpp                   |   6
-rw-r--r--  src/core/hle/kernel/k_handle_table.h                     |   2
-rw-r--r--  src/core/hle/kernel/k_light_condition_variable.cpp       |  80
-rw-r--r--  src/core/hle/kernel/k_light_condition_variable.h         |  58
-rw-r--r--  src/core/hle/kernel/k_light_lock.cpp                     |  72
-rw-r--r--  src/core/hle/kernel/k_light_lock.h                       |   2
-rw-r--r--  src/core/hle/kernel/k_memory_block.h                     |  20
-rw-r--r--  src/core/hle/kernel/k_page_linked_list.h                 |   4
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp                     | 123
-rw-r--r--  src/core/hle/kernel/k_page_table.h                       |  10
-rw-r--r--  src/core/hle/kernel/k_process.cpp                        |  30
-rw-r--r--  src/core/hle/kernel/k_process.h                          |   1
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp                      | 101
-rw-r--r--  src/core/hle/kernel/k_scheduler.h                        |   2
-rw-r--r--  src/core/hle/kernel/k_scheduler_lock.h                   |  10
-rw-r--r--  src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h  |   1
-rw-r--r--  src/core/hle/kernel/k_server_session.cpp                 |   3
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.cpp         | 151
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.h           |  32
-rw-r--r--  src/core/hle/kernel/k_thread.cpp                         | 246
-rw-r--r--  src/core/hle/kernel/k_thread.h                           |  72
-rw-r--r--  src/core/hle/kernel/k_thread_queue.cpp                   |  49
-rw-r--r--  src/core/hle/kernel/k_thread_queue.h                     |  74
-rw-r--r--  src/core/hle/kernel/kernel.cpp                           | 103
-rw-r--r--  src/core/hle/kernel/kernel.h                             |   9
-rw-r--r--  src/core/hle/kernel/service_thread.cpp                   |  33
-rw-r--r--  src/core/hle/kernel/svc.cpp                              | 309
-rw-r--r--  src/core/hle/kernel/svc_wrap.h                           |  27
-rw-r--r--  src/core/hle/kernel/time_manager.cpp                     |   6
37 files changed, 1564 insertions(+), 634 deletions(-)
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 8ff0f695d..36fc0944a 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -9,6 +9,7 @@
9#include "core/core.h" 9#include "core/core.h"
10#include "core/hardware_properties.h" 10#include "core/hardware_properties.h"
11#include "core/hle/kernel/init/init_slab_setup.h" 11#include "core/hle/kernel/init/init_slab_setup.h"
12#include "core/hle/kernel/k_code_memory.h"
12#include "core/hle/kernel/k_event.h" 13#include "core/hle/kernel/k_event.h"
13#include "core/hle/kernel/k_memory_layout.h" 14#include "core/hle/kernel/k_memory_layout.h"
14#include "core/hle/kernel/k_memory_manager.h" 15#include "core/hle/kernel/k_memory_manager.h"
@@ -32,6 +33,7 @@ namespace Kernel::Init {
32 HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \ 33 HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \
33 HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \ 34 HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \
34 HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \ 35 HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \
36 HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ##__VA_ARGS__) \
35 HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__) \ 37 HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__) \
36 HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) 38 HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__)
37 39
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 1b429bc1e..783c69858 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -8,6 +8,7 @@
8#include "core/hle/kernel/k_scheduler.h" 8#include "core/hle/kernel/k_scheduler.h"
9#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 9#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
10#include "core/hle/kernel/k_thread.h" 10#include "core/hle/kernel/k_thread.h"
11#include "core/hle/kernel/k_thread_queue.h"
11#include "core/hle/kernel/kernel.h" 12#include "core/hle/kernel/kernel.h"
12#include "core/hle/kernel/svc_results.h" 13#include "core/hle/kernel/svc_results.h"
13#include "core/hle/kernel/time_manager.h" 14#include "core/hle/kernel/time_manager.h"
@@ -28,7 +29,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
28 29
29bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) { 30bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
30 auto& monitor = system.Monitor(); 31 auto& monitor = system.Monitor();
31 const auto current_core = system.CurrentCoreIndex(); 32 const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
32 33
33 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. 34 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
34 // TODO(bunnei): We should call CanAccessAtomic(..) here. 35 // TODO(bunnei): We should call CanAccessAtomic(..) here.
@@ -58,7 +59,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
58 59
59bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) { 60bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
60 auto& monitor = system.Monitor(); 61 auto& monitor = system.Monitor();
61 const auto current_core = system.CurrentCoreIndex(); 62 const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
62 63
63 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. 64 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
64 // TODO(bunnei): We should call CanAccessAtomic(..) here. 65 // TODO(bunnei): We should call CanAccessAtomic(..) here.
@@ -85,6 +86,27 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32
85 return true; 86 return true;
86} 87}
87 88
89class ThreadQueueImplForKAddressArbiter final : public KThreadQueue {
90public:
91 explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t)
92 : KThreadQueue(kernel_), m_tree(t) {}
93
94 void CancelWait(KThread* waiting_thread, ResultCode wait_result,
95 bool cancel_timer_task) override {
96 // If the thread is waiting on an address arbiter, remove it from the tree.
97 if (waiting_thread->IsWaitingForAddressArbiter()) {
98 m_tree->erase(m_tree->iterator_to(*waiting_thread));
99 waiting_thread->ClearAddressArbiter();
100 }
101
102 // Invoke the base cancel wait handler.
103 KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
104 }
105
106private:
107 KAddressArbiter::ThreadTree* m_tree;
108};
109
88} // namespace 110} // namespace
89 111
90ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) { 112ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
@@ -96,14 +118,14 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
96 auto it = thread_tree.nfind_light({addr, -1}); 118 auto it = thread_tree.nfind_light({addr, -1});
97 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && 119 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
98 (it->GetAddressArbiterKey() == addr)) { 120 (it->GetAddressArbiterKey() == addr)) {
121 // End the thread's wait.
99 KThread* target_thread = std::addressof(*it); 122 KThread* target_thread = std::addressof(*it);
100 target_thread->SetSyncedObject(nullptr, ResultSuccess); 123 target_thread->EndWait(ResultSuccess);
101 124
102 ASSERT(target_thread->IsWaitingForAddressArbiter()); 125 ASSERT(target_thread->IsWaitingForAddressArbiter());
103 target_thread->Wakeup(); 126 target_thread->ClearAddressArbiter();
104 127
105 it = thread_tree.erase(it); 128 it = thread_tree.erase(it);
106 target_thread->ClearAddressArbiter();
107 ++num_waiters; 129 ++num_waiters;
108 } 130 }
109 } 131 }
@@ -129,14 +151,14 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
129 auto it = thread_tree.nfind_light({addr, -1}); 151 auto it = thread_tree.nfind_light({addr, -1});
130 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && 152 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
131 (it->GetAddressArbiterKey() == addr)) { 153 (it->GetAddressArbiterKey() == addr)) {
154 // End the thread's wait.
132 KThread* target_thread = std::addressof(*it); 155 KThread* target_thread = std::addressof(*it);
133 target_thread->SetSyncedObject(nullptr, ResultSuccess); 156 target_thread->EndWait(ResultSuccess);
134 157
135 ASSERT(target_thread->IsWaitingForAddressArbiter()); 158 ASSERT(target_thread->IsWaitingForAddressArbiter());
136 target_thread->Wakeup(); 159 target_thread->ClearAddressArbiter();
137 160
138 it = thread_tree.erase(it); 161 it = thread_tree.erase(it);
139 target_thread->ClearAddressArbiter();
140 ++num_waiters; 162 ++num_waiters;
141 } 163 }
142 } 164 }
@@ -197,14 +219,14 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
197 219
198 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && 220 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
199 (it->GetAddressArbiterKey() == addr)) { 221 (it->GetAddressArbiterKey() == addr)) {
222 // End the thread's wait.
200 KThread* target_thread = std::addressof(*it); 223 KThread* target_thread = std::addressof(*it);
201 target_thread->SetSyncedObject(nullptr, ResultSuccess); 224 target_thread->EndWait(ResultSuccess);
202 225
203 ASSERT(target_thread->IsWaitingForAddressArbiter()); 226 ASSERT(target_thread->IsWaitingForAddressArbiter());
204 target_thread->Wakeup(); 227 target_thread->ClearAddressArbiter();
205 228
206 it = thread_tree.erase(it); 229 it = thread_tree.erase(it);
207 target_thread->ClearAddressArbiter();
208 ++num_waiters; 230 ++num_waiters;
209 } 231 }
210 } 232 }
@@ -214,6 +236,7 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
214ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) { 236ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
215 // Prepare to wait. 237 // Prepare to wait.
216 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); 238 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
239 ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
217 240
218 { 241 {
219 KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout}; 242 KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
@@ -224,9 +247,6 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
224 return ResultTerminationRequested; 247 return ResultTerminationRequested;
225 } 248 }
226 249
227 // Set the synced object.
228 cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
229
230 // Read the value from userspace. 250 // Read the value from userspace.
231 s32 user_value{}; 251 s32 user_value{};
232 bool succeeded{}; 252 bool succeeded{};
@@ -256,31 +276,20 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
256 // Set the arbiter. 276 // Set the arbiter.
257 cur_thread->SetAddressArbiter(&thread_tree, addr); 277 cur_thread->SetAddressArbiter(&thread_tree, addr);
258 thread_tree.insert(*cur_thread); 278 thread_tree.insert(*cur_thread);
259 cur_thread->SetState(ThreadState::Waiting);
260 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
261 }
262
263 // Cancel the timer wait.
264 kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
265 279
266 // Remove from the address arbiter. 280 // Wait for the thread to finish.
267 { 281 cur_thread->BeginWait(std::addressof(wait_queue));
268 KScopedSchedulerLock sl(kernel); 282 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
269
270 if (cur_thread->IsWaitingForAddressArbiter()) {
271 thread_tree.erase(thread_tree.iterator_to(*cur_thread));
272 cur_thread->ClearAddressArbiter();
273 }
274 } 283 }
275 284
276 // Get the result. 285 // Get the result.
277 KSynchronizationObject* dummy{}; 286 return cur_thread->GetWaitResult();
278 return cur_thread->GetWaitResult(&dummy);
279} 287}
280 288
281ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) { 289ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
282 // Prepare to wait. 290 // Prepare to wait.
283 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); 291 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
292 ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
284 293
285 { 294 {
286 KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout}; 295 KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
@@ -291,9 +300,6 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
291 return ResultTerminationRequested; 300 return ResultTerminationRequested;
292 } 301 }
293 302
294 // Set the synced object.
295 cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
296
297 // Read the value from userspace. 303 // Read the value from userspace.
298 s32 user_value{}; 304 s32 user_value{};
299 if (!ReadFromUser(system, &user_value, addr)) { 305 if (!ReadFromUser(system, &user_value, addr)) {
@@ -316,26 +322,14 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
316 // Set the arbiter. 322 // Set the arbiter.
317 cur_thread->SetAddressArbiter(&thread_tree, addr); 323 cur_thread->SetAddressArbiter(&thread_tree, addr);
318 thread_tree.insert(*cur_thread); 324 thread_tree.insert(*cur_thread);
319 cur_thread->SetState(ThreadState::Waiting);
320 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
321 }
322
323 // Cancel the timer wait.
324 kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
325 325
326 // Remove from the address arbiter. 326 // Wait for the thread to finish.
327 { 327 cur_thread->BeginWait(std::addressof(wait_queue));
328 KScopedSchedulerLock sl(kernel); 328 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
329
330 if (cur_thread->IsWaitingForAddressArbiter()) {
331 thread_tree.erase(thread_tree.iterator_to(*cur_thread));
332 cur_thread->ClearAddressArbiter();
333 }
334 } 329 }
335 330
336 // Get the result. 331 // Get the result.
337 KSynchronizationObject* dummy{}; 332 return cur_thread->GetWaitResult();
338 return cur_thread->GetWaitResult(&dummy);
339} 333}
340 334
341} // namespace Kernel 335} // namespace Kernel
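
The change above is the heart of this merge: every blocking primitive now parks the caller on a KThreadQueue whose CancelWait override undoes the primitive's own bookkeeping before the common wake-up path runs, replacing the old SetSyncedObject/Wakeup plus manual-cleanup pattern. A minimal standalone model of that protocol (simplified, hypothetical names; not yuzu code):

#include <algorithm>
#include <cstdio>
#include <vector>

// Stand-ins for kernel types; everything here is illustrative only.
using ResultCode = int;
constexpr ResultCode ResultSuccess = 0;
constexpr ResultCode ResultTimedOut = 1;

struct Thread;

// Base queue: EndWait/CancelWait resume the parked thread with a result.
struct ThreadQueue {
    virtual ~ThreadQueue() = default;
    virtual void CancelWait(Thread* t, ResultCode r, bool cancel_timer_task);
    void EndWait(Thread* t, ResultCode r);
};

struct Thread {
    ThreadQueue* wait_queue{};
    ResultCode wait_result{ResultSuccess};
    bool waiting{};
    void BeginWait(ThreadQueue* q) {
        wait_queue = q;
        waiting = true;
    }
};

void ThreadQueue::EndWait(Thread* t, ResultCode r) {
    t->wait_result = r;
    t->waiting = false;
    t->wait_queue = nullptr;
}

void ThreadQueue::CancelWait(Thread* t, ResultCode r, bool) {
    EndWait(t, r);
}

// Mirrors ThreadQueueImplForKAddressArbiter: cancellation also removes the
// thread from the container it was parked in, then defers to the base class.
struct ArbiterQueue final : ThreadQueue {
    std::vector<Thread*>* tree;
    explicit ArbiterQueue(std::vector<Thread*>* t) : tree(t) {}
    void CancelWait(Thread* t, ResultCode r, bool cancel_timer_task) override {
        tree->erase(std::find(tree->begin(), tree->end(), t));
        ThreadQueue::CancelWait(t, r, cancel_timer_task);
    }
};

int main() {
    std::vector<Thread*> tree;
    Thread t;
    ArbiterQueue q(&tree);
    tree.push_back(&t);
    t.BeginWait(&q);                        // waiter parks itself
    q.CancelWait(&t, ResultTimedOut, true); // timeout path cleans up the tree
    std::printf("waiting=%d result=%d remaining=%zu\n", t.waiting, t.wait_result, tree.size());
}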
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index e4fcdbc67..165b76747 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -170,6 +170,10 @@ public:
         }
     }
 
+    const std::string& GetName() const {
+        return name;
+    }
+
 private:
     void RegisterWithKernel();
     void UnregisterWithKernel();
diff --git a/src/core/hle/kernel/k_class_token.cpp b/src/core/hle/kernel/k_class_token.cpp
index 0be0027be..21e2fe494 100644
--- a/src/core/hle/kernel/k_class_token.cpp
+++ b/src/core/hle/kernel/k_class_token.cpp
@@ -6,6 +6,7 @@
6#include "core/hle/kernel/k_class_token.h" 6#include "core/hle/kernel/k_class_token.h"
7#include "core/hle/kernel/k_client_port.h" 7#include "core/hle/kernel/k_client_port.h"
8#include "core/hle/kernel/k_client_session.h" 8#include "core/hle/kernel/k_client_session.h"
9#include "core/hle/kernel/k_code_memory.h"
9#include "core/hle/kernel/k_event.h" 10#include "core/hle/kernel/k_event.h"
10#include "core/hle/kernel/k_port.h" 11#include "core/hle/kernel/k_port.h"
11#include "core/hle/kernel/k_process.h" 12#include "core/hle/kernel/k_process.h"
@@ -48,7 +49,7 @@ static_assert(ClassToken<KWritableEvent> == 0b10001001'00000000);
48static_assert(ClassToken<KTransferMemory> == 0b10010001'00000000); 49static_assert(ClassToken<KTransferMemory> == 0b10010001'00000000);
49// static_assert(ClassToken<KDeviceAddressSpace> == 0b01100001'00000000); 50// static_assert(ClassToken<KDeviceAddressSpace> == 0b01100001'00000000);
50// static_assert(ClassToken<KSessionRequest> == 0b10100001'00000000); 51// static_assert(ClassToken<KSessionRequest> == 0b10100001'00000000);
51// static_assert(ClassToken<KCodeMemory> == 0b11000001'00000000); 52static_assert(ClassToken<KCodeMemory> == 0b11000001'00000000);
52 53
53// Ensure that the token hierarchy is correct. 54// Ensure that the token hierarchy is correct.
54 55
@@ -79,7 +80,7 @@ static_assert(ClassToken<KWritableEvent> == ((0b10001001 << 8) | ClassToken<KAut
79static_assert(ClassToken<KTransferMemory> == ((0b10010001 << 8) | ClassToken<KAutoObject>)); 80static_assert(ClassToken<KTransferMemory> == ((0b10010001 << 8) | ClassToken<KAutoObject>));
80// static_assert(ClassToken<KDeviceAddressSpace> == ((0b01100001 << 8) | ClassToken<KAutoObject>)); 81// static_assert(ClassToken<KDeviceAddressSpace> == ((0b01100001 << 8) | ClassToken<KAutoObject>));
81// static_assert(ClassToken<KSessionRequest> == ((0b10100001 << 8) | ClassToken<KAutoObject>)); 82// static_assert(ClassToken<KSessionRequest> == ((0b10100001 << 8) | ClassToken<KAutoObject>));
82// static_assert(ClassToken<KCodeMemory> == ((0b11000001 << 8) | ClassToken<KAutoObject>)); 83static_assert(ClassToken<KCodeMemory> == ((0b11000001 << 8) | ClassToken<KAutoObject>));
83 84
84// Ensure that the token hierarchy reflects the class hierarchy. 85// Ensure that the token hierarchy reflects the class hierarchy.
85 86
diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp
new file mode 100644
index 000000000..d69f7ffb7
--- /dev/null
+++ b/src/core/hle/kernel/k_code_memory.cpp
@@ -0,0 +1,146 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/common_types.h"
+#include "core/device_memory.h"
+#include "core/hle/kernel/k_auto_object.h"
+#include "core/hle/kernel/k_code_memory.h"
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_memory_block.h"
+#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/slab_helpers.h"
+#include "core/hle/kernel/svc_types.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+KCodeMemory::KCodeMemory(KernelCore& kernel_)
+    : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_lock(kernel_) {}
+
+ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
+    // Set members.
+    m_owner = kernel.CurrentProcess();
+
+    // Get the owner page table.
+    auto& page_table = m_owner->PageTable();
+
+    // Construct the page group.
+    KMemoryInfo kBlockInfo = page_table.QueryInfo(addr);
+    m_page_group = KPageLinkedList(kBlockInfo.GetAddress(), kBlockInfo.GetNumPages());
+
+    // Lock the memory.
+    R_TRY(page_table.LockForCodeMemory(addr, size))
+
+    // Clear the memory.
+    for (const auto& block : m_page_group.Nodes()) {
+        std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
+    }
+
+    // Set remaining tracking members.
+    m_address = addr;
+    m_is_initialized = true;
+    m_is_owner_mapped = false;
+    m_is_mapped = false;
+
+    // We succeeded.
+    return ResultSuccess;
+}
+
+void KCodeMemory::Finalize() {
+    // Unlock.
+    if (!m_is_mapped && !m_is_owner_mapped) {
+        const size_t size = m_page_group.GetNumPages() * PageSize;
+        m_owner->PageTable().UnlockForCodeMemory(m_address, size);
+    }
+}
+
+ResultCode KCodeMemory::Map(VAddr address, size_t size) {
+    // Validate the size.
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+
+    // Lock ourselves.
+    KScopedLightLock lk(m_lock);
+
+    // Ensure we're not already mapped.
+    R_UNLESS(!m_is_mapped, ResultInvalidState);
+
+    // Map the memory.
+    R_TRY(kernel.CurrentProcess()->PageTable().MapPages(
+        address, m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));
+
+    // Mark ourselves as mapped.
+    m_is_mapped = true;
+
+    return ResultSuccess;
+}
+
+ResultCode KCodeMemory::Unmap(VAddr address, size_t size) {
+    // Validate the size.
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+
+    // Lock ourselves.
+    KScopedLightLock lk(m_lock);
+
+    // Unmap the memory.
+    R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, m_page_group,
+                                                          KMemoryState::CodeOut));
+
+    // Mark ourselves as unmapped.
+    m_is_mapped = false;
+
+    return ResultSuccess;
+}
+
+ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) {
+    // Validate the size.
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+
+    // Lock ourselves.
+    KScopedLightLock lk(m_lock);
+
+    // Ensure we're not already mapped.
+    R_UNLESS(!m_is_owner_mapped, ResultInvalidState);
+
+    // Convert the memory permission.
+    KMemoryPermission k_perm{};
+    switch (perm) {
+    case Svc::MemoryPermission::Read:
+        k_perm = KMemoryPermission::UserRead;
+        break;
+    case Svc::MemoryPermission::ReadExecute:
+        k_perm = KMemoryPermission::UserReadExecute;
+        break;
+    default:
+        break;
+    }
+
+    // Map the memory.
+    R_TRY(
+        m_owner->PageTable().MapPages(address, m_page_group, KMemoryState::GeneratedCode, k_perm));
+
+    // Mark ourselves as mapped.
+    m_is_owner_mapped = true;
+
+    return ResultSuccess;
+}
+
+ResultCode KCodeMemory::UnmapFromOwner(VAddr address, size_t size) {
+    // Validate the size.
+    R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize);
+
+    // Lock ourselves.
+    KScopedLightLock lk(m_lock);
+
+    // Unmap the memory.
+    R_TRY(m_owner->PageTable().UnmapPages(address, m_page_group, KMemoryState::GeneratedCode));
+
+    // Mark ourselves as unmapped.
+    m_is_owner_mapped = false;
+
+    return ResultSuccess;
+}
+
+} // namespace Kernel
\ No newline at end of file
diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h
new file mode 100644
index 000000000..e0ba19a53
--- /dev/null
+++ b/src/core/hle/kernel/k_code_memory.h
@@ -0,0 +1,66 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "core/device_memory.h"
+#include "core/hle/kernel/k_auto_object.h"
+#include "core/hle/kernel/k_light_lock.h"
+#include "core/hle/kernel/k_page_linked_list.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/slab_helpers.h"
+#include "core/hle/kernel/svc_types.h"
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+enum class CodeMemoryOperation : u32 {
+    Map = 0,
+    MapToOwner = 1,
+    Unmap = 2,
+    UnmapFromOwner = 3,
+};
+
+class KCodeMemory final
+    : public KAutoObjectWithSlabHeapAndContainer<KCodeMemory, KAutoObjectWithList> {
+    KERNEL_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject);
+
+public:
+    explicit KCodeMemory(KernelCore& kernel_);
+
+    ResultCode Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
+    void Finalize();
+
+    ResultCode Map(VAddr address, size_t size);
+    ResultCode Unmap(VAddr address, size_t size);
+    ResultCode MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm);
+    ResultCode UnmapFromOwner(VAddr address, size_t size);
+
+    bool IsInitialized() const {
+        return m_is_initialized;
+    }
+    static void PostDestroy([[maybe_unused]] uintptr_t arg) {}
+
+    KProcess* GetOwner() const {
+        return m_owner;
+    }
+    VAddr GetSourceAddress() const {
+        return m_address;
+    }
+    size_t GetSize() const {
+        return m_is_initialized ? m_page_group.GetNumPages() * PageSize : 0;
+    }
+
+private:
+    KPageLinkedList m_page_group{};
+    KProcess* m_owner{};
+    VAddr m_address{};
+    KLightLock m_lock;
+    bool m_is_initialized{};
+    bool m_is_owner_mapped{};
+    bool m_is_mapped{};
+};
+
+} // namespace Kernel
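
As a rough illustration of the interface above: Map/Unmap drive the m_is_mapped flag, MapToOwner/UnmapFromOwner drive m_is_owner_mapped, and Finalize only unlocks the source range once both are clear. A hedged standalone sketch of that state machine (plain C++ with illustrative names, not yuzu's types):

#include <cstdio>

// Illustrative model only: mirrors the R_UNLESS(..., ResultInvalidState)
// checks in KCodeMemory, without page tables or locking.
enum Result { Ok, InvalidState };

struct CodeMemoryModel {
    bool is_mapped = false;
    bool is_owner_mapped = false;

    Result Map() {
        if (is_mapped) {
            return InvalidState; // like R_UNLESS(!m_is_mapped, ResultInvalidState)
        }
        is_mapped = true;
        return Ok;
    }
    Result Unmap() {
        is_mapped = false;
        return Ok;
    }
    Result MapToOwner() {
        if (is_owner_mapped) {
            return InvalidState;
        }
        is_owner_mapped = true;
        return Ok;
    }
    Result UnmapFromOwner() {
        is_owner_mapped = false;
        return Ok;
    }
    // Finalize() only unlocks the source range when nothing is mapped.
    bool CanUnlockOnFinalize() const { return !is_mapped && !is_owner_mapped; }
};

int main() {
    CodeMemoryModel m;
    m.Map();
    std::printf("double map rejected: %d\n", m.Map() == InvalidState);
    m.Unmap();
    std::printf("finalize may unlock: %d\n", m.CanUnlockOnFinalize());
}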
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 7fa9b8cc3..aadcc297a 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -11,6 +11,7 @@
11#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 11#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
12#include "core/hle/kernel/k_synchronization_object.h" 12#include "core/hle/kernel/k_synchronization_object.h"
13#include "core/hle/kernel/k_thread.h" 13#include "core/hle/kernel/k_thread.h"
14#include "core/hle/kernel/k_thread_queue.h"
14#include "core/hle/kernel/kernel.h" 15#include "core/hle/kernel/kernel.h"
15#include "core/hle/kernel/svc_common.h" 16#include "core/hle/kernel/svc_common.h"
16#include "core/hle/kernel/svc_results.h" 17#include "core/hle/kernel/svc_results.h"
@@ -33,7 +34,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
33bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero, 34bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
34 u32 new_orr_mask) { 35 u32 new_orr_mask) {
35 auto& monitor = system.Monitor(); 36 auto& monitor = system.Monitor();
36 const auto current_core = system.CurrentCoreIndex(); 37 const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
37 38
38 // Load the value from the address. 39 // Load the value from the address.
39 const auto expected = monitor.ExclusiveRead32(current_core, address); 40 const auto expected = monitor.ExclusiveRead32(current_core, address);
@@ -57,6 +58,48 @@ bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero
57 return true; 58 return true;
58} 59}
59 60
61class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue {
62public:
63 explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_)
64 : KThreadQueue(kernel_) {}
65
66 void CancelWait(KThread* waiting_thread, ResultCode wait_result,
67 bool cancel_timer_task) override {
68 // Remove the thread as a waiter from its owner.
69 waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);
70
71 // Invoke the base cancel wait handler.
72 KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
73 }
74};
75
76class ThreadQueueImplForKConditionVariableWaitConditionVariable final : public KThreadQueue {
77private:
78 KConditionVariable::ThreadTree* m_tree;
79
80public:
81 explicit ThreadQueueImplForKConditionVariableWaitConditionVariable(
82 KernelCore& kernel_, KConditionVariable::ThreadTree* t)
83 : KThreadQueue(kernel_), m_tree(t) {}
84
85 void CancelWait(KThread* waiting_thread, ResultCode wait_result,
86 bool cancel_timer_task) override {
87 // Remove the thread as a waiter from its owner.
88 if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
89 owner->RemoveWaiter(waiting_thread);
90 }
91
92 // If the thread is waiting on a condvar, remove it from the tree.
93 if (waiting_thread->IsWaitingForConditionVariable()) {
94 m_tree->erase(m_tree->iterator_to(*waiting_thread));
95 waiting_thread->ClearConditionVariable();
96 }
97
98 // Invoke the base cancel wait handler.
99 KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
100 }
101};
102
60} // namespace 103} // namespace
61 104
62KConditionVariable::KConditionVariable(Core::System& system_) 105KConditionVariable::KConditionVariable(Core::System& system_)
@@ -78,84 +121,77 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
78 121
79 // Determine the next tag. 122 // Determine the next tag.
80 u32 next_value{}; 123 u32 next_value{};
81 if (next_owner_thread) { 124 if (next_owner_thread != nullptr) {
82 next_value = next_owner_thread->GetAddressKeyValue(); 125 next_value = next_owner_thread->GetAddressKeyValue();
83 if (num_waiters > 1) { 126 if (num_waiters > 1) {
84 next_value |= Svc::HandleWaitMask; 127 next_value |= Svc::HandleWaitMask;
85 } 128 }
86 129
87 next_owner_thread->SetSyncedObject(nullptr, ResultSuccess); 130 // Write the value to userspace.
88 next_owner_thread->Wakeup(); 131 ResultCode result{ResultSuccess};
89 } 132 if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
90 133 result = ResultSuccess;
91 // Write the value to userspace. 134 } else {
92 if (!WriteToUser(system, addr, std::addressof(next_value))) { 135 result = ResultInvalidCurrentMemory;
93 if (next_owner_thread) {
94 next_owner_thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory);
95 } 136 }
96 137
97 return ResultInvalidCurrentMemory; 138 // Signal the next owner thread.
139 next_owner_thread->EndWait(result);
140 return result;
141 } else {
142 // Just write the value to userspace.
143 R_UNLESS(WriteToUser(system, addr, std::addressof(next_value)),
144 ResultInvalidCurrentMemory);
145
146 return ResultSuccess;
98 } 147 }
99 } 148 }
100
101 return ResultSuccess;
102} 149}
103 150
104ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) { 151ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
105 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); 152 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
153 ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
106 154
107 // Wait for the address. 155 // Wait for the address.
156 KThread* owner_thread{};
108 { 157 {
109 KScopedAutoObject<KThread> owner_thread; 158 KScopedSchedulerLock sl(kernel);
110 ASSERT(owner_thread.IsNull());
111 {
112 KScopedSchedulerLock sl(kernel);
113 cur_thread->SetSyncedObject(nullptr, ResultSuccess);
114 159
115 // Check if the thread should terminate. 160 // Check if the thread should terminate.
116 R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested); 161 R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
117 162
118 { 163 // Read the tag from userspace.
119 // Read the tag from userspace. 164 u32 test_tag{};
120 u32 test_tag{}; 165 R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);
121 R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr),
122 ResultInvalidCurrentMemory);
123
124 // If the tag isn't the handle (with wait mask), we're done.
125 R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), ResultSuccess);
126
127 // Get the lock owner thread.
128 owner_thread =
129 kernel.CurrentProcess()->GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(
130 handle);
131 R_UNLESS(owner_thread.IsNotNull(), ResultInvalidHandle);
132
133 // Update the lock.
134 cur_thread->SetAddressKey(addr, value);
135 owner_thread->AddWaiter(cur_thread);
136 cur_thread->SetState(ThreadState::Waiting);
137 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
138 cur_thread->SetMutexWaitAddressForDebugging(addr);
139 }
140 }
141 ASSERT(owner_thread.IsNotNull());
142 }
143 166
144 // Remove the thread as a waiter from the lock owner. 167 // If the tag isn't the handle (with wait mask), we're done.
145 { 168 R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));
146 KScopedSchedulerLock sl(kernel); 169
147 KThread* owner_thread = cur_thread->GetLockOwner(); 170 // Get the lock owner thread.
148 if (owner_thread != nullptr) { 171 owner_thread = kernel.CurrentProcess()
149 owner_thread->RemoveWaiter(cur_thread); 172 ->GetHandleTable()
150 } 173 .GetObjectWithoutPseudoHandle<KThread>(handle)
174 .ReleasePointerUnsafe();
175 R_UNLESS(owner_thread != nullptr, ResultInvalidHandle);
176
177 // Update the lock.
178 cur_thread->SetAddressKey(addr, value);
179 owner_thread->AddWaiter(cur_thread);
180
181 // Begin waiting.
182 cur_thread->BeginWait(std::addressof(wait_queue));
183 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
184 cur_thread->SetMutexWaitAddressForDebugging(addr);
151 } 185 }
152 186
187 // Close our reference to the owner thread, now that the wait is over.
188 owner_thread->Close();
189
153 // Get the wait result. 190 // Get the wait result.
154 KSynchronizationObject* dummy{}; 191 return cur_thread->GetWaitResult();
155 return cur_thread->GetWaitResult(std::addressof(dummy));
156} 192}
157 193
158KThread* KConditionVariable::SignalImpl(KThread* thread) { 194void KConditionVariable::SignalImpl(KThread* thread) {
159 // Check pre-conditions. 195 // Check pre-conditions.
160 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 196 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
161 197
@@ -169,18 +205,16 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
169 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. 205 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
170 // TODO(bunnei): We should call CanAccessAtomic(..) here. 206 // TODO(bunnei): We should call CanAccessAtomic(..) here.
171 can_access = true; 207 can_access = true;
172 if (can_access) { 208 if (can_access) [[likely]] {
173 UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag, 209 UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
174 Svc::HandleWaitMask); 210 Svc::HandleWaitMask);
175 } 211 }
176 } 212 }
177 213
178 KThread* thread_to_close = nullptr; 214 if (can_access) [[likely]] {
179 if (can_access) {
180 if (prev_tag == Svc::InvalidHandle) { 215 if (prev_tag == Svc::InvalidHandle) {
181 // If nobody held the lock previously, we're all good. 216 // If nobody held the lock previously, we're all good.
182 thread->SetSyncedObject(nullptr, ResultSuccess); 217 thread->EndWait(ResultSuccess);
183 thread->Wakeup();
184 } else { 218 } else {
185 // Get the previous owner. 219 // Get the previous owner.
186 KThread* owner_thread = kernel.CurrentProcess() 220 KThread* owner_thread = kernel.CurrentProcess()
@@ -189,33 +223,22 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
189 static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask)) 223 static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
190 .ReleasePointerUnsafe(); 224 .ReleasePointerUnsafe();
191 225
192 if (owner_thread) { 226 if (owner_thread) [[likely]] {
193 // Add the thread as a waiter on the owner. 227 // Add the thread as a waiter on the owner.
194 owner_thread->AddWaiter(thread); 228 owner_thread->AddWaiter(thread);
195 thread_to_close = owner_thread; 229 owner_thread->Close();
196 } else { 230 } else {
197 // The lock was tagged with a thread that doesn't exist. 231 // The lock was tagged with a thread that doesn't exist.
198 thread->SetSyncedObject(nullptr, ResultInvalidState); 232 thread->EndWait(ResultInvalidState);
199 thread->Wakeup();
200 } 233 }
201 } 234 }
202 } else { 235 } else {
203 // If the address wasn't accessible, note so. 236 // If the address wasn't accessible, note so.
204 thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory); 237 thread->EndWait(ResultInvalidCurrentMemory);
205 thread->Wakeup();
206 } 238 }
207
208 return thread_to_close;
209} 239}
210 240
211void KConditionVariable::Signal(u64 cv_key, s32 count) { 241void KConditionVariable::Signal(u64 cv_key, s32 count) {
212 // Prepare for signaling.
213 constexpr int MaxThreads = 16;
214
215 KLinkedList<KThread> thread_list{kernel};
216 std::array<KThread*, MaxThreads> thread_array;
217 s32 num_to_close{};
218
219 // Perform signaling. 242 // Perform signaling.
220 s32 num_waiters{}; 243 s32 num_waiters{};
221 { 244 {
@@ -226,14 +249,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
226 (it->GetConditionVariableKey() == cv_key)) { 249 (it->GetConditionVariableKey() == cv_key)) {
227 KThread* target_thread = std::addressof(*it); 250 KThread* target_thread = std::addressof(*it);
228 251
229 if (KThread* thread = SignalImpl(target_thread); thread != nullptr) { 252 this->SignalImpl(target_thread);
230 if (num_to_close < MaxThreads) {
231 thread_array[num_to_close++] = thread;
232 } else {
233 thread_list.push_back(*thread);
234 }
235 }
236
237 it = thread_tree.erase(it); 253 it = thread_tree.erase(it);
238 target_thread->ClearConditionVariable(); 254 target_thread->ClearConditionVariable();
239 ++num_waiters; 255 ++num_waiters;
@@ -245,27 +261,16 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
245 WriteToUser(system, cv_key, std::addressof(has_waiter_flag)); 261 WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
246 } 262 }
247 } 263 }
248
249 // Close threads in the array.
250 for (auto i = 0; i < num_to_close; ++i) {
251 thread_array[i]->Close();
252 }
253
254 // Close threads in the list.
255 for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) {
256 (*it).Close();
257 }
258} 264}
259 265
260ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) { 266ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
261 // Prepare to wait. 267 // Prepare to wait.
262 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); 268 KThread* cur_thread = GetCurrentThreadPointer(kernel);
269 ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(
270 kernel, std::addressof(thread_tree));
263 271
264 { 272 {
265 KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout}; 273 KScopedSchedulerLockAndSleep slp(kernel, cur_thread, timeout);
266
267 // Set the synced object.
268 cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
269 274
270 // Check that the thread isn't terminating. 275 // Check that the thread isn't terminating.
271 if (cur_thread->IsTerminationRequested()) { 276 if (cur_thread->IsTerminationRequested()) {
@@ -290,8 +295,7 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
290 } 295 }
291 296
292 // Wake up the next owner. 297 // Wake up the next owner.
293 next_owner_thread->SetSyncedObject(nullptr, ResultSuccess); 298 next_owner_thread->EndWait(ResultSuccess);
294 next_owner_thread->Wakeup();
295 } 299 }
296 300
297 // Write to the cv key. 301 // Write to the cv key.
@@ -308,40 +312,21 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
308 } 312 }
309 } 313 }
310 314
311 // Update condition variable tracking. 315 // If timeout is zero, time out.
312 { 316 R_UNLESS(timeout != 0, ResultTimedOut);
313 cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
314 thread_tree.insert(*cur_thread);
315 }
316 317
317 // If the timeout is non-zero, set the thread as waiting. 318 // Update condition variable tracking.
318 if (timeout != 0) { 319 cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
319 cur_thread->SetState(ThreadState::Waiting); 320 thread_tree.insert(*cur_thread);
320 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
321 cur_thread->SetMutexWaitAddressForDebugging(addr);
322 }
323 }
324
325 // Cancel the timer wait.
326 kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
327
328 // Remove from the condition variable.
329 {
330 KScopedSchedulerLock sl(kernel);
331
332 if (KThread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
333 owner->RemoveWaiter(cur_thread);
334 }
335 321
336 if (cur_thread->IsWaitingForConditionVariable()) { 322 // Begin waiting.
337 thread_tree.erase(thread_tree.iterator_to(*cur_thread)); 323 cur_thread->BeginWait(std::addressof(wait_queue));
338 cur_thread->ClearConditionVariable(); 324 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
339 } 325 cur_thread->SetMutexWaitAddressForDebugging(addr);
340 } 326 }
341 327
342 // Get the result. 328 // Get the wait result.
343 KSynchronizationObject* dummy{}; 329 return cur_thread->GetWaitResult();
344 return cur_thread->GetWaitResult(std::addressof(dummy));
345} 330}
346 331
347} // namespace Kernel 332} // namespace Kernel
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
index 861dbd420..5e4815d08 100644
--- a/src/core/hle/kernel/k_condition_variable.h
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -34,7 +34,7 @@ public:
     [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);
 
 private:
-    [[nodiscard]] KThread* SignalImpl(KThread* thread);
+    void SignalImpl(KThread* thread);
 
     ThreadTree thread_tree;
 
diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp
index e90fc0628..cf95f0852 100644
--- a/src/core/hle/kernel/k_handle_table.cpp
+++ b/src/core/hle/kernel/k_handle_table.cpp
@@ -13,6 +13,7 @@ ResultCode KHandleTable::Finalize() {
     // Get the table and clear our record of it.
     u16 saved_table_size = 0;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         std::swap(m_table_size, saved_table_size);
@@ -43,6 +44,7 @@ bool KHandleTable::Remove(Handle handle) {
     // Find the object and free the entry.
     KAutoObject* obj = nullptr;
     {
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if (this->IsValidHandle(handle)) {
@@ -62,6 +64,7 @@
 }
 
 ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.
@@ -84,6 +87,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
 }
 
 ResultCode KHandleTable::Reserve(Handle* out_handle) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.
@@ -94,6 +98,7 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) {
 }
 
 void KHandleTable::Unreserve(Handle handle) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.
@@ -112,6 +117,7 @@ void KHandleTable::Unreserve(Handle handle) {
 }
 
 void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
+    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index 95ec905ae..4b114ec2f 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
@@ -68,6 +68,7 @@ public:
     template <typename T = KAutoObject>
     KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
         // Lock and look up in table.
+        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if constexpr (std::is_same_v<T, KAutoObject>) {
@@ -122,6 +123,7 @@ public:
         size_t num_opened;
         {
             // Lock the table.
+            KScopedDisableDispatch dd(kernel);
             KScopedSpinLock lk(m_lock);
             for (num_opened = 0; num_opened < num_handles; num_opened++) {
                 // Get the current handle.
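
Each of these hunks stacks a KScopedDisableDispatch guard in front of the existing KScopedSpinLock, so the holder cannot be rescheduled while the table lock is held. A minimal RAII sketch of that disable/re-enable idea (illustrative only; yuzu's real guard lives in its kernel code, and this fake is single-threaded):

#include <cassert>

// Stand-in kernel: a counter of how many scopes have disabled dispatch.
struct FakeKernel {
    int disable_count = 0;
    bool DispatchAllowed() const { return disable_count == 0; }
};

// Scope guard that disables dispatch (rescheduling) while alive, mirroring
// how the dd(kernel) lines above are stacked before the spinlock.
class ScopedDisableDispatch {
public:
    explicit ScopedDisableDispatch(FakeKernel& k) : kernel(k) { ++kernel.disable_count; }
    ~ScopedDisableDispatch() { --kernel.disable_count; }

private:
    FakeKernel& kernel;
};

int main() {
    FakeKernel kernel;
    {
        ScopedDisableDispatch dd(kernel);  // like KScopedDisableDispatch dd(kernel);
        assert(!kernel.DispatchAllowed()); // spinlock-protected work happens here
    }
    assert(kernel.DispatchAllowed());      // dispatch re-enabled on scope exit
}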
diff --git a/src/core/hle/kernel/k_light_condition_variable.cpp b/src/core/hle/kernel/k_light_condition_variable.cpp
new file mode 100644
index 000000000..a8001fffc
--- /dev/null
+++ b/src/core/hle/kernel/k_light_condition_variable.cpp
@@ -0,0 +1,80 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/k_light_condition_variable.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread_queue.h"
+#include "core/hle/kernel/svc_results.h"
+
+namespace Kernel {
+
+namespace {
+
+class ThreadQueueImplForKLightConditionVariable final : public KThreadQueue {
+public:
+    ThreadQueueImplForKLightConditionVariable(KernelCore& kernel_, KThread::WaiterList* wl,
+                                              bool term)
+        : KThreadQueue(kernel_), m_wait_list(wl), m_allow_terminating_thread(term) {}
+
+    void CancelWait(KThread* waiting_thread, ResultCode wait_result,
+                    bool cancel_timer_task) override {
+        // Only process waits if we're allowed to.
+        if (ResultTerminationRequested == wait_result && m_allow_terminating_thread) {
+            return;
+        }
+
+        // Remove the thread from the waiting thread from the light condition variable.
+        m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
+
+        // Invoke the base cancel wait handler.
+        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
+    }
+
+private:
+    KThread::WaiterList* m_wait_list;
+    bool m_allow_terminating_thread;
+};
+
+} // namespace
+
+void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_terminating_thread) {
+    // Create thread queue.
+    KThread* owner = GetCurrentThreadPointer(kernel);
+
+    ThreadQueueImplForKLightConditionVariable wait_queue(kernel, std::addressof(wait_list),
+                                                         allow_terminating_thread);
+
+    // Sleep the thread.
+    {
+        KScopedSchedulerLockAndSleep lk(kernel, owner, timeout);
+
+        if (!allow_terminating_thread && owner->IsTerminationRequested()) {
+            lk.CancelSleep();
+            return;
+        }
+
+        lock->Unlock();
+
+        // Add the thread to the queue.
+        wait_list.push_back(*owner);
+
+        // Begin waiting.
+        owner->BeginWait(std::addressof(wait_queue));
+    }
+
+    // Re-acquire the lock.
+    lock->Lock();
+}
+
+void KLightConditionVariable::Broadcast() {
+    KScopedSchedulerLock lk(kernel);
+
+    // Signal all threads.
+    for (auto it = wait_list.begin(); it != wait_list.end(); it = wait_list.erase(it)) {
+        it->EndWait(ResultSuccess);
+    }
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_light_condition_variable.h b/src/core/hle/kernel/k_light_condition_variable.h
index fb0ad783a..5d6d7f128 100644
--- a/src/core/hle/kernel/k_light_condition_variable.h
+++ b/src/core/hle/kernel/k_light_condition_variable.h
@@ -2,72 +2,24 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-// This file references various implementation details from Atmosphere, an open-source firmware for
-// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
-
 #pragma once
 
 #include "common/common_types.h"
-#include "core/hle/kernel/k_scheduler.h"
-#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
-#include "core/hle/kernel/time_manager.h"
+#include "core/hle/kernel/k_thread.h"
 
 namespace Kernel {
+
 class KernelCore;
+class KLightLock;
 
 class KLightConditionVariable {
 public:
     explicit KLightConditionVariable(KernelCore& kernel_) : kernel{kernel_} {}
 
-    void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true) {
-        WaitImpl(lock, timeout, allow_terminating_thread);
-    }
-
-    void Broadcast() {
-        KScopedSchedulerLock lk{kernel};
-
-        // Signal all threads.
-        for (auto& thread : wait_list) {
-            thread.SetState(ThreadState::Runnable);
-        }
-    }
+    void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true);
+    void Broadcast();
 
 private:
-    void WaitImpl(KLightLock* lock, s64 timeout, bool allow_terminating_thread) {
-        KThread* owner = GetCurrentThreadPointer(kernel);
-
-        // Sleep the thread.
-        {
-            KScopedSchedulerLockAndSleep lk{kernel, owner, timeout};
-
-            if (!allow_terminating_thread && owner->IsTerminationRequested()) {
-                lk.CancelSleep();
-                return;
-            }
-
-            lock->Unlock();
-
-            // Set the thread as waiting.
-            GetCurrentThread(kernel).SetState(ThreadState::Waiting);
-
-            // Add the thread to the queue.
-            wait_list.push_back(GetCurrentThread(kernel));
-        }
-
-        // Remove the thread from the wait list.
-        {
-            KScopedSchedulerLock sl{kernel};
-
-            wait_list.erase(wait_list.iterator_to(GetCurrentThread(kernel)));
-        }
-
-        // Cancel the task that the sleep setup.
-        kernel.TimeManager().UnscheduleTimeEvent(owner);
-
-        // Re-acquire the lock.
-        lock->Lock();
-    }
-
     KernelCore& kernel;
     KThread::WaiterList wait_list{};
 };
diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp
index 0896e705f..4620342eb 100644
--- a/src/core/hle/kernel/k_light_lock.cpp
+++ b/src/core/hle/kernel/k_light_lock.cpp
@@ -5,44 +5,59 @@
5#include "core/hle/kernel/k_light_lock.h" 5#include "core/hle/kernel/k_light_lock.h"
6#include "core/hle/kernel/k_scheduler.h" 6#include "core/hle/kernel/k_scheduler.h"
7#include "core/hle/kernel/k_thread.h" 7#include "core/hle/kernel/k_thread.h"
8#include "core/hle/kernel/k_thread_queue.h"
8#include "core/hle/kernel/kernel.h" 9#include "core/hle/kernel/kernel.h"
9 10
10namespace Kernel { 11namespace Kernel {
11 12
13namespace {
14
15class ThreadQueueImplForKLightLock final : public KThreadQueue {
16public:
17 explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {}
18
19 void CancelWait(KThread* waiting_thread, ResultCode wait_result,
20 bool cancel_timer_task) override {
21 // Remove the thread as a waiter from its owner.
22 if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
23 owner->RemoveWaiter(waiting_thread);
24 }
25
26 // Invoke the base cancel wait handler.
27 KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
28 }
29};
30
31} // namespace
32
12void KLightLock::Lock() { 33void KLightLock::Lock() {
13 const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)); 34 const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
14 const uintptr_t cur_thread_tag = (cur_thread | 1);
15 35
16 while (true) { 36 while (true) {
17 uintptr_t old_tag = tag.load(std::memory_order_relaxed); 37 uintptr_t old_tag = tag.load(std::memory_order_relaxed);
18 38
19 while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1, 39 while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1),
20 std::memory_order_acquire)) { 40 std::memory_order_acquire)) {
21 if ((old_tag | 1) == cur_thread_tag) {
22 return;
23 }
24 } 41 }
25 42
26 if ((old_tag == 0) || ((old_tag | 1) == cur_thread_tag)) { 43 if (old_tag == 0 || this->LockSlowPath(old_tag | 1, cur_thread)) {
27 break; 44 break;
28 } 45 }
29
30 LockSlowPath(old_tag | 1, cur_thread);
31 } 46 }
32} 47}
33 48
34void KLightLock::Unlock() { 49void KLightLock::Unlock() {
35 const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)); 50 const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
51
36 uintptr_t expected = cur_thread; 52 uintptr_t expected = cur_thread;
37 do { 53 if (!tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
38 if (expected != cur_thread) { 54 this->UnlockSlowPath(cur_thread);
39 return UnlockSlowPath(cur_thread); 55 }
40 }
41 } while (!tag.compare_exchange_weak(expected, 0, std::memory_order_release));
42} 56}
43 57
44void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) { 58bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
45 KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread); 59 KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread);
60 ThreadQueueImplForKLightLock wait_queue(kernel);
46 61
47 // Pend the current thread waiting on the owner thread. 62 // Pend the current thread waiting on the owner thread.
48 { 63 {
@@ -50,7 +65,7 @@ void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
50 65
51 // Ensure we actually have locking to do. 66 // Ensure we actually have locking to do.
52 if (tag.load(std::memory_order_relaxed) != _owner) { 67 if (tag.load(std::memory_order_relaxed) != _owner) {
53 return; 68 return false;
54 } 69 }
55 70
56 // Add the current thread as a waiter on the owner. 71 // Add the current thread as a waiter on the owner.
@@ -58,22 +73,15 @@ void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
58 cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag))); 73 cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
59 owner_thread->AddWaiter(cur_thread); 74 owner_thread->AddWaiter(cur_thread);
60 75
61 // Set thread states. 76 // Begin waiting to hold the lock.
62 cur_thread->SetState(ThreadState::Waiting); 77 cur_thread->BeginWait(std::addressof(wait_queue));
63 78
64 if (owner_thread->IsSuspended()) { 79 if (owner_thread->IsSuspended()) {
65 owner_thread->ContinueIfHasKernelWaiters(); 80 owner_thread->ContinueIfHasKernelWaiters();
66 } 81 }
67 } 82 }
68 83
69 // We're no longer waiting on the lock owner. 84 return true;
70 {
71 KScopedSchedulerLock sl{kernel};
72
73 if (KThread* owner_thread = cur_thread->GetLockOwner(); owner_thread != nullptr) {
74 owner_thread->RemoveWaiter(cur_thread);
75 }
76 }
77} 85}
78 86
79void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) { 87void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
@@ -81,22 +89,20 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
81 89
82 // Unlock. 90 // Unlock.
83 { 91 {
84 KScopedSchedulerLock sl{kernel}; 92 KScopedSchedulerLock sl(kernel);
85 93
86 // Get the next owner. 94 // Get the next owner.
87 s32 num_waiters = 0; 95 s32 num_waiters;
88 KThread* next_owner = owner_thread->RemoveWaiterByKey( 96 KThread* next_owner = owner_thread->RemoveWaiterByKey(
89 std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag))); 97 std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));
90 98
91 // Pass the lock to the next owner. 99 // Pass the lock to the next owner.
92 uintptr_t next_tag = 0; 100 uintptr_t next_tag = 0;
93 if (next_owner != nullptr) { 101 if (next_owner != nullptr) {
94 next_tag = reinterpret_cast<uintptr_t>(next_owner); 102 next_tag =
95 if (num_waiters > 1) { 103 reinterpret_cast<uintptr_t>(next_owner) | static_cast<uintptr_t>(num_waiters > 1);
96 next_tag |= 0x1;
97 }
98 104
99 next_owner->SetState(ThreadState::Runnable); 105 next_owner->EndWait(ResultSuccess);
100 106
101 if (next_owner->IsSuspended()) { 107 if (next_owner->IsSuspended()) {
102 next_owner->ContinueIfHasKernelWaiters(); 108 next_owner->ContinueIfHasKernelWaiters();
@@ -110,7 +116,7 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
110 } 116 }
111 117
112 // Write the new tag value. 118 // Write the new tag value.
113 tag.store(next_tag); 119 tag.store(next_tag, std::memory_order_release);
114 } 120 }
115} 121}
116 122
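
The unlock fast path above works because the tag word packs the owner KThread pointer together with a low "has more waiters" bit: the strong compare-and-swap from cur_thread to 0 only succeeds while that bit is clear, so any contention falls through to UnlockSlowPath, which re-arms the tag as next_owner | (num_waiters > 1). A minimal standalone sketch of the same encoding (illustrative names, not yuzu's API; assumes thread objects are at least 2-byte aligned so bit 0 is free):

#include <atomic>
#include <cstdint>

// Tag layout: [KThread* owner | bit 0 = "more waiters remain"].
std::atomic<uintptr_t> tag{0};

bool TryLockFast(uintptr_t cur_thread) {
    uintptr_t expected = 0;
    // 0 -> owner; acquire pairs with the release store in unlock.
    return tag.compare_exchange_strong(expected, cur_thread,
                                       std::memory_order_acquire);
}

void Unlock(uintptr_t cur_thread) {
    uintptr_t expected = cur_thread;
    // Owner-with-no-waiters -> 0. A set waiter bit (or a foreign owner)
    // makes the CAS fail, which is exactly the slow-path condition.
    if (!tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
        // UnlockSlowPath(cur_thread): dequeue the next owner and store
        // next_owner | (num_waiters > 1), as in the diff above.
    }
}
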
diff --git a/src/core/hle/kernel/k_light_lock.h b/src/core/hle/kernel/k_light_lock.h
index ad853661d..4163b8a85 100644
--- a/src/core/hle/kernel/k_light_lock.h
+++ b/src/core/hle/kernel/k_light_lock.h
@@ -20,7 +20,7 @@ public:
20 20
21 void Unlock(); 21 void Unlock();
22 22
23 void LockSlowPath(uintptr_t owner, uintptr_t cur_thread); 23 bool LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
24 24
25 void UnlockSlowPath(uintptr_t cur_thread); 25 void UnlockSlowPath(uintptr_t cur_thread);
26 26
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index a7fdb5fb8..fd491146f 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -131,6 +131,26 @@ enum class KMemoryPermission : u8 {
131 131
132 UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write | 132 UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write |
133 Svc::MemoryPermission::Execute), 133 Svc::MemoryPermission::Execute),
134
135 KernelShift = 3,
136
137 KernelRead = Read << KernelShift,
138 KernelWrite = Write << KernelShift,
139 KernelExecute = Execute << KernelShift,
140
141 NotMapped = (1 << (2 * KernelShift)),
142
143 KernelReadWrite = KernelRead | KernelWrite,
144 KernelReadExecute = KernelRead | KernelExecute,
145
146 UserRead = Read | KernelRead,
147 UserWrite = Write | KernelWrite,
148 UserExecute = Execute,
149
150 UserReadWrite = UserRead | UserWrite,
151 UserReadExecute = UserRead | UserExecute,
152
153 IpcLockChangeMask = NotMapped | UserReadWrite
134}; 154};
135DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission); 155DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission);
136 156
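
The new enumerators extend the permission byte with kernel-side copies of the user bits, shifted up by KernelShift, plus a separate NotMapped bit above both groups. Assuming the usual Svc::MemoryPermission values (Read = 1, Write = 2, Execute = 4), the layout resolves to:

#include <cstdint>

enum : std::uint8_t {
    Read = 1, Write = 2, Execute = 4,        // user bits 0-2
    KernelShift   = 3,
    KernelRead    = Read << KernelShift,     // 0x08
    KernelWrite   = Write << KernelShift,    // 0x10
    KernelExecute = Execute << KernelShift,  // 0x20
    NotMapped     = 1 << (2 * KernelShift),  // 0x40
    UserRead      = Read | KernelRead,       // user-visible implies kernel-visible
    UserWrite     = Write | KernelWrite,
    UserExecute   = Execute,                 // no KernelExecute bit here
};

The asymmetry appears deliberate: UserRead and UserWrite imply the matching kernel bit, while UserExecute omits KernelExecute, which reads as keeping user code non-executable from the kernel's side.
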
diff --git a/src/core/hle/kernel/k_page_linked_list.h b/src/core/hle/kernel/k_page_linked_list.h
index 3362fb236..0e2ae582a 100644
--- a/src/core/hle/kernel/k_page_linked_list.h
+++ b/src/core/hle/kernel/k_page_linked_list.h
@@ -27,6 +27,10 @@ public:
27 return num_pages; 27 return num_pages;
28 } 28 }
29 29
30 constexpr std::size_t GetSize() const {
31 return GetNumPages() * PageSize;
32 }
33
30 private: 34 private:
31 u64 addr{}; 35 u64 addr{};
32 std::size_t num_pages{}; 36 std::size_t num_pages{};
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 526b87241..99982e5a3 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -368,6 +368,33 @@ ResultCode KPageTable::UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, st
368 return ResultSuccess; 368 return ResultSuccess;
369} 369}
370 370
371ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
372 KPageTable& src_page_table, VAddr src_addr) {
373 std::lock_guard lock{page_table_lock};
374
375 const std::size_t num_pages{size / PageSize};
376
377 // Check that the memory is mapped in the destination process.
378 size_t num_allocator_blocks;
379 R_TRY(CheckMemoryState(&num_allocator_blocks, dst_addr, size, KMemoryState::All,
380 KMemoryState::SharedCode, KMemoryPermission::UserReadWrite,
381 KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
382 KMemoryAttribute::None));
383
384 // Check that the memory is mapped in the source process.
385 R_TRY(src_page_table.CheckMemoryState(src_addr, size, KMemoryState::FlagCanMapProcess,
386 KMemoryState::FlagCanMapProcess, KMemoryPermission::None,
387 KMemoryPermission::None, KMemoryAttribute::All,
388 KMemoryAttribute::None));
389
390 CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap));
391
392 // Apply the memory block update.
393 block_manager->Update(dst_addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
394 KMemoryAttribute::None);
395
396 return ResultSuccess;
397}
371void KPageTable::MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end) { 398void KPageTable::MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end) {
372 auto node{page_linked_list.Nodes().begin()}; 399 auto node{page_linked_list.Nodes().begin()};
373 PAddr map_addr{node->GetAddress()}; 400 PAddr map_addr{node->GetAddress()};
@@ -685,8 +712,8 @@ ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
685 return ResultSuccess; 712 return ResultSuccess;
686} 713}
687 714
688ResultCode KPageTable::SetCodeMemoryPermission(VAddr addr, std::size_t size, 715ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
689 KMemoryPermission perm) { 716 KMemoryPermission perm) {
690 717
691 std::lock_guard lock{page_table_lock}; 718 std::lock_guard lock{page_table_lock};
692 719
@@ -942,6 +969,60 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
942 return ResultSuccess; 969 return ResultSuccess;
943} 970}
944 971
972ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
973 std::lock_guard lock{page_table_lock};
974
975 KMemoryPermission new_perm = KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite;
976
977 KMemoryPermission old_perm{};
978
979 if (const ResultCode result{CheckMemoryState(
980 nullptr, &old_perm, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
981 KMemoryState::FlagCanCodeMemory, KMemoryPermission::Mask,
982 KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)};
983 result.IsError()) {
984 return result;
985 }
986
987 new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
988
989 block_manager->UpdateLock(
990 addr, size / PageSize,
991 [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
992 block->ShareToDevice(permission);
993 },
994 new_perm);
995
996 return ResultSuccess;
997}
998
999ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) {
1000 std::lock_guard lock{page_table_lock};
1001
1002 KMemoryPermission new_perm = KMemoryPermission::UserReadWrite;
1003
1004 KMemoryPermission old_perm{};
1005
1006 if (const ResultCode result{CheckMemoryState(
1007 nullptr, &old_perm, nullptr, addr, size, KMemoryState::FlagCanCodeMemory,
1008 KMemoryState::FlagCanCodeMemory, KMemoryPermission::None, KMemoryPermission::None,
1009 KMemoryAttribute::All, KMemoryAttribute::Locked)};
1010 result.IsError()) {
1011 return result;
1012 }
1013
1014 new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm;
1015
1016 block_manager->UpdateLock(
1017 addr, size / PageSize,
1018 [](KMemoryBlockManager::iterator block, KMemoryPermission permission) {
1019 block->UnshareToDevice(permission);
1020 },
1021 new_perm);
1022
1023 return ResultSuccess;
1024}
1025
945ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) { 1026ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
946 block_manager = std::make_unique<KMemoryBlockManager>(start, end); 1027 block_manager = std::make_unique<KMemoryBlockManager>(start, end);
947 1028
@@ -1231,4 +1312,42 @@ ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermissi
1231 return ResultSuccess; 1312 return ResultSuccess;
1232} 1313}
1233 1314
1315ResultCode KPageTable::CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
1316 KMemoryState state_mask, KMemoryState state,
1317 KMemoryPermission perm_mask, KMemoryPermission perm,
1318 KMemoryAttribute attr_mask, KMemoryAttribute attr) const {
1319 // Get information about the first block.
1320 const VAddr last_addr = addr + size - 1;
1321 KMemoryBlockManager::const_iterator it{block_manager->FindIterator(addr)};
1322 KMemoryInfo info = it->GetMemoryInfo();
1323
1324 // If the start address isn't aligned, we need a block.
1325 const size_t blocks_for_start_align =
1326 (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0;
1327
1328 while (true) {
1329 // Validate against the provided masks.
1330 R_TRY(CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr));
1331
1332 // Break once we're done.
1333 if (last_addr <= info.GetLastAddress()) {
1334 break;
1335 }
1336
1337 // Advance our iterator.
1338 it++;
1339 info = it->GetMemoryInfo();
1340 }
1341
1342 // If the end address isn't aligned, we need a block.
1343 const size_t blocks_for_end_align =
1344 (Common::AlignUp(addr + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
1345
1346 if (out_blocks_needed != nullptr) {
1347 *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
1348 }
1349
1350 return ResultSuccess;
1351}
1352
1234} // namespace Kernel 1353} // namespace Kernel
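
The CheckMemoryState overload added at the end only has to report how many extra blocks an update would create: one if the range's aligned start does not coincide with the first block's base, and one if its aligned end does not coincide with the last block's end. A simplified, self-contained model of those two checks (assuming, as elsewhere in this file, that a block's end address is its base plus its size):

#include <cstddef>
#include <cstdint>

constexpr std::size_t PageSize = 0x1000;

// How many existing blocks would a change over [addr, addr + size) split?
std::size_t BlocksNeeded(std::uintptr_t first_block_addr,
                         std::uintptr_t last_block_end,
                         std::uintptr_t addr, std::size_t size) {
    std::size_t needed = 0;
    // Start falls strictly inside the first block -> split it.
    if ((addr & ~(PageSize - 1)) != first_block_addr) {
        ++needed;
    }
    // End falls strictly inside the last block -> split that one too.
    const std::uintptr_t aligned_end = (addr + size + PageSize - 1) & ~(PageSize - 1);
    if (aligned_end != last_block_end) {
        ++needed;
    }
    return needed;
}
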
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 770c4841c..d784aa67e 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -33,6 +33,8 @@ public:
33 KMemoryPermission perm); 33 KMemoryPermission perm);
34 ResultCode MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); 34 ResultCode MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
35 ResultCode UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); 35 ResultCode UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
36 ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
37 VAddr src_addr);
36 ResultCode MapPhysicalMemory(VAddr addr, std::size_t size); 38 ResultCode MapPhysicalMemory(VAddr addr, std::size_t size);
37 ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size); 39 ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size);
38 ResultCode UnmapMemory(VAddr addr, std::size_t size); 40 ResultCode UnmapMemory(VAddr addr, std::size_t size);
@@ -41,7 +43,7 @@ public:
41 ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state, 43 ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state,
42 KMemoryPermission perm); 44 KMemoryPermission perm);
43 ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state); 45 ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state);
44 ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, KMemoryPermission perm); 46 ResultCode SetProcessMemoryPermission(VAddr addr, std::size_t size, KMemoryPermission perm);
45 KMemoryInfo QueryInfo(VAddr addr); 47 KMemoryInfo QueryInfo(VAddr addr);
46 ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm); 48 ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
47 ResultCode ResetTransferMemory(VAddr addr, std::size_t size); 49 ResultCode ResetTransferMemory(VAddr addr, std::size_t size);
@@ -55,6 +57,8 @@ public:
55 KMemoryPermission perm, PAddr map_addr = 0); 57 KMemoryPermission perm, PAddr map_addr = 0);
56 ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size); 58 ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
57 ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size); 59 ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
60 ResultCode LockForCodeMemory(VAddr addr, std::size_t size);
61 ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size);
58 62
59 Common::PageTable& PageTableImpl() { 63 Common::PageTable& PageTableImpl() {
60 return page_table_impl; 64 return page_table_impl;
@@ -115,6 +119,10 @@ private:
115 return CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask, 119 return CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask,
116 perm, attr_mask, attr, ignore_attr); 120 perm, attr_mask, attr, ignore_attr);
117 } 121 }
122 ResultCode CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
123 KMemoryState state_mask, KMemoryState state,
124 KMemoryPermission perm_mask, KMemoryPermission perm,
125 KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
118 126
119 std::recursive_mutex page_table_lock; 127 std::recursive_mutex page_table_lock;
120 std::unique_ptr<KMemoryBlockManager> block_manager; 128 std::unique_ptr<KMemoryBlockManager> block_manager;
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 76fd8c285..90dda40dc 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -60,6 +60,7 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
60 thread->GetContext64().cpu_registers[0] = 0; 60 thread->GetContext64().cpu_registers[0] = 0;
61 thread->GetContext32().cpu_registers[1] = thread_handle; 61 thread->GetContext32().cpu_registers[1] = thread_handle;
62 thread->GetContext64().cpu_registers[1] = thread_handle; 62 thread->GetContext64().cpu_registers[1] = thread_handle;
63 thread->DisableDispatch();
63 64
64 auto& kernel = system.Kernel(); 65 auto& kernel = system.Kernel();
65 // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires 66 // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
@@ -227,12 +228,15 @@ void KProcess::PinCurrentThread() {
227 const s32 core_id = GetCurrentCoreId(kernel); 228 const s32 core_id = GetCurrentCoreId(kernel);
228 KThread* cur_thread = GetCurrentThreadPointer(kernel); 229 KThread* cur_thread = GetCurrentThreadPointer(kernel);
229 230
230 // Pin it. 231 // If the thread isn't terminated, pin it.
231 PinThread(core_id, cur_thread); 232 if (!cur_thread->IsTerminationRequested()) {
232 cur_thread->Pin(); 233 // Pin it.
234 PinThread(core_id, cur_thread);
235 cur_thread->Pin();
233 236
234 // An update is needed. 237 // An update is needed.
235 KScheduler::SetSchedulerUpdateNeeded(kernel); 238 KScheduler::SetSchedulerUpdateNeeded(kernel);
239 }
236} 240}
237 241
238void KProcess::UnpinCurrentThread() { 242void KProcess::UnpinCurrentThread() {
@@ -250,6 +254,20 @@ void KProcess::UnpinCurrentThread() {
250 KScheduler::SetSchedulerUpdateNeeded(kernel); 254 KScheduler::SetSchedulerUpdateNeeded(kernel);
251} 255}
252 256
257void KProcess::UnpinThread(KThread* thread) {
258 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
259
260 // Get the thread's core id.
261 const auto core_id = thread->GetActiveCore();
262
263 // Unpin it.
264 UnpinThread(core_id, thread);
265 thread->Unpin();
266
267 // An update is needed.
268 KScheduler::SetSchedulerUpdateNeeded(kernel);
269}
270
253ResultCode KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, 271ResultCode KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
254 [[maybe_unused]] size_t size) { 272 [[maybe_unused]] size_t size) {
255 // Lock ourselves, to prevent concurrent access. 273 // Lock ourselves, to prevent concurrent access.
@@ -528,7 +546,7 @@ void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
528 std::lock_guard lock{HLE::g_hle_lock}; 546 std::lock_guard lock{HLE::g_hle_lock};
529 const auto ReprotectSegment = [&](const CodeSet::Segment& segment, 547 const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
530 KMemoryPermission permission) { 548 KMemoryPermission permission) {
531 page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission); 549 page_table->SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
532 }; 550 };
533 551
534 kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(), 552 kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
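
PinCurrentThread now declines to pin a thread whose termination has been requested, presumably because such a thread would never reach the matching unpin, and the new UnpinThread is its scheduler-locked inverse for exactly that termination path. Condensed from the diff (free-function form for illustration only):

// Assumes the scheduler lock is held, as in the diff.
void PinCurrentThreadIfAlive(KThread* cur_thread, s32 core_id) {
    // A thread that is about to terminate must not be pinned:
    // it would never execute the matching unpin.
    if (cur_thread->IsTerminationRequested()) {
        return;
    }
    PinThread(core_id, cur_thread);                // per-core pin slot
    cur_thread->Pin();                             // thread-side pinned flag
    KScheduler::SetSchedulerUpdateNeeded(kernel);  // priorities may change
}
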
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index 8a8c1fcbb..cb93c7e24 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -347,6 +347,7 @@ public:
347 347
348 void PinCurrentThread(); 348 void PinCurrentThread();
349 void UnpinCurrentThread(); 349 void UnpinCurrentThread();
350 void UnpinThread(KThread* thread);
350 351
351 KLightLock& GetStateLock() { 352 KLightLock& GetStateLock() {
352 return state_lock; 353 return state_lock;
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 6a7d80d03..277201de4 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -240,8 +240,8 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s3
240 240
241 // If the thread is runnable, we want to change its priority in the queue. 241 // If the thread is runnable, we want to change its priority in the queue.
242 if (thread->GetRawState() == ThreadState::Runnable) { 242 if (thread->GetRawState() == ThreadState::Runnable) {
243 GetPriorityQueue(kernel).ChangePriority( 243 GetPriorityQueue(kernel).ChangePriority(old_priority,
244 old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread); 244 thread == kernel.GetCurrentEmuThread(), thread);
245 IncrementScheduledCount(thread); 245 IncrementScheduledCount(thread);
246 SetSchedulerUpdateNeeded(kernel); 246 SetSchedulerUpdateNeeded(kernel);
247 } 247 }
@@ -360,7 +360,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
360} 360}
361 361
362bool KScheduler::CanSchedule(KernelCore& kernel) { 362bool KScheduler::CanSchedule(KernelCore& kernel) {
363 return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1; 363 return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() <= 1;
364} 364}
365 365
366bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) { 366bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
@@ -376,20 +376,30 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
376} 376}
377 377
378void KScheduler::DisableScheduling(KernelCore& kernel) { 378void KScheduler::DisableScheduling(KernelCore& kernel) {
379 if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { 379 // If we are shutting down the kernel, none of this is relevant anymore.
380 ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0); 380 if (kernel.IsShuttingDown()) {
381 scheduler->GetCurrentThread()->DisableDispatch(); 381 return;
382 } 382 }
383
384 ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
385 GetCurrentThreadPointer(kernel)->DisableDispatch();
383} 386}
384 387
385void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) { 388void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
386 if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { 389 // If we are shutting down the kernel, none of this is relevant anymore.
387 ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1); 390 if (kernel.IsShuttingDown()) {
388 if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) { 391 return;
389 scheduler->GetCurrentThread()->EnableDispatch(); 392 }
390 } 393
394 auto* current_thread = GetCurrentThreadPointer(kernel);
395
396 ASSERT(current_thread->GetDisableDispatchCount() >= 1);
397
398 if (current_thread->GetDisableDispatchCount() > 1) {
399 current_thread->EnableDispatch();
400 } else {
401 RescheduleCores(kernel, cores_needing_scheduling);
391 } 402 }
392 RescheduleCores(kernel, cores_needing_scheduling);
393} 403}
394 404
395u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { 405u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -617,13 +627,17 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
617 state.highest_priority_thread = nullptr; 627 state.highest_priority_thread = nullptr;
618} 628}
619 629
620KScheduler::~KScheduler() { 630void KScheduler::Finalize() {
621 if (idle_thread) { 631 if (idle_thread) {
622 idle_thread->Close(); 632 idle_thread->Close();
623 idle_thread = nullptr; 633 idle_thread = nullptr;
624 } 634 }
625} 635}
626 636
637KScheduler::~KScheduler() {
638 ASSERT(!idle_thread);
639}
640
627KThread* KScheduler::GetCurrentThread() const { 641KThread* KScheduler::GetCurrentThread() const {
628 if (auto result = current_thread.load(); result) { 642 if (auto result = current_thread.load(); result) {
629 return result; 643 return result;
@@ -642,10 +656,12 @@ void KScheduler::RescheduleCurrentCore() {
642 if (phys_core.IsInterrupted()) { 656 if (phys_core.IsInterrupted()) {
643 phys_core.ClearInterrupt(); 657 phys_core.ClearInterrupt();
644 } 658 }
659
645 guard.Lock(); 660 guard.Lock();
646 if (state.needs_scheduling.load()) { 661 if (state.needs_scheduling.load()) {
647 Schedule(); 662 Schedule();
648 } else { 663 } else {
664 GetCurrentThread()->EnableDispatch();
649 guard.Unlock(); 665 guard.Unlock();
650 } 666 }
651} 667}
@@ -655,26 +671,33 @@ void KScheduler::OnThreadStart() {
655} 671}
656 672
657void KScheduler::Unload(KThread* thread) { 673void KScheduler::Unload(KThread* thread) {
674 ASSERT(thread);
675
658 LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr"); 676 LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
659 677
660 if (thread) { 678 if (thread->IsCallingSvc()) {
661 if (thread->IsCallingSvc()) { 679 thread->ClearIsCallingSvc();
662 thread->ClearIsCallingSvc(); 680 }
663 } 681
664 if (!thread->IsTerminationRequested()) { 682 auto& physical_core = system.Kernel().PhysicalCore(core_id);
665 prev_thread = thread; 683 if (!physical_core.IsInitialized()) {
666 684 return;
667 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); 685 }
668 cpu_core.SaveContext(thread->GetContext32()); 686
669 cpu_core.SaveContext(thread->GetContext64()); 687 Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
670 // Save the TPIDR_EL0 system register in case it was modified. 688 cpu_core.SaveContext(thread->GetContext32());
671 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); 689 cpu_core.SaveContext(thread->GetContext64());
672 cpu_core.ClearExclusiveState(); 690 // Save the TPIDR_EL0 system register in case it was modified.
673 } else { 691 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
674 prev_thread = nullptr; 692 cpu_core.ClearExclusiveState();
675 } 693
676 thread->context_guard.Unlock(); 694 if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
695 prev_thread = thread;
696 } else {
697 prev_thread = nullptr;
677 } 698 }
699
700 thread->context_guard.Unlock();
678} 701}
679 702
680void KScheduler::Reload(KThread* thread) { 703void KScheduler::Reload(KThread* thread) {
@@ -683,11 +706,6 @@ void KScheduler::Reload(KThread* thread) {
683 if (thread) { 706 if (thread) {
684 ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable."); 707 ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
685 708
686 auto* const thread_owner_process = thread->GetOwnerProcess();
687 if (thread_owner_process != nullptr) {
688 system.Kernel().MakeCurrentProcess(thread_owner_process);
689 }
690
691 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); 709 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
692 cpu_core.LoadContext(thread->GetContext32()); 710 cpu_core.LoadContext(thread->GetContext32());
693 cpu_core.LoadContext(thread->GetContext64()); 711 cpu_core.LoadContext(thread->GetContext64());
@@ -705,7 +723,7 @@ void KScheduler::SwitchContextStep2() {
705} 723}
706 724
707void KScheduler::ScheduleImpl() { 725void KScheduler::ScheduleImpl() {
708 KThread* previous_thread = current_thread.load(); 726 KThread* previous_thread = GetCurrentThread();
709 KThread* next_thread = state.highest_priority_thread; 727 KThread* next_thread = state.highest_priority_thread;
710 728
711 state.needs_scheduling = false; 729 state.needs_scheduling = false;
@@ -717,10 +735,15 @@ void KScheduler::ScheduleImpl() {
717 735
718 // If we're not actually switching thread, there's nothing to do. 736 // If we're not actually switching thread, there's nothing to do.
719 if (next_thread == current_thread.load()) { 737 if (next_thread == current_thread.load()) {
738 previous_thread->EnableDispatch();
720 guard.Unlock(); 739 guard.Unlock();
721 return; 740 return;
722 } 741 }
723 742
743 if (next_thread->GetCurrentCore() != core_id) {
744 next_thread->SetCurrentCore(core_id);
745 }
746
724 current_thread.store(next_thread); 747 current_thread.store(next_thread);
725 748
726 KProcess* const previous_process = system.Kernel().CurrentProcess(); 749 KProcess* const previous_process = system.Kernel().CurrentProcess();
@@ -731,11 +754,7 @@ void KScheduler::ScheduleImpl() {
731 Unload(previous_thread); 754 Unload(previous_thread);
732 755
733 std::shared_ptr<Common::Fiber>* old_context; 756 std::shared_ptr<Common::Fiber>* old_context;
734 if (previous_thread != nullptr) { 757 old_context = &previous_thread->GetHostContext();
735 old_context = &previous_thread->GetHostContext();
736 } else {
737 old_context = &idle_thread->GetHostContext();
738 }
739 guard.Unlock(); 758 guard.Unlock();
740 759
741 Common::Fiber::YieldTo(*old_context, *switch_fiber); 760 Common::Fiber::YieldTo(*old_context, *switch_fiber);
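
Taken together, these scheduler changes turn dispatch disabling into a per-thread reentrancy counter rather than a per-scheduler check: DisableScheduling increments it, and EnableScheduling only performs a reschedule when unwinding the outermost level. A usage-level sketch of the contract (counts follow the diff's semantics):

KScheduler::DisableScheduling(kernel);   // ++disable_count: preemption off
// ... work that must not be preempted ...
KScheduler::EnableScheduling(kernel, cores_needing_scheduling);
// nested level    (count > 1):  just --disable_count and return
// outermost level (count == 1): RescheduleCores(); the switch path, not
//   this function, performs the final EnableDispatch()
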
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 7df288438..82fcd99e7 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -33,6 +33,8 @@ public:
33 explicit KScheduler(Core::System& system_, s32 core_id_); 33 explicit KScheduler(Core::System& system_, s32 core_id_);
34 ~KScheduler(); 34 ~KScheduler();
35 35
36 void Finalize();
37
36 /// Reschedules to the next available thread (call after current thread is suspended) 38 /// Reschedules to the next available thread (call after current thread is suspended)
37 void RescheduleCurrentCore(); 39 void RescheduleCurrentCore();
38 40
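
Splitting Finalize() out of the destructor makes scheduler teardown two-phase: resources that need a live kernel (here, the idle thread) are released explicitly while shutdown is still orderly, and the destructor merely asserts that this happened. The intended call order would be something like the following (the call site itself is not part of this diff):

// During kernel shutdown, before the schedulers themselves are destroyed:
for (auto& scheduler : schedulers) {
    scheduler->Finalize();  // closes idle_thread and nulls the pointer
}
// Later: ~KScheduler() only checks the invariant via ASSERT(!idle_thread).
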
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index c571f2992..93c47f1b1 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -23,6 +23,11 @@ public:
23 } 23 }
24 24
25 void Lock() { 25 void Lock() {
26 // If we are shutting down the kernel, none of this is relevant anymore.
27 if (kernel.IsShuttingDown()) {
28 return;
29 }
30
26 if (IsLockedByCurrentThread()) { 31 if (IsLockedByCurrentThread()) {
27 // If we already own the lock, we can just increment the count. 32 // If we already own the lock, we can just increment the count.
28 ASSERT(lock_count > 0); 33 ASSERT(lock_count > 0);
@@ -43,6 +48,11 @@ public:
43 } 48 }
44 49
45 void Unlock() { 50 void Unlock() {
51 // If we are shutting down the kernel, none of this is relevant anymore.
52 if (kernel.IsShuttingDown()) {
53 return;
54 }
55
46 ASSERT(IsLockedByCurrentThread()); 56 ASSERT(IsLockedByCurrentThread());
47 ASSERT(lock_count > 0); 57 ASSERT(lock_count > 0);
48 58
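
Both halves of the recursive scheduler lock now bail out once the kernel is shutting down, since the scheduling state they protect is itself being torn down at that point. The recursion is unchanged; in outline:

void Lock() {
    if (kernel.IsShuttingDown()) {
        return;  // teardown: nothing left worth protecting
    }
    if (IsLockedByCurrentThread()) {
        ASSERT(lock_count > 0);
        ++lock_count;  // recursive re-entry is just a counter bump
        return;
    }
    // ... otherwise acquire the underlying lock, take ownership,
    // and set lock_count = 1 ...
}
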
diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
index 61dc2858f..2995c492d 100644
--- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
+++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h
@@ -8,6 +8,7 @@
8#pragma once 8#pragma once
9 9
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "core/hle/kernel/global_scheduler_context.h"
11#include "core/hle/kernel/k_thread.h" 12#include "core/hle/kernel/k_thread.h"
12#include "core/hle/kernel/kernel.h" 13#include "core/hle/kernel/kernel.h"
13#include "core/hle/kernel/time_manager.h" 14#include "core/hle/kernel/time_manager.h"
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index 2bd53ccbd..d4e4a6b06 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -175,8 +175,7 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
175 { 175 {
176 KScopedSchedulerLock lock(kernel); 176 KScopedSchedulerLock lock(kernel);
177 if (!context.IsThreadWaiting()) { 177 if (!context.IsThreadWaiting()) {
178 context.GetThread().Wakeup(); 178 context.GetThread().EndWait(result);
179 context.GetThread().SetSyncedObject(nullptr, result);
180 } 179 }
181 } 180 }
182 181
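
EndWait replaces the old Wakeup + SetSyncedObject pair here: the result now travels through the thread's wait queue, the same path every other wakeup in this change set uses. Shape of the substitution:

// Before: wake first, then attach the result.
context.GetThread().Wakeup();
context.GetThread().SetSyncedObject(nullptr, result);

// After: one call. KThread::EndWait takes the scheduler lock, verifies
// the thread is still Waiting, and hands the result to its wait queue.
context.GetThread().EndWait(result);
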
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
index f168b4f21..e4c5eb74f 100644
--- a/src/core/hle/kernel/k_synchronization_object.cpp
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -8,11 +8,66 @@
8#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 8#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
9#include "core/hle/kernel/k_synchronization_object.h" 9#include "core/hle/kernel/k_synchronization_object.h"
10#include "core/hle/kernel/k_thread.h" 10#include "core/hle/kernel/k_thread.h"
11#include "core/hle/kernel/k_thread_queue.h"
11#include "core/hle/kernel/kernel.h" 12#include "core/hle/kernel/kernel.h"
12#include "core/hle/kernel/svc_results.h" 13#include "core/hle/kernel/svc_results.h"
13 14
14namespace Kernel { 15namespace Kernel {
15 16
17namespace {
18
19class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait {
20public:
21 ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel_, KSynchronizationObject** o,
22 KSynchronizationObject::ThreadListNode* n, s32 c)
23 : KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) {}
24
25 void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
26 ResultCode wait_result) override {
27 // Determine the sync index, and unlink all nodes.
28 s32 sync_index = -1;
29 for (auto i = 0; i < m_count; ++i) {
30 // Check if this is the signaled object.
31 if (m_objects[i] == signaled_object && sync_index == -1) {
32 sync_index = i;
33 }
34
35 // Unlink the current node from the current object.
36 m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
37 }
38
39 // Set the waiting thread's sync index.
40 waiting_thread->SetSyncedIndex(sync_index);
41
42 // Set the waiting thread as not cancellable.
43 waiting_thread->ClearCancellable();
44
45 // Invoke the base end wait handler.
46 KThreadQueue::EndWait(waiting_thread, wait_result);
47 }
48
49 void CancelWait(KThread* waiting_thread, ResultCode wait_result,
50 bool cancel_timer_task) override {
51 // Remove all nodes from our list.
52 for (auto i = 0; i < m_count; ++i) {
53 m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
54 }
55
56 // Set the waiting thread as not cancellable.
57 waiting_thread->ClearCancellable();
58
59 // Invoke the base cancel wait handler.
60 KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
61 }
62
63private:
64 KSynchronizationObject** m_objects;
65 KSynchronizationObject::ThreadListNode* m_nodes;
66 s32 m_count;
67};
68
69} // namespace
70
16void KSynchronizationObject::Finalize() { 71void KSynchronizationObject::Finalize() {
17 this->OnFinalizeSynchronizationObject(); 72 this->OnFinalizeSynchronizationObject();
18 KAutoObject::Finalize(); 73 KAutoObject::Finalize();
@@ -25,11 +80,19 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
25 std::vector<ThreadListNode> thread_nodes(num_objects); 80 std::vector<ThreadListNode> thread_nodes(num_objects);
26 81
27 // Prepare for wait. 82 // Prepare for wait.
28 KThread* thread = kernel_ctx.CurrentScheduler()->GetCurrentThread(); 83 KThread* thread = GetCurrentThreadPointer(kernel_ctx);
84 ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel_ctx, objects,
85 thread_nodes.data(), num_objects);
29 86
30 { 87 {
31 // Setup the scheduling lock and sleep. 88 // Setup the scheduling lock and sleep.
32 KScopedSchedulerLockAndSleep slp{kernel_ctx, thread, timeout}; 89 KScopedSchedulerLockAndSleep slp(kernel_ctx, thread, timeout);
90
91 // Check if the thread should terminate.
92 if (thread->IsTerminationRequested()) {
93 slp.CancelSleep();
94 return ResultTerminationRequested;
95 }
33 96
34 // Check if any of the objects are already signaled. 97 // Check if any of the objects are already signaled.
35 for (auto i = 0; i < num_objects; ++i) { 98 for (auto i = 0; i < num_objects; ++i) {
@@ -48,12 +111,6 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
48 return ResultTimedOut; 111 return ResultTimedOut;
49 } 112 }
50 113
51 // Check if the thread should terminate.
52 if (thread->IsTerminationRequested()) {
53 slp.CancelSleep();
54 return ResultTerminationRequested;
55 }
56
57 // Check if waiting was canceled. 114 // Check if waiting was canceled.
58 if (thread->IsWaitCancelled()) { 115 if (thread->IsWaitCancelled()) {
59 slp.CancelSleep(); 116 slp.CancelSleep();
@@ -66,73 +123,25 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
66 thread_nodes[i].thread = thread; 123 thread_nodes[i].thread = thread;
67 thread_nodes[i].next = nullptr; 124 thread_nodes[i].next = nullptr;
68 125
69 if (objects[i]->thread_list_tail == nullptr) { 126 objects[i]->LinkNode(std::addressof(thread_nodes[i]));
70 objects[i]->thread_list_head = std::addressof(thread_nodes[i]);
71 } else {
72 objects[i]->thread_list_tail->next = std::addressof(thread_nodes[i]);
73 }
74
75 objects[i]->thread_list_tail = std::addressof(thread_nodes[i]);
76 } 127 }
77 128
78 // For debugging only 129 // Mark the thread as cancellable.
79 thread->SetWaitObjectsForDebugging({objects, static_cast<std::size_t>(num_objects)});
80
81 // Mark the thread as waiting.
82 thread->SetCancellable(); 130 thread->SetCancellable();
83 thread->SetSyncedObject(nullptr, ResultTimedOut);
84 thread->SetState(ThreadState::Waiting);
85 thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
86 }
87 131
88 // The lock/sleep is done, so we should be able to get our result. 132 // Clear the thread's synced index.
133 thread->SetSyncedIndex(-1);
89 134
90 // Thread is no longer cancellable. 135 // Wait for an object to be signaled.
91 thread->ClearCancellable(); 136 thread->BeginWait(std::addressof(wait_queue));
92 137 thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
93 // For debugging only 138 }
94 thread->SetWaitObjectsForDebugging({});
95 139
96 // Cancel the timer as needed. 140 // Set the output index.
97 kernel_ctx.TimeManager().UnscheduleTimeEvent(thread); 141 *out_index = thread->GetSyncedIndex();
98 142
99 // Get the wait result. 143 // Get the wait result.
100 ResultCode wait_result{ResultSuccess}; 144 return thread->GetWaitResult();
101 s32 sync_index = -1;
102 {
103 KScopedSchedulerLock lock(kernel_ctx);
104 KSynchronizationObject* synced_obj;
105 wait_result = thread->GetWaitResult(std::addressof(synced_obj));
106
107 for (auto i = 0; i < num_objects; ++i) {
108 // Unlink the object from the list.
109 ThreadListNode* prev_ptr =
110 reinterpret_cast<ThreadListNode*>(std::addressof(objects[i]->thread_list_head));
111 ThreadListNode* prev_val = nullptr;
112 ThreadListNode *prev, *tail_prev;
113
114 do {
115 prev = prev_ptr;
116 prev_ptr = prev_ptr->next;
117 tail_prev = prev_val;
118 prev_val = prev_ptr;
119 } while (prev_ptr != std::addressof(thread_nodes[i]));
120
121 if (objects[i]->thread_list_tail == std::addressof(thread_nodes[i])) {
122 objects[i]->thread_list_tail = tail_prev;
123 }
124
125 prev->next = thread_nodes[i].next;
126
127 if (objects[i] == synced_obj) {
128 sync_index = i;
129 }
130 }
131 }
132
133 // Set output.
134 *out_index = sync_index;
135 return wait_result;
136} 145}
137 146
138KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_) 147KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
@@ -141,7 +150,7 @@ KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
141KSynchronizationObject::~KSynchronizationObject() = default; 150KSynchronizationObject::~KSynchronizationObject() = default;
142 151
143void KSynchronizationObject::NotifyAvailable(ResultCode result) { 152void KSynchronizationObject::NotifyAvailable(ResultCode result) {
144 KScopedSchedulerLock lock(kernel); 153 KScopedSchedulerLock sl(kernel);
145 154
146 // If we're not signaled, we've nothing to notify. 155 // If we're not signaled, we've nothing to notify.
147 if (!this->IsSignaled()) { 156 if (!this->IsSignaled()) {
@@ -150,11 +159,7 @@ void KSynchronizationObject::NotifyAvailable(ResultCode result) {
150 159
151 // Iterate over each thread. 160 // Iterate over each thread.
152 for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) { 161 for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
153 KThread* thread = cur_node->thread; 162 cur_node->thread->NotifyAvailable(this, result);
154 if (thread->GetState() == ThreadState::Waiting) {
155 thread->SetSyncedObject(this, result);
156 thread->SetState(ThreadState::Runnable);
157 }
158 } 163 }
159} 164}
160 165
diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h
index 898e58e16..ec235437b 100644
--- a/src/core/hle/kernel/k_synchronization_object.h
+++ b/src/core/hle/kernel/k_synchronization_object.h
@@ -35,6 +35,38 @@ public:
35 35
36 [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const; 36 [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;
37 37
38 void LinkNode(ThreadListNode* node_) {
39 // Link the node to the list.
40 if (thread_list_tail == nullptr) {
41 thread_list_head = node_;
42 } else {
43 thread_list_tail->next = node_;
44 }
45
46 thread_list_tail = node_;
47 }
48
49 void UnlinkNode(ThreadListNode* node_) {
50 // Unlink the node from the list.
51 ThreadListNode* prev_ptr =
52 reinterpret_cast<ThreadListNode*>(std::addressof(thread_list_head));
53 ThreadListNode* prev_val = nullptr;
54 ThreadListNode *prev, *tail_prev;
55
56 do {
57 prev = prev_ptr;
58 prev_ptr = prev_ptr->next;
59 tail_prev = prev_val;
60 prev_val = prev_ptr;
61 } while (prev_ptr != node_);
62
63 if (thread_list_tail == node_) {
64 thread_list_tail = tail_prev;
65 }
66
67 prev->next = node_->next;
68 }
69
38protected: 70protected:
39 explicit KSynchronizationObject(KernelCore& kernel); 71 explicit KSynchronizationObject(KernelCore& kernel);
40 ~KSynchronizationObject() override; 72 ~KSynchronizationObject() override;
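
UnlinkNode avoids a special case for the head by reinterpreting the address of thread_list_head as a pseudo-node whose next field is the head pointer, which only lines up if next is the node's first member. An equivalent removal without the cast, for comparison (node type reduced to what the algorithm needs):

struct ThreadListNode {
    ThreadListNode* next{};
    // KThread* thread;  // payload, irrelevant to the unlinking itself
};

void Unlink(ThreadListNode*& head, ThreadListNode*& tail, ThreadListNode* node) {
    ThreadListNode* prev = nullptr;
    for (ThreadListNode* cur = head; cur != nullptr; prev = cur, cur = cur->next) {
        if (cur != node) {
            continue;
        }
        if (prev != nullptr) {
            prev->next = node->next;  // unlink from the middle or the tail
        } else {
            head = node->next;        // unlink the head
        }
        if (tail == node) {
            tail = prev;              // node was last: pull the tail back
        }
        return;
    }
}
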
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index db65ce79a..752592e2e 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -13,6 +13,9 @@
13#include "common/common_types.h" 13#include "common/common_types.h"
14#include "common/fiber.h" 14#include "common/fiber.h"
15#include "common/logging/log.h" 15#include "common/logging/log.h"
16#include "common/scope_exit.h"
17#include "common/settings.h"
18#include "common/thread_queue_list.h"
16#include "core/core.h" 19#include "core/core.h"
17#include "core/cpu_manager.h" 20#include "core/cpu_manager.h"
18#include "core/hardware_properties.h" 21#include "core/hardware_properties.h"
@@ -56,6 +59,34 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context,
56 59
57namespace Kernel { 60namespace Kernel {
58 61
62namespace {
63
64class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait {
65public:
66 explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel_)
67 : KThreadQueueWithoutEndWait(kernel_) {}
68};
69
70class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue {
71public:
72 explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl)
73 : KThreadQueue(kernel_), m_wait_list(wl) {}
74
75 void CancelWait(KThread* waiting_thread, ResultCode wait_result,
76 bool cancel_timer_task) override {
77 // Remove the thread from the wait list.
78 m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
79
80 // Invoke the base cancel wait handler.
81 KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
82 }
83
84private:
85 KThread::WaiterList* m_wait_list;
86};
87
88} // namespace
89
59KThread::KThread(KernelCore& kernel_) 90KThread::KThread(KernelCore& kernel_)
60 : KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {} 91 : KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {}
61KThread::~KThread() = default; 92KThread::~KThread() = default;
@@ -82,6 +113,8 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
82 [[fallthrough]]; 113 [[fallthrough]];
83 case ThreadType::HighPriority: 114 case ThreadType::HighPriority:
84 [[fallthrough]]; 115 [[fallthrough]];
116 case ThreadType::Dummy:
117 [[fallthrough]];
85 case ThreadType::User: 118 case ThreadType::User:
86 ASSERT(((owner == nullptr) || 119 ASSERT(((owner == nullptr) ||
87 (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask())); 120 (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
@@ -127,11 +160,8 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
127 priority = prio; 160 priority = prio;
128 base_priority = prio; 161 base_priority = prio;
129 162
130 // Set sync object and waiting lock to null.
131 synced_object = nullptr;
132
133 // Initialize sleeping queue. 163 // Initialize sleeping queue.
134 sleeping_queue = nullptr; 164 wait_queue = nullptr;
135 165
136 // Set suspend flags. 166 // Set suspend flags.
137 suspend_request_flags = 0; 167 suspend_request_flags = 0;
@@ -184,7 +214,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
184 // Setup the stack parameters. 214 // Setup the stack parameters.
185 StackParameters& sp = GetStackParameters(); 215 StackParameters& sp = GetStackParameters();
186 sp.cur_thread = this; 216 sp.cur_thread = this;
187 sp.disable_count = 1; 217 sp.disable_count = 0;
188 SetInExceptionHandler(); 218 SetInExceptionHandler();
189 219
190 // Set thread ID. 220 // Set thread ID.
@@ -211,15 +241,16 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
211 // Initialize the thread. 241 // Initialize the thread.
212 R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type)); 242 R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
213 243
214 // Initialize host context. 244 // Initialize emulation parameters.
215 thread->host_context = 245 thread->host_context =
216 std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter); 246 std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
247 thread->is_single_core = !Settings::values.use_multi_core.GetValue();
217 248
218 return ResultSuccess; 249 return ResultSuccess;
219} 250}
220 251
221ResultCode KThread::InitializeDummyThread(KThread* thread) { 252ResultCode KThread::InitializeDummyThread(KThread* thread) {
222 return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Main); 253 return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Dummy);
223} 254}
224 255
225ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { 256ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
@@ -273,11 +304,14 @@ void KThread::Finalize() {
273 304
274 auto it = waiter_list.begin(); 305 auto it = waiter_list.begin();
275 while (it != waiter_list.end()) { 306 while (it != waiter_list.end()) {
276 // The thread shouldn't be a kernel waiter. 307 // Clear the lock owner
277 it->SetLockOwner(nullptr); 308 it->SetLockOwner(nullptr);
278 it->SetSyncedObject(nullptr, ResultInvalidState); 309
279 it->Wakeup(); 310 // Erase the waiter from our list.
280 it = waiter_list.erase(it); 311 it = waiter_list.erase(it);
312
313 // Cancel the thread's wait.
314 it->CancelWait(ResultInvalidState, true);
281 } 315 }
282 } 316 }
283 317
@@ -294,15 +328,12 @@ bool KThread::IsSignaled() const {
294 return signaled; 328 return signaled;
295} 329}
296 330
297void KThread::Wakeup() { 331void KThread::OnTimer() {
298 KScopedSchedulerLock sl{kernel}; 332 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
299 333
334 // If we're waiting, cancel the wait.
300 if (GetState() == ThreadState::Waiting) { 335 if (GetState() == ThreadState::Waiting) {
301 if (sleeping_queue != nullptr) { 336 wait_queue->CancelWait(this, ResultTimedOut, false);
302 sleeping_queue->WakeupThread(this);
303 } else {
304 SetState(ThreadState::Runnable);
305 }
306 } 337 }
307} 338}
308 339
@@ -327,7 +358,7 @@ void KThread::StartTermination() {
327 358
328 // Signal. 359 // Signal.
329 signaled = true; 360 signaled = true;
330 NotifyAvailable(); 361 KSynchronizationObject::NotifyAvailable();
331 362
332 // Clear previous thread in KScheduler. 363 // Clear previous thread in KScheduler.
333 KScheduler::ClearPreviousThread(kernel, this); 364 KScheduler::ClearPreviousThread(kernel, this);
@@ -475,30 +506,32 @@ ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_m
475 return ResultSuccess; 506 return ResultSuccess;
476} 507}
477 508
478ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) { 509ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
479 ASSERT(parent != nullptr); 510 ASSERT(parent != nullptr);
480 ASSERT(v_affinity_mask != 0); 511 ASSERT(v_affinity_mask != 0);
481 KScopedLightLock lk{activity_pause_lock}; 512 KScopedLightLock lk(activity_pause_lock);
482 513
483 // Set the core mask. 514 // Set the core mask.
484 u64 p_affinity_mask = 0; 515 u64 p_affinity_mask = 0;
485 { 516 {
486 KScopedSchedulerLock sl{kernel}; 517 KScopedSchedulerLock sl(kernel);
487 ASSERT(num_core_migration_disables >= 0); 518 ASSERT(num_core_migration_disables >= 0);
488 519
489 // If the core id is no-update magic, preserve the ideal core id. 520 // If we're updating, set our ideal virtual core.
490 if (cpu_core_id == Svc::IdealCoreNoUpdate) { 521 if (core_id_ != Svc::IdealCoreNoUpdate) {
491 cpu_core_id = virtual_ideal_core_id; 522 virtual_ideal_core_id = core_id_;
492 R_UNLESS(((1ULL << cpu_core_id) & v_affinity_mask) != 0, ResultInvalidCombination); 523 } else {
524 // Preserve our ideal core id.
525 core_id_ = virtual_ideal_core_id;
526 R_UNLESS(((1ULL << core_id_) & v_affinity_mask) != 0, ResultInvalidCombination);
493 } 527 }
494 528
495 // Set the virtual core/affinity mask. 529 // Set our affinity mask.
496 virtual_ideal_core_id = cpu_core_id;
497 virtual_affinity_mask = v_affinity_mask; 530 virtual_affinity_mask = v_affinity_mask;
498 531
499 // Translate the virtual core to a physical core. 532 // Translate the virtual core to a physical core.
500 if (cpu_core_id >= 0) { 533 if (core_id_ >= 0) {
501 cpu_core_id = Core::Hardware::VirtualToPhysicalCoreMap[cpu_core_id]; 534 core_id_ = Core::Hardware::VirtualToPhysicalCoreMap[core_id_];
502 } 535 }
503 536
504 // Translate the virtual affinity mask to a physical one. 537 // Translate the virtual affinity mask to a physical one.
@@ -513,7 +546,7 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
513 const KAffinityMask old_mask = physical_affinity_mask; 546 const KAffinityMask old_mask = physical_affinity_mask;
514 547
515 // Set our new ideals. 548 // Set our new ideals.
516 physical_ideal_core_id = cpu_core_id; 549 physical_ideal_core_id = core_id_;
517 physical_affinity_mask.SetAffinityMask(p_affinity_mask); 550 physical_affinity_mask.SetAffinityMask(p_affinity_mask);
518 551
519 if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { 552 if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
@@ -531,18 +564,18 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
531 } 564 }
532 } else { 565 } else {
533 // Otherwise, we edit the original affinity for restoration later. 566 // Otherwise, we edit the original affinity for restoration later.
534 original_physical_ideal_core_id = cpu_core_id; 567 original_physical_ideal_core_id = core_id_;
535 original_physical_affinity_mask.SetAffinityMask(p_affinity_mask); 568 original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
536 } 569 }
537 } 570 }
538 571
539 // Update the pinned waiter list. 572 // Update the pinned waiter list.
573 ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, std::addressof(pinned_waiter_list));
540 { 574 {
541 bool retry_update{}; 575 bool retry_update{};
542 bool thread_is_pinned{};
543 do { 576 do {
544 // Lock the scheduler. 577 // Lock the scheduler.
545 KScopedSchedulerLock sl{kernel}; 578 KScopedSchedulerLock sl(kernel);
546 579
547 // Don't do any further management if our termination has been requested. 580 // Don't do any further management if our termination has been requested.
548 R_SUCCEED_IF(IsTerminationRequested()); 581 R_SUCCEED_IF(IsTerminationRequested());
@@ -570,12 +603,9 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
570 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), 603 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
571 ResultTerminationRequested); 604 ResultTerminationRequested);
572 605
573 // Note that the thread was pinned.
574 thread_is_pinned = true;
575
576 // Wait until the thread isn't pinned any more. 606 // Wait until the thread isn't pinned any more.
577 pinned_waiter_list.push_back(GetCurrentThread(kernel)); 607 pinned_waiter_list.push_back(GetCurrentThread(kernel));
578 GetCurrentThread(kernel).SetState(ThreadState::Waiting); 608 GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_));
579 } else { 609 } else {
580 // If the thread isn't pinned, release the scheduler lock and retry until it's 610 // If the thread isn't pinned, release the scheduler lock and retry until it's
581 // not current. 611 // not current.
@@ -583,16 +613,6 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
583 } 613 }
584 } 614 }
585 } while (retry_update); 615 } while (retry_update);
586
587 // If the thread was pinned, it no longer is, and we should remove the current thread from
588 // our waiter list.
589 if (thread_is_pinned) {
590 // Lock the scheduler.
591 KScopedSchedulerLock sl{kernel};
592
593 // Remove from the list.
594 pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
595 }
596 } 616 }
597 617
598 return ResultSuccess; 618 return ResultSuccess;
@@ -641,15 +661,9 @@ void KThread::WaitCancel() {
641 KScopedSchedulerLock sl{kernel}; 661 KScopedSchedulerLock sl{kernel};
642 662
643 // Check if we're waiting and cancellable. 663 // Check if we're waiting and cancellable.
644 if (GetState() == ThreadState::Waiting && cancellable) { 664 if (this->GetState() == ThreadState::Waiting && cancellable) {
645 if (sleeping_queue != nullptr) { 665 wait_cancelled = false;
646 sleeping_queue->WakeupThread(this); 666 wait_queue->CancelWait(this, ResultCancelled, true);
647 wait_cancelled = true;
648 } else {
649 SetSyncedObject(nullptr, ResultCancelled);
650 SetState(ThreadState::Runnable);
651 wait_cancelled = false;
652 }
653 } else { 667 } else {
654 // Otherwise, note that we cancelled a wait. 668 // Otherwise, note that we cancelled a wait.
655 wait_cancelled = true; 669 wait_cancelled = true;
@@ -700,60 +714,59 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
700 // Set the activity. 714 // Set the activity.
701 { 715 {
702 // Lock the scheduler. 716 // Lock the scheduler.
703 KScopedSchedulerLock sl{kernel}; 717 KScopedSchedulerLock sl(kernel);
704 718
705 // Verify our state. 719 // Verify our state.
706 const auto cur_state = GetState(); 720 const auto cur_state = this->GetState();
707 R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable), 721 R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable),
708 ResultInvalidState); 722 ResultInvalidState);
709 723
710 // Either pause or resume. 724 // Either pause or resume.
711 if (activity == Svc::ThreadActivity::Paused) { 725 if (activity == Svc::ThreadActivity::Paused) {
712 // Verify that we're not suspended. 726 // Verify that we're not suspended.
713 R_UNLESS(!IsSuspendRequested(SuspendType::Thread), ResultInvalidState); 727 R_UNLESS(!this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
714 728
715 // Suspend. 729 // Suspend.
716 RequestSuspend(SuspendType::Thread); 730 this->RequestSuspend(SuspendType::Thread);
717 } else { 731 } else {
718 ASSERT(activity == Svc::ThreadActivity::Runnable); 732 ASSERT(activity == Svc::ThreadActivity::Runnable);
719 733
720 // Verify that we're suspended. 734 // Verify that we're suspended.
721 R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState); 735 R_UNLESS(this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
722 736
723 // Resume. 737 // Resume.
724 Resume(SuspendType::Thread); 738 this->Resume(SuspendType::Thread);
725 } 739 }
726 } 740 }
727 741
728 // If the thread is now paused, update the pinned waiter list. 742 // If the thread is now paused, update the pinned waiter list.
729 if (activity == Svc::ThreadActivity::Paused) { 743 if (activity == Svc::ThreadActivity::Paused) {
730 bool thread_is_pinned{}; 744 ThreadQueueImplForKThreadSetProperty wait_queue_(kernel,
731 bool thread_is_current{}; 745 std::addressof(pinned_waiter_list));
746
747 bool thread_is_current;
732 do { 748 do {
733 // Lock the scheduler. 749 // Lock the scheduler.
734 KScopedSchedulerLock sl{kernel}; 750 KScopedSchedulerLock sl(kernel);
735 751
736 // Don't do any further management if our termination has been requested. 752 // Don't do any further management if our termination has been requested.
737 R_SUCCEED_IF(IsTerminationRequested()); 753 R_SUCCEED_IF(this->IsTerminationRequested());
754
755 // By default, treat the thread as not current.
756 thread_is_current = false;
738 757
739 // Check whether the thread is pinned. 758 // Check whether the thread is pinned.
740 if (GetStackParameters().is_pinned) { 759 if (this->GetStackParameters().is_pinned) {
741 // Verify that the current thread isn't terminating. 760 // Verify that the current thread isn't terminating.
742 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), 761 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
743 ResultTerminationRequested); 762 ResultTerminationRequested);
744 763
745 // Note that the thread was pinned and not current.
746 thread_is_pinned = true;
747 thread_is_current = false;
748
749 // Wait until the thread isn't pinned any more. 764 // Wait until the thread isn't pinned any more.
750 pinned_waiter_list.push_back(GetCurrentThread(kernel)); 765 pinned_waiter_list.push_back(GetCurrentThread(kernel));
751 GetCurrentThread(kernel).SetState(ThreadState::Waiting); 766 GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_));
752 } else { 767 } else {
753 // Check if the thread is currently running. 768 // Check if the thread is currently running.
754 // If it is, we'll need to retry. 769 // If it is, we'll need to retry.
755 thread_is_current = false;
756
757 for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) { 770 for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
758 if (kernel.Scheduler(i).GetCurrentThread() == this) { 771 if (kernel.Scheduler(i).GetCurrentThread() == this) {
759 thread_is_current = true; 772 thread_is_current = true;
@@ -762,16 +775,6 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
762 } 775 }
763 } 776 }
764 } while (thread_is_current); 777 } while (thread_is_current);
765
766 // If the thread was pinned, it no longer is, and we should remove the current thread from
767 // our waiter list.
768 if (thread_is_pinned) {
769 // Lock the scheduler.
770 KScopedSchedulerLock sl{kernel};
771
772 // Remove from the list.
773 pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
774 }
775 } 778 }
776 779
777 return ResultSuccess; 780 return ResultSuccess;
@@ -966,6 +969,9 @@ ResultCode KThread::Run() {
966 969
967 // Set our state and finish. 970 // Set our state and finish.
968 SetState(ThreadState::Runnable); 971 SetState(ThreadState::Runnable);
972
973 DisableDispatch();
974
969 return ResultSuccess; 975 return ResultSuccess;
970 } 976 }
971} 977}
@@ -996,27 +1002,61 @@ ResultCode KThread::Sleep(s64 timeout) {
996 ASSERT(this == GetCurrentThreadPointer(kernel)); 1002 ASSERT(this == GetCurrentThreadPointer(kernel));
997 ASSERT(timeout > 0); 1003 ASSERT(timeout > 0);
998 1004
1005 ThreadQueueImplForKThreadSleep wait_queue_(kernel);
999 { 1006 {
1000 // Setup the scheduling lock and sleep. 1007 // Setup the scheduling lock and sleep.
1001 KScopedSchedulerLockAndSleep slp{kernel, this, timeout}; 1008 KScopedSchedulerLockAndSleep slp(kernel, this, timeout);
1002 1009
1003 // Check if the thread should terminate. 1010 // Check if the thread should terminate.
1004 if (IsTerminationRequested()) { 1011 if (this->IsTerminationRequested()) {
1005 slp.CancelSleep(); 1012 slp.CancelSleep();
1006 return ResultTerminationRequested; 1013 return ResultTerminationRequested;
1007 } 1014 }
1008 1015
1009 // Mark the thread as waiting. 1016 // Wait for the sleep to end.
1010 SetState(ThreadState::Waiting); 1017 this->BeginWait(std::addressof(wait_queue_));
1011 SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep); 1018 SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
1012 } 1019 }
1013 1020
1014 // The lock/sleep is done. 1021 return ResultSuccess;
1022}
1015 1023
1016 // Cancel the timer. 1024void KThread::BeginWait(KThreadQueue* queue) {
1017 kernel.TimeManager().UnscheduleTimeEvent(this); 1025 // Set our state as waiting.
1026 SetState(ThreadState::Waiting);
1018 1027
1019 return ResultSuccess; 1028 // Set our wait queue.
1029 wait_queue = queue;
1030}
1031
1032void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
1033 // Lock the scheduler.
1034 KScopedSchedulerLock sl(kernel);
1035
1036 // If we're waiting, notify our queue that we're available.
1037 if (GetState() == ThreadState::Waiting) {
1038 wait_queue->NotifyAvailable(this, signaled_object, wait_result_);
1039 }
1040}
1041
1042void KThread::EndWait(ResultCode wait_result_) {
1043 // Lock the scheduler.
1044 KScopedSchedulerLock sl(kernel);
1045
1046 // If we're waiting, notify our queue that our wait has ended.
1047 if (GetState() == ThreadState::Waiting) {
1048 wait_queue->EndWait(this, wait_result_);
1049 }
1050}
1051
1052void KThread::CancelWait(ResultCode wait_result_, bool cancel_timer_task) {
1053 // Lock the scheduler.
1054 KScopedSchedulerLock sl(kernel);
1055
1056 // If we're waiting, notify our queue that our wait was canceled.
1057 if (GetState() == ThreadState::Waiting) {
1058 wait_queue->CancelWait(this, wait_result_, cancel_timer_task);
1059 }
1020} 1060}
1021 1061
1022void KThread::SetState(ThreadState state) { 1062void KThread::SetState(ThreadState state) {
@@ -1050,4 +1090,26 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
1050 return GetCurrentThread(kernel).GetCurrentCore(); 1090 return GetCurrentThread(kernel).GetCurrentCore();
1051} 1091}
1052 1092
1093KScopedDisableDispatch::~KScopedDisableDispatch() {
1094 // If we are shutting down the kernel, none of this is relevant anymore.
1095 if (kernel.IsShuttingDown()) {
1096 return;
1097 }
1098
1099 // Skip the reschedule if single-core, as dispatch tracking is disabled here.
1100 if (!Settings::values.use_multi_core.GetValue()) {
1101 return;
1102 }
1103
1104 if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
1105 auto scheduler = kernel.CurrentScheduler();
1106
1107 if (scheduler) {
1108 scheduler->RescheduleCurrentCore();
1109 }
1110 } else {
1111 GetCurrentThread(kernel).EnableDispatch();
1112 }
1113}
1114
1053} // namespace Kernel 1115} // namespace Kernel
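
The wait/wake protocol introduced above replaces direct SetState(ThreadState::Waiting)/Wakeup() calls: BeginWait records which queue owns the sleeping thread, and EndWait/CancelWait route the wakeup through that queue's virtuals under the scheduler lock. A minimal standalone sketch of the same shape, with invented names and a plain std::mutex standing in for KScopedSchedulerLock:

#include <mutex>

enum class ThreadState { Runnable, Waiting };

class Thread;

class WaitQueue {
public:
    virtual ~WaitQueue() = default;
    virtual void EndWait(Thread& t, int wait_result);
};

class Thread {
public:
    // Called with the scheduler lock already held in the real code.
    void BeginWait(WaitQueue* queue) {
        state = ThreadState::Waiting; // mark as waiting...
        wait_queue = queue;           // ...and remember who owns the wait
    }
    void EndWait(int wait_result) {
        std::scoped_lock sl(lock);    // stands in for KScopedSchedulerLock
        if (state == ThreadState::Waiting) {
            wait_queue->EndWait(*this, wait_result); // queue decides how to wake
        }
    }
    ThreadState state{ThreadState::Runnable};
    WaitQueue* wait_queue{};
    int result{};

private:
    std::mutex lock;
};

void WaitQueue::EndWait(Thread& t, int wait_result) {
    t.result = wait_result;          // record why the wait ended
    t.state = ThreadState::Runnable; // make the thread schedulable again
    t.wait_queue = nullptr;
}

The point of the indirection is that the waker no longer needs to know why a thread slept; the owning queue decides how the wake is performed.
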
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index c77f44ad4..c8a08bd71 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -48,6 +48,7 @@ enum class ThreadType : u32 {
48 Kernel = 1, 48 Kernel = 1,
49 HighPriority = 2, 49 HighPriority = 2,
50 User = 3, 50 User = 3,
51 Dummy = 100, // Special thread type for emulation purposes only
51}; 52};
52DECLARE_ENUM_FLAG_OPERATORS(ThreadType); 53DECLARE_ENUM_FLAG_OPERATORS(ThreadType);
53 54
@@ -161,8 +162,6 @@ public:
161 } 162 }
162 } 163 }
163 164
164 void Wakeup();
165
166 void SetBasePriority(s32 value); 165 void SetBasePriority(s32 value);
167 166
168 [[nodiscard]] ResultCode Run(); 167 [[nodiscard]] ResultCode Run();
@@ -197,13 +196,19 @@ public:
197 196
198 void Suspend(); 197 void Suspend();
199 198
200 void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) { 199 constexpr void SetSyncedIndex(s32 index) {
201 synced_object = obj; 200 synced_index = index;
201 }
202
203 [[nodiscard]] constexpr s32 GetSyncedIndex() const {
204 return synced_index;
205 }
206
207 constexpr void SetWaitResult(ResultCode wait_res) {
202 wait_result = wait_res; 208 wait_result = wait_res;
203 } 209 }
204 210
205 [[nodiscard]] ResultCode GetWaitResult(KSynchronizationObject** out) const { 211 [[nodiscard]] constexpr ResultCode GetWaitResult() const {
206 *out = synced_object;
207 return wait_result; 212 return wait_result;
208 } 213 }
209 214
@@ -374,6 +379,8 @@ public:
374 379
375 [[nodiscard]] bool IsSignaled() const override; 380 [[nodiscard]] bool IsSignaled() const override;
376 381
382 void OnTimer();
383
377 static void PostDestroy(uintptr_t arg); 384 static void PostDestroy(uintptr_t arg);
378 385
379 [[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread); 386 [[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread);
@@ -446,20 +453,39 @@ public:
446 return per_core_priority_queue_entry[core]; 453 return per_core_priority_queue_entry[core];
447 } 454 }
448 455
449 void SetSleepingQueue(KThreadQueue* q) { 456 [[nodiscard]] bool IsKernelThread() const {
450 sleeping_queue = q; 457 return GetActiveCore() == 3;
458 }
459
460 [[nodiscard]] bool IsDispatchTrackingDisabled() const {
461 return is_single_core || IsKernelThread();
451 } 462 }
452 463
453 [[nodiscard]] s32 GetDisableDispatchCount() const { 464 [[nodiscard]] s32 GetDisableDispatchCount() const {
465 if (IsDispatchTrackingDisabled()) {
466 // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
467 return 1;
468 }
469
454 return this->GetStackParameters().disable_count; 470 return this->GetStackParameters().disable_count;
455 } 471 }
456 472
457 void DisableDispatch() { 473 void DisableDispatch() {
474 if (IsDispatchTrackingDisabled()) {
475 // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
476 return;
477 }
478
458 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); 479 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
459 this->GetStackParameters().disable_count++; 480 this->GetStackParameters().disable_count++;
460 } 481 }
461 482
462 void EnableDispatch() { 483 void EnableDispatch() {
484 if (IsDispatchTrackingDisabled()) {
485 // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
486 return;
487 }
488
463 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0); 489 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
464 this->GetStackParameters().disable_count--; 490 this->GetStackParameters().disable_count--;
465 } 491 }
@@ -573,6 +599,15 @@ public:
573 address_key_value = val; 599 address_key_value = val;
574 } 600 }
575 601
602 void ClearWaitQueue() {
603 wait_queue = nullptr;
604 }
605
606 void BeginWait(KThreadQueue* queue);
607 void NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_);
608 void EndWait(ResultCode wait_result_);
609 void CancelWait(ResultCode wait_result_, bool cancel_timer_task);
610
576 [[nodiscard]] bool HasWaiters() const { 611 [[nodiscard]] bool HasWaiters() const {
577 return !waiter_list.empty(); 612 return !waiter_list.empty();
578 } 613 }
@@ -667,7 +702,6 @@ private:
667 KAffinityMask physical_affinity_mask{}; 702 KAffinityMask physical_affinity_mask{};
668 u64 thread_id{}; 703 u64 thread_id{};
669 std::atomic<s64> cpu_time{}; 704 std::atomic<s64> cpu_time{};
670 KSynchronizationObject* synced_object{};
671 VAddr address_key{}; 705 VAddr address_key{};
672 KProcess* parent{}; 706 KProcess* parent{};
673 VAddr kernel_stack_top{}; 707 VAddr kernel_stack_top{};
@@ -677,13 +711,14 @@ private:
677 s64 schedule_count{}; 711 s64 schedule_count{};
678 s64 last_scheduled_tick{}; 712 s64 last_scheduled_tick{};
679 std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{}; 713 std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
680 KThreadQueue* sleeping_queue{}; 714 KThreadQueue* wait_queue{};
681 WaiterList waiter_list{}; 715 WaiterList waiter_list{};
682 WaiterList pinned_waiter_list{}; 716 WaiterList pinned_waiter_list{};
683 KThread* lock_owner{}; 717 KThread* lock_owner{};
684 u32 address_key_value{}; 718 u32 address_key_value{};
685 u32 suspend_request_flags{}; 719 u32 suspend_request_flags{};
686 u32 suspend_allowed_flags{}; 720 u32 suspend_allowed_flags{};
721 s32 synced_index{};
687 ResultCode wait_result{ResultSuccess}; 722 ResultCode wait_result{ResultSuccess};
688 s32 base_priority{}; 723 s32 base_priority{};
689 s32 physical_ideal_core_id{}; 724 s32 physical_ideal_core_id{};
@@ -708,6 +743,7 @@ private:
708 743
709 // For emulation 744 // For emulation
710 std::shared_ptr<Common::Fiber> host_context{}; 745 std::shared_ptr<Common::Fiber> host_context{};
746 bool is_single_core{};
711 747
712 // For debugging 748 // For debugging
713 std::vector<KSynchronizationObject*> wait_objects_for_debugging; 749 std::vector<KSynchronizationObject*> wait_objects_for_debugging;
@@ -752,4 +788,20 @@ public:
752 } 788 }
753}; 789};
754 790
791class KScopedDisableDispatch {
792public:
793 [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
794 // If we are shutting down the kernel, none of this is relevant anymore.
795 if (kernel.IsShuttingDown()) {
796 return;
797 }
798 GetCurrentThread(kernel).DisableDispatch();
799 }
800
801 ~KScopedDisableDispatch();
802
803private:
804 KernelCore& kernel;
805};
806
755} // namespace Kernel 807} // namespace Kernel
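
KScopedDisableDispatch above is a classic RAII counter guard: the constructor bumps the per-thread disable count and the destructor drops it (or reschedules) on every exit path. A generic sketch of the pattern under invented names, omitting the kernel-shutdown and single-core escape hatches:

#include <cassert>

// Hypothetical per-thread dispatch counter; yuzu keeps this in stack parameters.
struct DispatchState {
    int disable_count{};
    void Disable() { ++disable_count; }
    void Enable() { assert(disable_count > 0); --disable_count; }
};

class ScopedDisableDispatch {
public:
    explicit ScopedDisableDispatch(DispatchState& state_) : state{state_} {
        state.Disable(); // acquired in the constructor...
    }
    ~ScopedDisableDispatch() {
        state.Enable(); // ...released on every exit path
    }

private:
    DispatchState& state;
};

// Usage: dispatch stays disabled for the lexical scope, even on early return.
int DoWorkWithoutPreemption(DispatchState& ds) {
    ScopedDisableDispatch guard(ds);
    return 42;
}

Because the release lives in a destructor, early returns or exceptions inside the guarded scope cannot leak a disabled dispatch count.
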
diff --git a/src/core/hle/kernel/k_thread_queue.cpp b/src/core/hle/kernel/k_thread_queue.cpp
new file mode 100644
index 000000000..d5248b547
--- /dev/null
+++ b/src/core/hle/kernel/k_thread_queue.cpp
@@ -0,0 +1,49 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/hle/kernel/k_thread_queue.h"
6#include "core/hle/kernel/kernel.h"
7#include "core/hle/kernel/time_manager.h"
8
9namespace Kernel {
10
11void KThreadQueue::NotifyAvailable([[maybe_unused]] KThread* waiting_thread,
12 [[maybe_unused]] KSynchronizationObject* signaled_object,
13 [[maybe_unused]] ResultCode wait_result) {}
14
15void KThreadQueue::EndWait(KThread* waiting_thread, ResultCode wait_result) {
16 // Set the thread's wait result.
17 waiting_thread->SetWaitResult(wait_result);
18
19 // Set the thread as runnable.
20 waiting_thread->SetState(ThreadState::Runnable);
21
22 // Clear the thread's wait queue.
23 waiting_thread->ClearWaitQueue();
24
25 // Cancel the thread task.
26 kernel.TimeManager().UnscheduleTimeEvent(waiting_thread);
27}
28
29void KThreadQueue::CancelWait(KThread* waiting_thread, ResultCode wait_result,
30 bool cancel_timer_task) {
31 // Set the thread's wait result.
32 waiting_thread->SetWaitResult(wait_result);
33
34 // Set the thread as runnable.
35 waiting_thread->SetState(ThreadState::Runnable);
36
37 // Clear the thread's wait queue.
38 waiting_thread->ClearWaitQueue();
39
40 // Cancel the thread task.
41 if (cancel_timer_task) {
42 kernel.TimeManager().UnscheduleTimeEvent(waiting_thread);
43 }
44}
45
46void KThreadQueueWithoutEndWait::EndWait([[maybe_unused]] KThread* waiting_thread,
47 [[maybe_unused]] ResultCode wait_result) {}
48
49} // namespace Kernel
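
Note the one behavioral difference between the two wake paths above: EndWait always unschedules the thread's timeout event, while CancelWait does so only when cancel_timer_task is set, since a canceller running from the timeout callback itself must not unschedule the event it is executing. A toy sketch of that split, with invented Thread and TimeManager types:

#include <iostream>

struct Thread {
    int wait_result{};
    bool runnable{};
};

struct TimeManager {
    void UnscheduleTimeEvent(Thread&) { std::cout << "timer cancelled\n"; }
};

struct ThreadQueue {
    TimeManager& time;

    void EndWait(Thread& t, int result) {
        Wake(t, result);
        time.UnscheduleTimeEvent(t); // a normal wake always drops the timeout
    }
    void CancelWait(Thread& t, int result, bool cancel_timer_task) {
        Wake(t, result);
        if (cancel_timer_task) { // the timeout callback itself passes false here
            time.UnscheduleTimeEvent(t);
        }
    }

private:
    static void Wake(Thread& t, int result) {
        t.wait_result = result;
        t.runnable = true;
    }
};

int main() {
    TimeManager tm;
    Thread t;
    ThreadQueue q{tm};
    q.EndWait(t, 0);            // unconditionally drops the timer
    q.CancelWait(t, -1, false); // timeout path: the timer is already consumed
}
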
diff --git a/src/core/hle/kernel/k_thread_queue.h b/src/core/hle/kernel/k_thread_queue.h
index 35d471dc5..ccb718e49 100644
--- a/src/core/hle/kernel/k_thread_queue.h
+++ b/src/core/hle/kernel/k_thread_queue.h
@@ -4,6 +4,7 @@
4 4
5#pragma once 5#pragma once
6 6
7#include "core/hle/kernel/k_scheduler.h"
7#include "core/hle/kernel/k_thread.h" 8#include "core/hle/kernel/k_thread.h"
8 9
9namespace Kernel { 10namespace Kernel {
@@ -11,71 +12,24 @@ namespace Kernel {
11class KThreadQueue { 12class KThreadQueue {
12public: 13public:
13 explicit KThreadQueue(KernelCore& kernel_) : kernel{kernel_} {} 14 explicit KThreadQueue(KernelCore& kernel_) : kernel{kernel_} {}
15 virtual ~KThreadQueue() = default;
14 16
15 bool IsEmpty() const { 17 virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
16 return wait_list.empty(); 18 ResultCode wait_result);
17 } 19 virtual void EndWait(KThread* waiting_thread, ResultCode wait_result);
18 20 virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
19 KThread::WaiterList::iterator begin() { 21 bool cancel_timer_task);
20 return wait_list.begin();
21 }
22 KThread::WaiterList::iterator end() {
23 return wait_list.end();
24 }
25
26 bool SleepThread(KThread* t) {
27 KScopedSchedulerLock sl{kernel};
28
29 // If the thread needs terminating, don't enqueue it.
30 if (t->IsTerminationRequested()) {
31 return false;
32 }
33
34 // Set the thread's queue and mark it as waiting.
35 t->SetSleepingQueue(this);
36 t->SetState(ThreadState::Waiting);
37
38 // Add the thread to the queue.
39 wait_list.push_back(*t);
40
41 return true;
42 }
43
44 void WakeupThread(KThread* t) {
45 KScopedSchedulerLock sl{kernel};
46
47 // Remove the thread from the queue.
48 wait_list.erase(wait_list.iterator_to(*t));
49
50 // Mark the thread as no longer sleeping.
51 t->SetState(ThreadState::Runnable);
52 t->SetSleepingQueue(nullptr);
53 }
54
55 KThread* WakeupFrontThread() {
56 KScopedSchedulerLock sl{kernel};
57
58 if (wait_list.empty()) {
59 return nullptr;
60 } else {
61 // Remove the thread from the queue.
62 auto it = wait_list.begin();
63 KThread* thread = std::addressof(*it);
64 wait_list.erase(it);
65
66 ASSERT(thread->GetState() == ThreadState::Waiting);
67
68 // Mark the thread as no longer sleeping.
69 thread->SetState(ThreadState::Runnable);
70 thread->SetSleepingQueue(nullptr);
71
72 return thread;
73 }
74 }
75 22
76private: 23private:
77 KernelCore& kernel; 24 KernelCore& kernel;
78 KThread::WaiterList wait_list{}; 25 KThread::WaiterList wait_list{};
79}; 26};
80 27
28class KThreadQueueWithoutEndWait : public KThreadQueue {
29public:
30 explicit KThreadQueueWithoutEndWait(KernelCore& kernel_) : KThreadQueue(kernel_) {}
31
32 void EndWait(KThread* waiting_thread, ResultCode wait_result) override final;
33};
34
81} // namespace Kernel 35} // namespace Kernel
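
The header now exposes a small virtual interface in place of the old intrusive wait list, and KThreadQueueWithoutEndWait stubs EndWait out with an override final, so waiters on such a queue can only be woken via NotifyAvailable or CancelWait. A toy illustration of that suppression pattern (names invented):

#include <iostream>

class Queue {
public:
    virtual ~Queue() = default;
    virtual void EndWait(int result) { std::cout << "woken, result=" << result << '\n'; }
    virtual void CancelWait(int result) { std::cout << "canceled, result=" << result << '\n'; }
};

// Waits on this queue ignore the normal end-of-wait path entirely;
// `final` also stops derived queues from reintroducing it.
class QueueWithoutEndWait : public Queue {
public:
    void EndWait(int) override final {} // intentional no-op
};

int main() {
    QueueWithoutEndWait q;
    Queue& base = q;
    base.EndWait(0);     // silently ignored by the override
    base.CancelWait(-1); // cancellation still works
}
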
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index e42a6d36f..2e4e4cb1c 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -14,6 +14,7 @@
14#include "common/assert.h" 14#include "common/assert.h"
15#include "common/logging/log.h" 15#include "common/logging/log.h"
16#include "common/microprofile.h" 16#include "common/microprofile.h"
17#include "common/scope_exit.h"
17#include "common/thread.h" 18#include "common/thread.h"
18#include "common/thread_worker.h" 19#include "common/thread_worker.h"
19#include "core/arm/arm_interface.h" 20#include "core/arm/arm_interface.h"
@@ -83,12 +84,16 @@ struct KernelCore::Impl {
83 } 84 }
84 85
85 void InitializeCores() { 86 void InitializeCores() {
86 for (auto& core : cores) { 87 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
87 core.Initialize(current_process->Is64BitProcess()); 88 cores[core_id].Initialize(current_process->Is64BitProcess());
89 system.Memory().SetCurrentPageTable(*current_process, core_id);
88 } 90 }
89 } 91 }
90 92
91 void Shutdown() { 93 void Shutdown() {
94 is_shutting_down.store(true, std::memory_order_relaxed);
95 SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
96
92 process_list.clear(); 97 process_list.clear();
93 98
94 // Close all open server ports. 99 // Close all open server ports.
@@ -123,15 +128,6 @@ struct KernelCore::Impl {
123 next_user_process_id = KProcess::ProcessIDMin; 128 next_user_process_id = KProcess::ProcessIDMin;
124 next_thread_id = 1; 129 next_thread_id = 1;
125 130
126 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
127 if (suspend_threads[core_id]) {
128 suspend_threads[core_id]->Close();
129 suspend_threads[core_id] = nullptr;
130 }
131
132 schedulers[core_id].reset();
133 }
134
135 cores.clear(); 131 cores.clear();
136 132
137 global_handle_table->Finalize(); 133 global_handle_table->Finalize();
@@ -159,6 +155,16 @@ struct KernelCore::Impl {
159 CleanupObject(time_shared_mem); 155 CleanupObject(time_shared_mem);
160 CleanupObject(system_resource_limit); 156 CleanupObject(system_resource_limit);
161 157
158 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
159 if (suspend_threads[core_id]) {
160 suspend_threads[core_id]->Close();
161 suspend_threads[core_id] = nullptr;
162 }
163
164 schedulers[core_id]->Finalize();
165 schedulers[core_id].reset();
166 }
167
162 // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others 168
163 next_host_thread_id = Core::Hardware::NUM_CPU_CORES; 169 next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
164 170
@@ -245,13 +251,11 @@ struct KernelCore::Impl {
245 KScopedSchedulerLock lock(kernel); 251 KScopedSchedulerLock lock(kernel);
246 global_scheduler_context->PreemptThreads(); 252 global_scheduler_context->PreemptThreads();
247 } 253 }
248 const auto time_interval = std::chrono::nanoseconds{ 254 const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
249 Core::Timing::msToCycles(std::chrono::milliseconds(10))};
250 system.CoreTiming().ScheduleEvent(time_interval, preemption_event); 255 system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
251 }); 256 });
252 257
253 const auto time_interval = 258 const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
254 std::chrono::nanoseconds{Core::Timing::msToCycles(std::chrono::milliseconds(10))};
255 system.CoreTiming().ScheduleEvent(time_interval, preemption_event); 259 system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
256 } 260 }
257 261
@@ -267,14 +271,6 @@ struct KernelCore::Impl {
267 271
268 void MakeCurrentProcess(KProcess* process) { 272 void MakeCurrentProcess(KProcess* process) {
269 current_process = process; 273 current_process = process;
270 if (process == nullptr) {
271 return;
272 }
273
274 const u32 core_id = GetCurrentHostThreadID();
275 if (core_id < Core::Hardware::NUM_CPU_CORES) {
276 system.Memory().SetCurrentPageTable(*process, core_id);
277 }
278 } 274 }
279 275
280 static inline thread_local u32 host_thread_id = UINT32_MAX; 276 static inline thread_local u32 host_thread_id = UINT32_MAX;
@@ -300,15 +296,16 @@ struct KernelCore::Impl {
300 // Gets the dummy KThread for the caller, allocating a new one if this is the first time 296 // Gets the dummy KThread for the caller, allocating a new one if this is the first time
301 KThread* GetHostDummyThread() { 297 KThread* GetHostDummyThread() {
302 auto make_thread = [this]() { 298 auto make_thread = [this]() {
303 std::unique_ptr<KThread> thread = std::make_unique<KThread>(system.Kernel()); 299 std::lock_guard lk(dummy_thread_lock);
300 auto& thread = dummy_threads.emplace_back(std::make_unique<KThread>(system.Kernel()));
304 KAutoObject::Create(thread.get()); 301 KAutoObject::Create(thread.get());
305 ASSERT(KThread::InitializeDummyThread(thread.get()).IsSuccess()); 302 ASSERT(KThread::InitializeDummyThread(thread.get()).IsSuccess());
306 thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId())); 303 thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));
307 return thread; 304 return thread.get();
308 }; 305 };
309 306
310 thread_local auto thread = make_thread(); 307 thread_local KThread* saved_thread = make_thread();
311 return thread.get(); 308 return saved_thread;
312 } 309 }
313 310
314 /// Registers a CPU core thread by allocating a host thread ID for it 311 /// Registers a CPU core thread by allocating a host thread ID for it
@@ -343,7 +340,16 @@ struct KernelCore::Impl {
343 is_phantom_mode_for_singlecore = value; 340 is_phantom_mode_for_singlecore = value;
344 } 341 }
345 342
343 bool IsShuttingDown() const {
344 return is_shutting_down.load(std::memory_order_relaxed);
345 }
346
346 KThread* GetCurrentEmuThread() { 347 KThread* GetCurrentEmuThread() {
348 // If we are shutting down the kernel, none of this is relevant anymore.
349 if (IsShuttingDown()) {
350 return {};
351 }
352
347 const auto thread_id = GetCurrentHostThreadID(); 353 const auto thread_id = GetCurrentHostThreadID();
348 if (thread_id >= Core::Hardware::NUM_CPU_CORES) { 354 if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
349 return GetHostDummyThread(); 355 return GetHostDummyThread();
@@ -695,6 +701,12 @@ struct KernelCore::Impl {
695 return port; 701 return port;
696 } 702 }
697 703
704 std::mutex server_ports_lock;
705 std::mutex server_sessions_lock;
706 std::mutex registered_objects_lock;
707 std::mutex registered_in_use_objects_lock;
708 std::mutex dummy_thread_lock;
709
698 std::atomic<u32> next_object_id{0}; 710 std::atomic<u32> next_object_id{0};
699 std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin}; 711 std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin};
700 std::atomic<u64> next_user_process_id{KProcess::ProcessIDMin}; 712 std::atomic<u64> next_user_process_id{KProcess::ProcessIDMin};
@@ -725,10 +737,6 @@ struct KernelCore::Impl {
725 std::unordered_set<KServerSession*> server_sessions; 737 std::unordered_set<KServerSession*> server_sessions;
726 std::unordered_set<KAutoObject*> registered_objects; 738 std::unordered_set<KAutoObject*> registered_objects;
727 std::unordered_set<KAutoObject*> registered_in_use_objects; 739 std::unordered_set<KAutoObject*> registered_in_use_objects;
728 std::mutex server_ports_lock;
729 std::mutex server_sessions_lock;
730 std::mutex registered_objects_lock;
731 std::mutex registered_in_use_objects_lock;
732 740
733 std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor; 741 std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
734 std::vector<Kernel::PhysicalCore> cores; 742 std::vector<Kernel::PhysicalCore> cores;
@@ -753,7 +761,11 @@ struct KernelCore::Impl {
753 std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{}; 761 std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
754 std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{}; 762 std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
755 763
764 // Specifically tracked to be automatically destroyed with the kernel
765 std::vector<std::unique_ptr<KThread>> dummy_threads;
766
756 bool is_multicore{}; 767 bool is_multicore{};
768 std::atomic_bool is_shutting_down{};
757 bool is_phantom_mode_for_singlecore{}; 769 bool is_phantom_mode_for_singlecore{};
758 u32 single_core_thread_id{}; 770 u32 single_core_thread_id{};
759 771
@@ -839,16 +851,20 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
839 return impl->cores[id]; 851 return impl->cores[id];
840} 852}
841 853
854size_t KernelCore::CurrentPhysicalCoreIndex() const {
855 const u32 core_id = impl->GetCurrentHostThreadID();
856 if (core_id >= Core::Hardware::NUM_CPU_CORES) {
857 return Core::Hardware::NUM_CPU_CORES - 1;
858 }
859 return core_id;
860}
861
842Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() { 862Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
843 u32 core_id = impl->GetCurrentHostThreadID(); 863 return impl->cores[CurrentPhysicalCoreIndex()];
844 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
845 return impl->cores[core_id];
846} 864}
847 865
848const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const { 866const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
849 u32 core_id = impl->GetCurrentHostThreadID(); 867 return impl->cores[CurrentPhysicalCoreIndex()];
850 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
851 return impl->cores[core_id];
852} 868}
853 869
854Kernel::KScheduler* KernelCore::CurrentScheduler() { 870Kernel::KScheduler* KernelCore::CurrentScheduler() {
@@ -1051,6 +1067,9 @@ void KernelCore::Suspend(bool in_suspention) {
1051 impl->suspend_threads[core_id]->SetState(state); 1067 impl->suspend_threads[core_id]->SetState(state);
1052 impl->suspend_threads[core_id]->SetWaitReasonForDebugging( 1068 impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
1053 ThreadWaitReasonForDebugging::Suspended); 1069 ThreadWaitReasonForDebugging::Suspended);
1070 if (!should_suspend) {
1071 impl->suspend_threads[core_id]->DisableDispatch();
1072 }
1054 } 1073 }
1055 } 1074 }
1056} 1075}
@@ -1059,19 +1078,21 @@ bool KernelCore::IsMulticore() const {
1059 return impl->is_multicore; 1078 return impl->is_multicore;
1060} 1079}
1061 1080
1081bool KernelCore::IsShuttingDown() const {
1082 return impl->IsShuttingDown();
1083}
1084
1062void KernelCore::ExceptionalExit() { 1085void KernelCore::ExceptionalExit() {
1063 exception_exited = true; 1086 exception_exited = true;
1064 Suspend(true); 1087 Suspend(true);
1065} 1088}
1066 1089
1067void KernelCore::EnterSVCProfile() { 1090void KernelCore::EnterSVCProfile() {
1068 std::size_t core = impl->GetCurrentHostThreadID(); 1091 impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
1069 impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
1070} 1092}
1071 1093
1072void KernelCore::ExitSVCProfile() { 1094void KernelCore::ExitSVCProfile() {
1073 std::size_t core = impl->GetCurrentHostThreadID(); 1095 MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
1074 MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
1075} 1096}
1076 1097
1077std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) { 1098std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
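
Two of the kernel.cpp changes work together: GetHostDummyThread now parks the allocated dummy threads in a kernel-owned, mutex-guarded vector (so Shutdown destroys them deterministically) while each host thread caches only a raw pointer in a thread_local. A standalone sketch of that ownership split, using std::string as a stand-in payload and assuming a single registry instance, as with the single KernelCore:

#include <memory>
#include <mutex>
#include <string>
#include <vector>

class Registry {
public:
    // Returns a per-host-thread object; the first call on each thread allocates.
    std::string* GetForThisThread() {
        auto make = [this] {
            std::lock_guard lk(lock); // the registry owns the storage...
            auto& obj = objects.emplace_back(std::make_unique<std::string>("dummy"));
            return obj.get();         // ...callers keep only a raw pointer
        };
        // Initialized once per thread; later calls skip the lock entirely.
        thread_local std::string* cached = make();
        return cached;
    }

private:
    std::mutex lock;
    std::vector<std::unique_ptr<std::string>> objects;
};
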
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index d2ceae950..b9b423908 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -53,6 +53,7 @@ class KSharedMemoryInfo;
53class KThread; 53class KThread;
54class KTransferMemory; 54class KTransferMemory;
55class KWritableEvent; 55class KWritableEvent;
56class KCodeMemory;
56class PhysicalCore; 57class PhysicalCore;
57class ServiceThread; 58class ServiceThread;
58class Synchronization; 59class Synchronization;
@@ -148,6 +149,9 @@ public:
148 /// Gets an instance of the respective physical CPU core. 149
149 const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const; 150 const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
150 151
152 /// Gets the current physical core index for the running host thread.
153 std::size_t CurrentPhysicalCoreIndex() const;
154
151 /// Gets the sole instance of the Scheduler at the current running core. 155 /// Gets the sole instance of the Scheduler at the current running core.
152 Kernel::KScheduler* CurrentScheduler(); 156 Kernel::KScheduler* CurrentScheduler();
153 157
@@ -271,6 +275,8 @@ public:
271 275
272 bool IsMulticore() const; 276 bool IsMulticore() const;
273 277
278 bool IsShuttingDown() const;
279
274 void EnterSVCProfile(); 280 void EnterSVCProfile();
275 281
276 void ExitSVCProfile(); 282 void ExitSVCProfile();
@@ -326,6 +332,8 @@ public:
326 return slab_heap_container->transfer_memory; 332 return slab_heap_container->transfer_memory;
327 } else if constexpr (std::is_same_v<T, KWritableEvent>) { 333 } else if constexpr (std::is_same_v<T, KWritableEvent>) {
328 return slab_heap_container->writeable_event; 334 return slab_heap_container->writeable_event;
335 } else if constexpr (std::is_same_v<T, KCodeMemory>) {
336 return slab_heap_container->code_memory;
329 } 337 }
330 } 338 }
331 339
@@ -377,6 +385,7 @@ private:
377 KSlabHeap<KThread> thread; 385 KSlabHeap<KThread> thread;
378 KSlabHeap<KTransferMemory> transfer_memory; 386 KSlabHeap<KTransferMemory> transfer_memory;
379 KSlabHeap<KWritableEvent> writeable_event; 387 KSlabHeap<KWritableEvent> writeable_event;
388 KSlabHeap<KCodeMemory> code_memory;
380 }; 389 };
381 390
382 std::unique_ptr<SlabHeapContainer> slab_heap_container; 391 std::unique_ptr<SlabHeapContainer> slab_heap_container;
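
The SlabHeap<T>() extension above relies on an if constexpr chain to map a type to its container member at compile time; adding KCodeMemory support is just one more branch. A self-contained sketch of the technique with toy stand-in types:

#include <type_traits>
#include <vector>

struct KEvent {};
struct KCodeMemory {};

struct SlabContainer {
    std::vector<KEvent> event;
    std::vector<KCodeMemory> code_memory;
};

// Compile-time dispatch: each instantiation collapses to one member access.
// An unsupported T leaves the function without a return statement, so the
// auto& deduction fails and the mistake is caught at compile time.
template <typename T>
auto& SlabHeapFor(SlabContainer& c) {
    if constexpr (std::is_same_v<T, KEvent>) {
        return c.event;
    } else if constexpr (std::is_same_v<T, KCodeMemory>) {
        return c.code_memory;
    }
}

int main() {
    SlabContainer c;
    auto& heap = SlabHeapFor<KCodeMemory>(c); // resolves to c.code_memory
    heap.emplace_back();
}
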
diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp
index 6721b6276..03f3dec10 100644
--- a/src/core/hle/kernel/service_thread.cpp
+++ b/src/core/hle/kernel/service_thread.cpp
@@ -25,24 +25,27 @@ public:
25 void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context); 25 void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context);
26 26
27private: 27private:
28 std::vector<std::thread> threads; 28 std::vector<std::jthread> threads;
29 std::queue<std::function<void()>> requests; 29 std::queue<std::function<void()>> requests;
30 std::mutex queue_mutex; 30 std::mutex queue_mutex;
31 std::condition_variable condition; 31 std::condition_variable_any condition;
32 const std::string service_name; 32 const std::string service_name;
33 bool stop{};
34}; 33};
35 34
36ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name) 35ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name)
37 : service_name{name} { 36 : service_name{name} {
38 for (std::size_t i = 0; i < num_threads; ++i) 37 for (std::size_t i = 0; i < num_threads; ++i) {
39 threads.emplace_back([this, &kernel] { 38 threads.emplace_back([this, &kernel](std::stop_token stop_token) {
40 Common::SetCurrentThreadName(std::string{"yuzu:HleService:" + service_name}.c_str()); 39 Common::SetCurrentThreadName(std::string{"yuzu:HleService:" + service_name}.c_str());
41 40
42 // Wait for first request before trying to acquire a render context 41 // Wait for first request before trying to acquire a render context
43 { 42 {
44 std::unique_lock lock{queue_mutex}; 43 std::unique_lock lock{queue_mutex};
45 condition.wait(lock, [this] { return stop || !requests.empty(); }); 44 condition.wait(lock, stop_token, [this] { return !requests.empty(); });
45 }
46
47 if (stop_token.stop_requested()) {
48 return;
46 } 49 }
47 50
48 kernel.RegisterHostThread(); 51 kernel.RegisterHostThread();
@@ -52,10 +55,16 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
52 55
53 { 56 {
54 std::unique_lock lock{queue_mutex}; 57 std::unique_lock lock{queue_mutex};
55 condition.wait(lock, [this] { return stop || !requests.empty(); }); 58 condition.wait(lock, stop_token, [this] { return !requests.empty(); });
56 if (stop || requests.empty()) { 59
60 if (stop_token.stop_requested()) {
57 return; 61 return;
58 } 62 }
63
64 if (requests.empty()) {
65 continue;
66 }
67
59 task = std::move(requests.front()); 68 task = std::move(requests.front());
60 requests.pop(); 69 requests.pop();
61 } 70 }
@@ -63,6 +72,7 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
63 task(); 72 task();
64 } 73 }
65 }); 74 });
75 }
66} 76}
67 77
68void ServiceThread::Impl::QueueSyncRequest(KSession& session, 78void ServiceThread::Impl::QueueSyncRequest(KSession& session,
@@ -88,12 +98,9 @@ void ServiceThread::Impl::QueueSyncRequest(KSession& session,
88} 98}
89 99
90ServiceThread::Impl::~Impl() { 100ServiceThread::Impl::~Impl() {
91 {
92 std::unique_lock lock{queue_mutex};
93 stop = true;
94 }
95 condition.notify_all(); 101 condition.notify_all();
96 for (std::thread& thread : threads) { 102 for (auto& thread : threads) {
103 thread.request_stop();
97 thread.join(); 104 thread.join();
98 } 105 }
99} 106}
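
The service_thread.cpp change swaps the manual stop flag for C++20 cooperative cancellation: std::jthread hands each worker a std::stop_token, the stop_token-aware wait on std::condition_variable_any wakes when a stop is requested, and ~jthread requests stop and joins automatically. A condensed standalone sketch of the same worker-loop shape (toy int jobs instead of HLE requests):

#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>

class Worker {
public:
    Worker() : thread([this](std::stop_token stop) { Run(stop); }) {}
    // No explicit stop flag or notify in a destructor: ~jthread requests stop
    // and joins, and the stop_token overload of wait() wakes the waiter.

    void Push(int job) {
        {
            std::lock_guard lk(mutex);
            jobs.push(job);
        }
        condition.notify_one();
    }

private:
    void Run(std::stop_token stop) {
        while (true) {
            int job;
            {
                std::unique_lock lk(mutex);
                condition.wait(lk, stop, [this] { return !jobs.empty(); });
                if (stop.stop_requested()) {
                    return;
                }
                if (jobs.empty()) {
                    continue;
                }
                job = jobs.front();
                jobs.pop();
            }
            (void)job; // process the job outside the lock
        }
    }

    std::mutex mutex;
    std::condition_variable_any condition;
    std::queue<int> jobs;
    std::jthread thread; // declared last: destroyed (stopped/joined) first
};

Declaring the jthread as the last member matters: members are destroyed in reverse order, so the stop and join complete while the mutex, condition variable, and queue are still alive.
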
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index f9d99bc51..a9f7438ea 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -18,6 +18,7 @@
18#include "core/core_timing.h" 18#include "core/core_timing.h"
19#include "core/hle/kernel/k_client_port.h" 19#include "core/hle/kernel/k_client_port.h"
20#include "core/hle/kernel/k_client_session.h" 20#include "core/hle/kernel/k_client_session.h"
21#include "core/hle/kernel/k_code_memory.h"
21#include "core/hle/kernel/k_event.h" 22#include "core/hle/kernel/k_event.h"
22#include "core/hle/kernel/k_handle_table.h" 23#include "core/hle/kernel/k_handle_table.h"
23#include "core/hle/kernel/k_memory_block.h" 24#include "core/hle/kernel/k_memory_block.h"
@@ -31,6 +32,7 @@
31#include "core/hle/kernel/k_shared_memory.h" 32#include "core/hle/kernel/k_shared_memory.h"
32#include "core/hle/kernel/k_synchronization_object.h" 33#include "core/hle/kernel/k_synchronization_object.h"
33#include "core/hle/kernel/k_thread.h" 34#include "core/hle/kernel/k_thread.h"
35#include "core/hle/kernel/k_thread_queue.h"
34#include "core/hle/kernel/k_transfer_memory.h" 36#include "core/hle/kernel/k_transfer_memory.h"
35#include "core/hle/kernel/k_writable_event.h" 37#include "core/hle/kernel/k_writable_event.h"
36#include "core/hle/kernel/kernel.h" 38#include "core/hle/kernel/kernel.h"
@@ -307,26 +309,29 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
307 309
308/// Makes a blocking IPC call to an OS service. 310/// Makes a blocking IPC call to an OS service.
309static ResultCode SendSyncRequest(Core::System& system, Handle handle) { 311static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
310
311 auto& kernel = system.Kernel(); 312 auto& kernel = system.Kernel();
312 313
314 // Create the wait queue.
315 KThreadQueue wait_queue(kernel);
316
317 // Get the client session from its handle.
318 KScopedAutoObject session =
319 kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
320 R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
321
322 LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
323
313 auto thread = kernel.CurrentScheduler()->GetCurrentThread(); 324 auto thread = kernel.CurrentScheduler()->GetCurrentThread();
314 { 325 {
315 KScopedSchedulerLock lock(kernel); 326 KScopedSchedulerLock lock(kernel);
316 thread->SetState(ThreadState::Waiting); 327
317 thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); 328 // This is a synchronous request, so we should wait for our request to complete.
318 329 GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue));
319 { 330 GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
320 KScopedAutoObject session = 331 session->SendSyncRequest(&GetCurrentThread(kernel), system.Memory(), system.CoreTiming());
321 kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
322 R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
323 LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
324 session->SendSyncRequest(thread, system.Memory(), system.CoreTiming());
325 }
326 } 332 }
327 333
328 KSynchronizationObject* dummy{}; 334 return thread->GetWaitResult();
329 return thread->GetWaitResult(std::addressof(dummy));
330} 335}
331 336
332static ResultCode SendSyncRequest32(Core::System& system, Handle handle) { 337static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {
@@ -873,7 +878,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
873 const u64 thread_ticks = current_thread->GetCpuTime(); 878 const u64 thread_ticks = current_thread->GetCpuTime();
874 879
875 out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks); 880 out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
876 } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) { 881 } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
877 out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks; 882 out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
878 } 883 }
879 884
@@ -887,7 +892,8 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
887 return ResultInvalidHandle; 892 return ResultInvalidHandle;
888 } 893 }
889 894
890 if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id != system.CurrentCoreIndex()) { 895 if (info_sub_id != 0xFFFFFFFFFFFFFFFF &&
896 info_sub_id != system.Kernel().CurrentPhysicalCoreIndex()) {
891 LOG_ERROR(Kernel_SVC, "Core is not the current core, got {}", info_sub_id); 897 LOG_ERROR(Kernel_SVC, "Core is not the current core, got {}", info_sub_id);
892 return ResultInvalidCombination; 898 return ResultInvalidCombination;
893 } 899 }
@@ -1169,6 +1175,8 @@ static u32 GetCurrentProcessorNumber32(Core::System& system) {
1169 return GetCurrentProcessorNumber(system); 1175 return GetCurrentProcessorNumber(system);
1170} 1176}
1171 1177
1178namespace {
1179
1172constexpr bool IsValidSharedMemoryPermission(Svc::MemoryPermission perm) { 1180constexpr bool IsValidSharedMemoryPermission(Svc::MemoryPermission perm) {
1173 switch (perm) { 1181 switch (perm) {
1174 case Svc::MemoryPermission::Read: 1182 case Svc::MemoryPermission::Read:
@@ -1179,10 +1187,40 @@ constexpr bool IsValidSharedMemoryPermission(Svc::MemoryPermission perm) {
1179 } 1187 }
1180} 1188}
1181 1189
1182constexpr bool IsValidRemoteSharedMemoryPermission(Svc::MemoryPermission perm) { 1190[[maybe_unused]] constexpr bool IsValidRemoteSharedMemoryPermission(Svc::MemoryPermission perm) {
1183 return IsValidSharedMemoryPermission(perm) || perm == Svc::MemoryPermission::DontCare; 1191 return IsValidSharedMemoryPermission(perm) || perm == Svc::MemoryPermission::DontCare;
1184} 1192}
1185 1193
1194constexpr bool IsValidProcessMemoryPermission(Svc::MemoryPermission perm) {
1195 switch (perm) {
1196 case Svc::MemoryPermission::None:
1197 case Svc::MemoryPermission::Read:
1198 case Svc::MemoryPermission::ReadWrite:
1199 case Svc::MemoryPermission::ReadExecute:
1200 return true;
1201 default:
1202 return false;
1203 }
1204}
1205
1206constexpr bool IsValidMapCodeMemoryPermission(Svc::MemoryPermission perm) {
1207 return perm == Svc::MemoryPermission::ReadWrite;
1208}
1209
1210constexpr bool IsValidMapToOwnerCodeMemoryPermission(Svc::MemoryPermission perm) {
1211 return perm == Svc::MemoryPermission::Read || perm == Svc::MemoryPermission::ReadExecute;
1212}
1213
1214constexpr bool IsValidUnmapCodeMemoryPermission(Svc::MemoryPermission perm) {
1215 return perm == Svc::MemoryPermission::None;
1216}
1217
1218constexpr bool IsValidUnmapFromOwnerCodeMemoryPermission(Svc::MemoryPermission perm) {
1219 return perm == Svc::MemoryPermission::None;
1220}
1221
1222} // Anonymous namespace
1223
1186static ResultCode MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, 1224static ResultCode MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address,
1187 u64 size, Svc::MemoryPermission map_perm) { 1225 u64 size, Svc::MemoryPermission map_perm) {
1188 LOG_TRACE(Kernel_SVC, 1226 LOG_TRACE(Kernel_SVC,
@@ -1262,6 +1300,223 @@ static ResultCode UnmapSharedMemory32(Core::System& system, Handle shmem_handle,
1262 return UnmapSharedMemory(system, shmem_handle, address, size); 1300 return UnmapSharedMemory(system, shmem_handle, address, size);
1263} 1301}
1264 1302
1303static ResultCode SetProcessMemoryPermission(Core::System& system, Handle process_handle,
1304 VAddr address, u64 size, Svc::MemoryPermission perm) {
1305 LOG_TRACE(Kernel_SVC,
1306 "called, process_handle=0x{:X}, addr=0x{:X}, size=0x{:X}, permissions=0x{:08X}",
1307 process_handle, address, size, perm);
1308
1309 // Validate the address/size.
1310 R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
1311 R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
1312 R_UNLESS(size > 0, ResultInvalidSize);
1313 R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
1314
1315 // Validate the memory permission.
1316 R_UNLESS(IsValidProcessMemoryPermission(perm), ResultInvalidNewMemoryPermission);
1317
1318 // Get the process from its handle.
1319 KScopedAutoObject process =
1320 system.CurrentProcess()->GetHandleTable().GetObject<KProcess>(process_handle);
1321 R_UNLESS(process.IsNotNull(), ResultInvalidHandle);
1322
1323 // Validate that the address is in range.
1324 auto& page_table = process->PageTable();
1325 R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
1326
1327 // Set the memory permission.
1328 return page_table.SetProcessMemoryPermission(address, size, ConvertToKMemoryPermission(perm));
1329}
1330
1331static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
1332 VAddr src_address, u64 size) {
1333 LOG_TRACE(Kernel_SVC,
1334 "called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
1335 dst_address, process_handle, src_address, size);
1336
1337 // Validate the address/size.
1338 R_UNLESS(Common::IsAligned(dst_address, PageSize), ResultInvalidAddress);
1339 R_UNLESS(Common::IsAligned(src_address, PageSize), ResultInvalidAddress);
1340 R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
1341 R_UNLESS(size > 0, ResultInvalidSize);
1342 R_UNLESS((dst_address < dst_address + size), ResultInvalidCurrentMemory);
1343 R_UNLESS((src_address < src_address + size), ResultInvalidCurrentMemory);
1344
1345 // Get the processes.
1346 KProcess* dst_process = system.CurrentProcess();
1347 KScopedAutoObject src_process =
1348 dst_process->GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
1349 R_UNLESS(src_process.IsNotNull(), ResultInvalidHandle);
1350
1351 // Get the page tables.
1352 auto& dst_pt = dst_process->PageTable();
1353 auto& src_pt = src_process->PageTable();
1354
1355 // Validate that the mapping is in range.
1356 R_UNLESS(src_pt.Contains(src_address, size), ResultInvalidCurrentMemory);
1357 R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState::SharedCode),
1358 ResultInvalidMemoryRegion);
1359
1360 // Create a new page group.
1361 KMemoryInfo kBlockInfo = dst_pt.QueryInfo(dst_address);
1362 KPageLinkedList pg(kBlockInfo.GetAddress(), kBlockInfo.GetNumPages());
1363
1364 // Map the group.
1365 R_TRY(dst_pt.MapPages(dst_address, pg, KMemoryState::SharedCode,
1366 KMemoryPermission::UserReadWrite));
1367
1368 return ResultSuccess;
1369}
1370
1371static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle,
1372 VAddr src_address, u64 size) {
1373 LOG_TRACE(Kernel_SVC,
1374 "called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}",
1375 dst_address, process_handle, src_address, size);
1376
1377 // Validate the address/size.
1378 R_UNLESS(Common::IsAligned(dst_address, PageSize), ResultInvalidAddress);
1379 R_UNLESS(Common::IsAligned(src_address, PageSize), ResultInvalidAddress);
1380 R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
1381 R_UNLESS(size > 0, ResultInvalidSize);
1382 R_UNLESS((dst_address < dst_address + size), ResultInvalidCurrentMemory);
1383 R_UNLESS((src_address < src_address + size), ResultInvalidCurrentMemory);
1384
1385 // Get the processes.
1386 KProcess* dst_process = system.CurrentProcess();
1387 KScopedAutoObject src_process =
1388 dst_process->GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
1389 R_UNLESS(src_process.IsNotNull(), ResultInvalidHandle);
1390
1391 // Get the page tables.
1392 auto& dst_pt = dst_process->PageTable();
1393 auto& src_pt = src_process->PageTable();
1394
1395 // Validate that the mapping is in range.
1396 R_UNLESS(src_pt.Contains(src_address, size), ResultInvalidCurrentMemory);
1397 R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState::SharedCode),
1398 ResultInvalidMemoryRegion);
1399
1400 // Unmap the memory.
1401 R_TRY(dst_pt.UnmapProcessMemory(dst_address, size, src_pt, src_address));
1402
1403 return ResultSuccess;
1404}
1405
1406static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) {
1407 LOG_TRACE(Kernel_SVC, "called, handle_out=0x{:X}, address=0x{:X}, size=0x{:X}",
1408 static_cast<void*>(out), address, size);
1409 // Get kernel instance.
1410 auto& kernel = system.Kernel();
1411
1412 // Validate address / size.
1413 R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
1414 R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
1415 R_UNLESS(size > 0, ResultInvalidSize);
1416 R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
1417
1418 // Create the code memory.
1419
1420 KCodeMemory* code_mem = KCodeMemory::Create(kernel);
1421 R_UNLESS(code_mem != nullptr, ResultOutOfResource);
1422
1423 // Verify that the region is in range.
1424 R_UNLESS(system.CurrentProcess()->PageTable().Contains(address, size),
1425 ResultInvalidCurrentMemory);
1426
1427 // Initialize the code memory.
1428 R_TRY(code_mem->Initialize(system.DeviceMemory(), address, size));
1429
1430 // Register the code memory.
1431 KCodeMemory::Register(kernel, code_mem);
1432
1433 // Add the code memory to the handle table.
1434 R_TRY(system.CurrentProcess()->GetHandleTable().Add(out, code_mem));
1435
1436 code_mem->Close();
1437
1438 return ResultSuccess;
1439}
1440
1441static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation,
1442 VAddr address, size_t size, Svc::MemoryPermission perm) {
1443
1444 LOG_TRACE(Kernel_SVC,
1445 "called, code_memory_handle=0x{:X}, operation=0x{:X}, address=0x{:X}, size=0x{:X}, "
1446 "permission=0x{:X}",
1447 code_memory_handle, operation, address, size, perm);
1448
1449 // Validate the address / size.
1450 R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress);
1451 R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize);
1452 R_UNLESS(size > 0, ResultInvalidSize);
1453 R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
1454
1455 // Get the code memory from its handle.
1456 KScopedAutoObject code_mem =
1457 system.CurrentProcess()->GetHandleTable().GetObject<KCodeMemory>(code_memory_handle);
1458 R_UNLESS(code_mem.IsNotNull(), ResultInvalidHandle);
1459
1460 // NOTE: Here, Atmosphere extends the SVC to allow code memory operations on one's own process.
1461 // This enables homebrew usage of these SVCs for JIT.
1462
1463 // Perform the operation.
1464 switch (static_cast<CodeMemoryOperation>(operation)) {
1465 case CodeMemoryOperation::Map: {
1466 // Check that the region is in range.
1467 R_UNLESS(
1468 system.CurrentProcess()->PageTable().CanContain(address, size, KMemoryState::CodeOut),
1469 ResultInvalidMemoryRegion);
1470
1471 // Check the memory permission.
1472 R_UNLESS(IsValidMapCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
1473
1474 // Map the memory.
1475 R_TRY(code_mem->Map(address, size));
1476 } break;
1477 case CodeMemoryOperation::Unmap: {
1478 // Check that the region is in range.
1479 R_UNLESS(
1480 system.CurrentProcess()->PageTable().CanContain(address, size, KMemoryState::CodeOut),
1481 ResultInvalidMemoryRegion);
1482
1483 // Check the memory permission.
1484 R_UNLESS(IsValidUnmapCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
1485
1486 // Unmap the memory.
1487 R_TRY(code_mem->Unmap(address, size));
1488 } break;
1489 case CodeMemoryOperation::MapToOwner: {
1490 // Check that the region is in range.
1491 R_UNLESS(code_mem->GetOwner()->PageTable().CanContain(address, size,
1492 KMemoryState::GeneratedCode),
1493 ResultInvalidMemoryRegion);
1494
1495 // Check the memory permission.
1496 R_UNLESS(IsValidMapToOwnerCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
1497
1498 // Map the memory to its owner.
1499 R_TRY(code_mem->MapToOwner(address, size, perm));
1500 } break;
1501 case CodeMemoryOperation::UnmapFromOwner: {
1502 // Check that the region is in range.
1503 R_UNLESS(code_mem->GetOwner()->PageTable().CanContain(address, size,
1504 KMemoryState::GeneratedCode),
1505 ResultInvalidMemoryRegion);
1506
1507 // Check the memory permission.
1508 R_UNLESS(IsValidUnmapFromOwnerCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission);
1509
1510 // Unmap the memory from its owner.
1511 R_TRY(code_mem->UnmapFromOwner(address, size));
1512 } break;
1513 default:
1514 return ResultInvalidEnumValue;
1515 }
1516
1517 return ResultSuccess;
1518}
1519
1265static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address, 1520static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address,
1266 VAddr page_info_address, Handle process_handle, 1521 VAddr page_info_address, Handle process_handle,
1267 VAddr address) { 1522 VAddr address) {
@@ -1459,10 +1714,14 @@ static void ExitProcess32(Core::System& system) {
1459 ExitProcess(system); 1714 ExitProcess(system);
1460} 1715}
1461 1716
1462static constexpr bool IsValidVirtualCoreId(int32_t core_id) { 1717namespace {
1718
1719constexpr bool IsValidVirtualCoreId(int32_t core_id) {
1463 return (0 <= core_id && core_id < static_cast<int32_t>(Core::Hardware::NUM_CPU_CORES)); 1720 return (0 <= core_id && core_id < static_cast<int32_t>(Core::Hardware::NUM_CPU_CORES));
1464} 1721}
1465 1722
1723} // Anonymous namespace
1724
1466/// Creates a new thread 1725/// Creates a new thread
1467static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg, 1726static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point, u64 arg,
1468 VAddr stack_bottom, u32 priority, s32 core_id) { 1727 VAddr stack_bottom, u32 priority, s32 core_id) {
@@ -1846,7 +2105,9 @@ static ResultCode ResetSignal32(Core::System& system, Handle handle) {
1846 return ResetSignal(system, handle); 2105 return ResetSignal(system, handle);
1847} 2106}
1848 2107
1849static constexpr bool IsValidTransferMemoryPermission(MemoryPermission perm) { 2108namespace {
2109
2110constexpr bool IsValidTransferMemoryPermission(MemoryPermission perm) {
1850 switch (perm) { 2111 switch (perm) {
1851 case MemoryPermission::None: 2112 case MemoryPermission::None:
1852 case MemoryPermission::Read: 2113 case MemoryPermission::Read:
@@ -1857,6 +2118,8 @@ static constexpr bool IsValidTransferMemoryPermission(MemoryPermission perm) {
1857 } 2118 }
1858} 2119}
1859 2120
2121} // Anonymous namespace
2122
1860/// Creates a TransferMemory object 2123/// Creates a TransferMemory object
1861static ResultCode CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size, 2124static ResultCode CreateTransferMemory(Core::System& system, Handle* out, VAddr address, u64 size,
1862 MemoryPermission map_perm) { 2125 MemoryPermission map_perm) {
@@ -2548,8 +2811,8 @@ static const FunctionDef SVC_Table_64[] = {
2548 {0x48, nullptr, "MapPhysicalMemoryUnsafe"}, 2811 {0x48, nullptr, "MapPhysicalMemoryUnsafe"},
2549 {0x49, nullptr, "UnmapPhysicalMemoryUnsafe"}, 2812 {0x49, nullptr, "UnmapPhysicalMemoryUnsafe"},
2550 {0x4A, nullptr, "SetUnsafeLimit"}, 2813 {0x4A, nullptr, "SetUnsafeLimit"},
2551 {0x4B, nullptr, "CreateCodeMemory"}, 2814 {0x4B, SvcWrap64<CreateCodeMemory>, "CreateCodeMemory"},
2552 {0x4C, nullptr, "ControlCodeMemory"}, 2815 {0x4C, SvcWrap64<ControlCodeMemory>, "ControlCodeMemory"},
2553 {0x4D, nullptr, "SleepSystem"}, 2816 {0x4D, nullptr, "SleepSystem"},
2554 {0x4E, nullptr, "ReadWriteRegister"}, 2817 {0x4E, nullptr, "ReadWriteRegister"},
2555 {0x4F, nullptr, "SetProcessActivity"}, 2818 {0x4F, nullptr, "SetProcessActivity"},
@@ -2588,9 +2851,9 @@ static const FunctionDef SVC_Table_64[] = {
2588 {0x70, nullptr, "CreatePort"}, 2851 {0x70, nullptr, "CreatePort"},
2589 {0x71, nullptr, "ManageNamedPort"}, 2852 {0x71, nullptr, "ManageNamedPort"},
2590 {0x72, nullptr, "ConnectToPort"}, 2853 {0x72, nullptr, "ConnectToPort"},
2591 {0x73, nullptr, "SetProcessMemoryPermission"}, 2854 {0x73, SvcWrap64<SetProcessMemoryPermission>, "SetProcessMemoryPermission"},
2592 {0x74, nullptr, "MapProcessMemory"}, 2855 {0x74, SvcWrap64<MapProcessMemory>, "MapProcessMemory"},
2593 {0x75, nullptr, "UnmapProcessMemory"}, 2856 {0x75, SvcWrap64<UnmapProcessMemory>, "UnmapProcessMemory"},
2594 {0x76, SvcWrap64<QueryProcessMemory>, "QueryProcessMemory"}, 2857 {0x76, SvcWrap64<QueryProcessMemory>, "QueryProcessMemory"},
2595 {0x77, SvcWrap64<MapProcessCodeMemory>, "MapProcessCodeMemory"}, 2858 {0x77, SvcWrap64<MapProcessCodeMemory>, "MapProcessCodeMemory"},
2596 {0x78, SvcWrap64<UnmapProcessCodeMemory>, "UnmapProcessCodeMemory"}, 2859 {0x78, SvcWrap64<UnmapProcessCodeMemory>, "UnmapProcessCodeMemory"},
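
Every new SVC above front-loads the same guard sequence via R_UNLESS (a macro that returns the given ResultCode when its condition is false): page alignment, a nonzero size, and an overflow check spelled address < address + size, which fails exactly when the unsigned sum wraps. A standalone distillation of those guards with a toy Result enum:

#include <cstdint>

enum class Result { Success, InvalidAddress, InvalidSize, InvalidCurrentMemory };

constexpr std::uint64_t PageSize = 0x1000;

constexpr bool IsAligned(std::uint64_t value, std::uint64_t align) {
    return (value & (align - 1)) == 0;
}

// Mirrors the guard sequence used by the new SVCs above.
constexpr Result ValidateRegion(std::uint64_t address, std::uint64_t size) {
    if (!IsAligned(address, PageSize)) {
        return Result::InvalidAddress;
    }
    if (!IsAligned(size, PageSize) || size == 0) {
        return Result::InvalidSize;
    }
    // Unsigned wraparound check: if address + size overflows, the sum is
    // smaller than address and the comparison fails.
    if (!(address < address + size)) {
        return Result::InvalidCurrentMemory;
    }
    return Result::Success;
}

static_assert(ValidateRegion(0x1000, 0x2000) == Result::Success);
static_assert(ValidateRegion(0xFFFFFFFFFFFFF000, 0x2000) == Result::InvalidCurrentMemory);
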
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 6e62e656f..86255fe6d 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -73,6 +73,23 @@ void SvcWrap64(Core::System& system) {
73 .raw); 73 .raw);
74} 74}
75 75
76// Used by MapProcessMemory and UnmapProcessMemory
77template <ResultCode func(Core::System&, u64, u32, u64, u64)>
78void SvcWrap64(Core::System& system) {
79 FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)),
80 Param(system, 2), Param(system, 3))
81 .raw);
82}
83
84// Used by ControlCodeMemory
85template <ResultCode func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)>
86void SvcWrap64(Core::System& system) {
87 FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)),
88 static_cast<u32>(Param(system, 1)), Param(system, 2), Param(system, 3),
89 static_cast<Svc::MemoryPermission>(Param(system, 4)))
90 .raw);
91}
92
76template <ResultCode func(Core::System&, u32*)> 93template <ResultCode func(Core::System&, u32*)>
77void SvcWrap64(Core::System& system) { 94void SvcWrap64(Core::System& system) {
78 u32 param = 0; 95 u32 param = 0;
@@ -301,6 +318,16 @@ void SvcWrap64(Core::System& system) {
301 FuncReturn(system, retval); 318 FuncReturn(system, retval);
302} 319}
303 320
321// Used by CreateCodeMemory
322template <ResultCode func(Core::System&, Handle*, u64, u64)>
323void SvcWrap64(Core::System& system) {
324 u32 param_1 = 0;
325 const u32 retval = func(system, &param_1, Param(system, 1), Param(system, 2)).raw;
326
327 system.CurrentArmInterface().SetReg(1, param_1);
328 FuncReturn(system, retval);
329}
330
304template <ResultCode func(Core::System&, Handle*, u64, u32, u32)> 331template <ResultCode func(Core::System&, Handle*, u64, u32, u32)>
305void SvcWrap64(Core::System& system) { 332void SvcWrap64(Core::System& system) {
306 u32 param_1 = 0; 333 u32 param_1 = 0;
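
The two new SvcWrap64 overloads follow the file's existing trick: the SVC implementation is a non-type template parameter, one wrapper template exists per distinct signature, and overload resolution picks the wrapper whose parameter list matches, so register unpacking is generated per signature. A reduced sketch with an invented System and two wrapper shapes:

#include <cstdint>
#include <iostream>

struct System {
    std::uint64_t regs[8]{};
    std::uint64_t Param(int i) const { return regs[i]; }
    void SetReg(int i, std::uint64_t v) { regs[i] = v; }
};

// One wrapper per SVC signature; the callee is a non-type template parameter,
// so each instantiation inlines the unpacking for exactly that signature.
template <std::uint64_t func(System&, std::uint64_t, std::uint32_t)>
void Wrap(System& system) {
    system.SetReg(0, func(system, system.Param(0),
                          static_cast<std::uint32_t>(system.Param(1))));
}

template <std::uint64_t func(System&, std::uint32_t*)>
void Wrap(System& system) {
    std::uint32_t out = 0;
    system.SetReg(0, func(system, &out));
    system.SetReg(1, out); // out-parameters map to additional result registers
}

std::uint64_t MySvc(System&, std::uint64_t addr, std::uint32_t flags) {
    return addr + flags;
}

int main() {
    System sys;
    sys.regs[0] = 100;
    sys.regs[1] = 5;
    Wrap<MySvc>(sys); // overload deduced from MySvc's signature
    std::cout << sys.regs[0] << '\n'; // prints 105
}
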
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index 8cd7279a3..aa985d820 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -5,6 +5,7 @@
5#include "common/assert.h" 5#include "common/assert.h"
6#include "core/core.h" 6#include "core/core.h"
7#include "core/core_timing.h" 7#include "core/core_timing.h"
8#include "core/hle/kernel/k_scheduler.h"
8#include "core/hle/kernel/k_thread.h" 9#include "core/hle/kernel/k_thread.h"
9#include "core/hle/kernel/time_manager.h" 10#include "core/hle/kernel/time_manager.h"
10 11
@@ -15,7 +16,10 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
15 Core::Timing::CreateEvent("Kernel::TimeManagerCallback", 16 Core::Timing::CreateEvent("Kernel::TimeManagerCallback",
16 [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { 17 [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
17 KThread* thread = reinterpret_cast<KThread*>(thread_handle); 18 KThread* thread = reinterpret_cast<KThread*>(thread_handle);
18 thread->Wakeup(); 19 {
20 KScopedSchedulerLock sl(system.Kernel());
21 thread->OnTimer();
22 }
19 }); 23 });
20} 24}
21 25
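
The time_manager.cpp change keeps the old pattern of smuggling the KThread pointer through the event's opaque std::uintptr_t argument, but now takes the scheduler lock before calling into the thread. A toy sketch of that callback shape (invented Thread and event plumbing; std::mutex in place of the scheduler lock):

#include <cstdint>
#include <functional>
#include <iostream>
#include <mutex>

struct Thread {
    std::mutex* sched_lock{};
    void OnTimer() { std::cout << "timeout fired\n"; }
};

// Toy event core: callbacks carry an opaque uintptr_t context, as in the
// CreateEvent callback above.
void FireEvent(const std::function<void(std::uintptr_t)>& cb, std::uintptr_t ctx) {
    cb(ctx);
}

int main() {
    std::mutex scheduler_lock;
    Thread t{&scheduler_lock};

    auto callback = [](std::uintptr_t handle) {
        // Recover the object from the opaque handle...
        auto* thread = reinterpret_cast<Thread*>(handle);
        // ...and take the scheduler lock before touching thread state, which
        // is the ordering the time_manager.cpp change enforces.
        std::scoped_lock sl(*thread->sched_lock);
        thread->OnTimer();
    };

    FireEvent(callback, reinterpret_cast<std::uintptr_t>(&t));
}
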