author    bunnei  2021-01-29 23:06:40 -0800
committer GitHub  2021-01-29 23:06:40 -0800
commit    a4526c4e1acb50808bbe205952101142288e1c60 (patch)
tree      7109edf89606c43352da9de40d0e3a920a08b659 /src/core/hle/kernel/thread.cpp
parent    Merge pull request #5795 from ReinUsesLisp/bytes-to-map-end (diff)
parent    hle: kernel: KLightLock: Fix several bugs. (diff)
download  yuzu-a4526c4e1acb50808bbe205952101142288e1c60.tar.gz
          yuzu-a4526c4e1acb50808bbe205952101142288e1c60.tar.xz
          yuzu-a4526c4e1acb50808bbe205952101142288e1c60.zip
Merge pull request #5779 from bunnei/kthread-rewrite
Rewrite KThread to be more accurate
Diffstat (limited to 'src/core/hle/kernel/thread.cpp')
-rw-r--r--  src/core/hle/kernel/thread.cpp | 460 ----
1 file changed, 0 insertions(+), 460 deletions(-)
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
deleted file mode 100644
index d97323255..000000000
--- a/src/core/hle/kernel/thread.cpp
+++ /dev/null
@@ -1,460 +0,0 @@
// Copyright 2014 Citra Emulator Project / PPSSPP Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <cinttypes>
#include <optional>
#include <vector>

#include "common/assert.h"
#include "common/common_types.h"
#include "common/fiber.h"
#include "common/logging/log.h"
#include "common/thread_queue_list.h"
#include "core/core.h"
#include "core/cpu_manager.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
#include "core/hle/kernel/k_condition_variable.h"
#include "core/hle/kernel/k_scheduler.h"
#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory/memory_layout.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/time_manager.h"
#include "core/hle/result.h"
#include "core/memory.h"

#ifdef ARCHITECTURE_x86_64
#include "core/arm/dynarmic/arm_dynarmic_32.h"
#include "core/arm/dynarmic/arm_dynarmic_64.h"
#endif

namespace Kernel {

bool Thread::IsSignaled() const {
    return signaled;
}

Thread::Thread(KernelCore& kernel) : KSynchronizationObject{kernel} {}
Thread::~Thread() = default;

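// Transitions the thread to Terminated, signals anything waiting on it, and
// releases its kernel resources (global handle, TLS slot, process registration).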
void Thread::Stop() {
    {
        KScopedSchedulerLock lock(kernel);
        SetState(ThreadState::Terminated);
        signaled = true;
        NotifyAvailable();
        kernel.GlobalHandleTable().Close(global_handle);

        if (owner_process) {
            owner_process->UnregisterThread(this);

            // Mark the TLS slot in the thread's page as free.
            owner_process->FreeTLSRegion(tls_address);
        }
        has_exited = true;
    }
    global_handle = 0;
}

void Thread::Wakeup() {
    KScopedSchedulerLock lock(kernel);
    SetState(ThreadState::Runnable);
}

ResultCode Thread::Start() {
    KScopedSchedulerLock lock(kernel);
    SetState(ThreadState::Runnable);
    return RESULT_SUCCESS;
}

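// If the thread is in a cancellable wait, abort the wait with
// ERR_SYNCHRONIZATION_CANCELED; otherwise, just latch the cancellation for later.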
void Thread::CancelWait() {
    KScopedSchedulerLock lock(kernel);
    if (GetState() != ThreadState::Waiting || !is_cancellable) {
        is_sync_cancelled = true;
        return;
    }
    // TODO(Blinkhawk): Implement cancel of server session
    is_sync_cancelled = false;
    SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED);
    SetState(ThreadState::Runnable);
}

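// On AArch32, r0 carries the first argument, r13 is the stack pointer, and r15
// is the program counter, so resetting a 32-bit context seeds exactly those
// registers.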
static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
                                 u32 entry_point, u32 arg) {
    context = {};
    context.cpu_registers[0] = arg;
    context.cpu_registers[15] = entry_point;
    context.cpu_registers[13] = stack_top;
}

static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top,
                                 VAddr entry_point, u64 arg) {
    context = {};
    context.cpu_registers[0] = arg;
    context.pc = entry_point;
    context.sp = stack_top;
    // TODO(merry): Perform a hardware test to determine the below value.
    context.fpcr = 0;
}

std::shared_ptr<Common::Fiber>& Thread::GetHostContext() {
    return host_context;
}

ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
                                                  std::string name, VAddr entry_point, u32 priority,
                                                  u64 arg, s32 processor_id, VAddr stack_top,
                                                  Process* owner_process) {
    std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
    void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
    return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top,
                  owner_process, std::move(init_func), init_func_parameter);
}

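// The full overload: validates the priority and processor id, allocates the
// kernel-side state, and registers the thread with the global scheduler and
// its owner process.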
ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadType type_flags,
                                                  std::string name, VAddr entry_point, u32 priority,
                                                  u64 arg, s32 processor_id, VAddr stack_top,
                                                  Process* owner_process,
                                                  std::function<void(void*)>&& thread_start_func,
                                                  void* thread_start_parameter) {
    auto& kernel = system.Kernel();
    // Check if the priority is in range. The lowest priority has the highest priority id.
    if (priority > THREADPRIO_LOWEST && ((type_flags & THREADTYPE_IDLE) == 0)) {
        LOG_ERROR(Kernel_SVC, "Invalid thread priority: {}", priority);
        return ERR_INVALID_THREAD_PRIORITY;
    }

    if (processor_id > THREADPROCESSORID_MAX) {
        LOG_ERROR(Kernel_SVC, "Invalid processor id: {}", processor_id);
        return ERR_INVALID_PROCESSOR_ID;
    }

    if (owner_process) {
        if (!system.Memory().IsValidVirtualAddress(*owner_process, entry_point)) {
            LOG_ERROR(Kernel_SVC, "(name={}): invalid entry {:016X}", name, entry_point);
            // TODO (bunnei): Find the correct error code to use here
            return RESULT_UNKNOWN;
        }
    }

    std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel);

    thread->thread_id = kernel.CreateNewThreadID();
    thread->thread_state = ThreadState::Initialized;
    thread->entry_point = entry_point;
    thread->stack_top = stack_top;
    thread->disable_count = 1;
    thread->tpidr_el0 = 0;
    thread->current_priority = priority;
    thread->base_priority = priority;
    thread->lock_owner = nullptr;
    thread->schedule_count = -1;
    thread->last_scheduled_tick = 0;
    thread->processor_id = processor_id;
    thread->ideal_core = processor_id;
    thread->affinity_mask.SetAffinity(processor_id, true);
    thread->name = std::move(name);
    thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap();
    thread->owner_process = owner_process;
    thread->type = type_flags;
    thread->signaled = false;
    if ((type_flags & THREADTYPE_IDLE) == 0) {
        auto& scheduler = kernel.GlobalSchedulerContext();
        scheduler.AddThread(thread);
    }
    if (owner_process) {
        thread->tls_address = thread->owner_process->CreateTLSRegion();
        thread->owner_process->RegisterThread(thread.get());
    } else {
        thread->tls_address = 0;
    }

    // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
    // to initialize the context
    if ((type_flags & THREADTYPE_HLE) == 0) {
        ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top),
                             static_cast<u32>(entry_point), static_cast<u32>(arg));
        ResetThreadContext64(thread->context_64, stack_top, entry_point, arg);
    }
    thread->host_context =
        std::make_shared<Common::Fiber>(std::move(thread_start_func), thread_start_parameter);

    return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
}

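// Changes the base (static) priority; the effective priority is then re-derived
// via RestorePriority, since priority inheritance may keep it boosted above the
// new base.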
void Thread::SetBasePriority(u32 priority) {
    ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
               "Invalid priority value.");

    KScopedSchedulerLock lock(kernel);

    // Change our base priority.
    base_priority = priority;

    // Perform a priority restoration.
    RestorePriority(kernel, this);
}

void Thread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) {
    signaling_object = object;
    signaling_result = result;
}

VAddr Thread::GetCommandBufferAddress() const {
    // Offset from the start of TLS at which the IPC command buffer begins.
    constexpr u64 command_header_offset = 0x80;
    return GetTLSAddress() + command_header_offset;
}

void Thread::SetState(ThreadState state) {
    KScopedSchedulerLock sl(kernel);

    // Clear debugging state
    SetMutexWaitAddressForDebugging({});
    SetWaitReasonForDebugging({});

    const ThreadState old_state = thread_state;
    thread_state =
        static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
    if (thread_state != old_state) {
        KScheduler::OnThreadStateChanged(kernel, this, old_state);
    }
}

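// Inserts a waiter into this thread's waiter list, which is kept sorted by
// priority so that the highest-priority waiter is always at the front.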
void Thread::AddWaiterImpl(Thread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Find the right spot to insert the waiter.
    auto it = waiter_list.begin();
    while (it != waiter_list.end()) {
        if (it->GetPriority() > thread->GetPriority()) {
            break;
        }
        it++;
    }

    // Keep track of how many kernel waiters we have.
    if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
        ASSERT((num_kernel_waiters++) >= 0);
    }

    // Insert the waiter.
    waiter_list.insert(it, *thread);
    thread->SetLockOwner(this);
}

void Thread::RemoveWaiterImpl(Thread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    // Keep track of how many kernel waiters we have.
    if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
        ASSERT((num_kernel_waiters--) > 0);
    }

    // Remove the waiter.
    waiter_list.erase(waiter_list.iterator_to(*thread));
    thread->SetLockOwner(nullptr);
}

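// Propagates priority inheritance: starting from the given thread, recompute
// the effective priority from the base priority and the highest-priority
// waiter, then walk up the chain of lock owners, re-sorting each owner's
// waiter list along the way.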
void Thread::RestorePriority(KernelCore& kernel, Thread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    while (true) {
        // We want to inherit priority where possible.
        s32 new_priority = thread->GetBasePriority();
        if (thread->HasWaiters()) {
            new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
        }

        // If the priority we would inherit is not different from ours, don't do anything.
        if (new_priority == thread->GetPriority()) {
            return;
        }

        // Ensure we don't violate condition variable red black tree invariants.
        if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
            BeforeUpdatePriority(kernel, cv_tree, thread);
        }

        // Change the priority.
        const s32 old_priority = thread->GetPriority();
        thread->SetPriority(new_priority);

        // Restore the condition variable, if relevant.
        if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
            AfterUpdatePriority(kernel, cv_tree, thread);
        }

        // Update the scheduler.
        KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);

        // Keep the lock owner up to date.
        Thread* lock_owner = thread->GetLockOwner();
        if (lock_owner == nullptr) {
            return;
        }

        // Update the thread in the lock owner's sorted list, and continue inheriting.
        lock_owner->RemoveWaiterImpl(thread);
        lock_owner->AddWaiterImpl(thread);
        thread = lock_owner;
    }
}

void Thread::AddWaiter(Thread* thread) {
    AddWaiterImpl(thread);
    RestorePriority(kernel, this);
}

void Thread::RemoveWaiter(Thread* thread) {
    RemoveWaiterImpl(thread);
    RestorePriority(kernel, this);
}

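// Detaches every waiter blocked on the given address key. The first match
// becomes the next lock owner and inherits the remaining matching waiters;
// returns the new owner (or nullptr) and the number of waiters transferred.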
Thread* Thread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    s32 num_waiters{};
    Thread* next_lock_owner{};
    auto it = waiter_list.begin();
    while (it != waiter_list.end()) {
        if (it->GetAddressKey() == key) {
            Thread* thread = std::addressof(*it);

            // Keep track of how many kernel waiters we have.
            if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
                ASSERT((num_kernel_waiters--) > 0);
            }
            it = waiter_list.erase(it);

            // Update the next lock owner.
            if (next_lock_owner == nullptr) {
                next_lock_owner = thread;
                next_lock_owner->SetLockOwner(nullptr);
            } else {
                next_lock_owner->AddWaiterImpl(thread);
            }
            num_waiters++;
        } else {
            it++;
        }
    }

    // Do priority updates, if we have a next owner.
    if (next_lock_owner) {
        RestorePriority(kernel, this);
        RestorePriority(kernel, next_lock_owner);
    }

    // Return output.
    *out_num_waiters = num_waiters;
    return next_lock_owner;
}

ResultCode Thread::SetActivity(ThreadActivity value) {
    KScopedSchedulerLock lock(kernel);

    auto sched_status = GetState();

    if (sched_status != ThreadState::Runnable && sched_status != ThreadState::Waiting) {
        return ERR_INVALID_STATE;
    }

    if (IsTerminationRequested()) {
        return RESULT_SUCCESS;
    }

    if (value == ThreadActivity::Paused) {
        if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) != 0) {
            return ERR_INVALID_STATE;
        }
        AddSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
    } else {
        if ((pausing_state & static_cast<u32>(ThreadSchedFlags::ThreadPauseFlag)) == 0) {
            return ERR_INVALID_STATE;
        }
        RemoveSchedulingFlag(ThreadSchedFlags::ThreadPauseFlag);
    }
    return RESULT_SUCCESS;
}

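// Sleeps via KScopedSchedulerLockAndSleep, which registers a wake-up time event
// while the scheduler lock is held; the event is unscheduled once the sleep
// completes.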
ResultCode Thread::Sleep(s64 nanoseconds) {
    Handle event_handle{};
    {
        KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
        SetState(ThreadState::Waiting);
        SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
    }

    if (event_handle != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(event_handle);
    }
    return RESULT_SUCCESS;
}

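// Pause flags live above ThreadState::Mask; they are OR'd onto the base
// scheduling state, so the low bits still report the underlying state.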
void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
    const auto old_state = GetRawState();
    pausing_state |= static_cast<u32>(flag);
    const auto base_scheduling = GetState();
    thread_state = base_scheduling | static_cast<ThreadState>(pausing_state);
    KScheduler::OnThreadStateChanged(kernel, this, old_state);
}

void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
    const auto old_state = GetRawState();
    pausing_state &= ~static_cast<u32>(flag);
    const auto base_scheduling = GetState();
    thread_state = base_scheduling | static_cast<ThreadState>(pausing_state);
    KScheduler::OnThreadStateChanged(kernel, this, old_state);
}

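// Updates the ideal core and affinity mask. While an affinity override is
// active, only the override is changed; otherwise, if the current core falls
// out of the new mask, the thread migrates to the ideal core, or to the
// highest allowed core when no ideal core is set.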
ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
    KScopedSchedulerLock lock(kernel);
    const auto HighestSetCore = [](u64 mask, u32 max_cores) {
        for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
            if (((mask >> core) & 1) != 0) {
                return core;
            }
        }
        return -1;
    };

    const bool use_override = affinity_override_count != 0;
    if (new_core == THREADPROCESSORID_DONT_UPDATE) {
        new_core = use_override ? ideal_core_override : ideal_core;
        if ((new_affinity_mask & (1ULL << new_core)) == 0) {
            LOG_ERROR(Kernel, "New affinity mask is incorrect! new_core={}, new_affinity_mask={}",
                      new_core, new_affinity_mask);
            return ERR_INVALID_COMBINATION;
        }
    }
    if (use_override) {
        ideal_core_override = new_core;
    } else {
        const auto old_affinity_mask = affinity_mask;
        affinity_mask.SetAffinityMask(new_affinity_mask);
        ideal_core = new_core;
        if (old_affinity_mask.GetAffinityMask() != new_affinity_mask) {
            const s32 old_core = processor_id;
            if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) {
                if (static_cast<s32>(ideal_core) < 0) {
                    processor_id = HighestSetCore(affinity_mask.GetAffinityMask(),
                                                  Core::Hardware::NUM_CPU_CORES);
                } else {
                    processor_id = ideal_core;
                }
            }
            KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_affinity_mask, old_core);
        }
    }
    return RESULT_SUCCESS;
}

} // namespace Kernel