Diffstat (limited to 'src/core/hle/kernel/thread.cpp')
-rw-r--r--  src/core/hle/kernel/thread.cpp  114
1 file changed, 65 insertions, 49 deletions
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index f1e5cf3cb..59272715f 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -18,10 +18,10 @@
 #include "core/core_timing.h"
 #include "core/hle/hle.h"
 #include "core/hle/kernel/kernel.h"
-#include "core/hle/kernel/process.h"
-#include "core/hle/kernel/thread.h"
 #include "core/hle/kernel/memory.h"
 #include "core/hle/kernel/mutex.h"
+#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/thread.h"
 #include "core/hle/result.h"
 #include "core/memory.h"
 
@@ -46,7 +46,7 @@ static Kernel::HandleTable wakeup_callback_handle_table;
 static std::vector<SharedPtr<Thread>> thread_list;
 
 // Lists only ready thread ids.
-static Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST+1> ready_queue;
+static Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
 
 static Thread* current_thread;
 
@@ -61,8 +61,10 @@ inline static u32 const NewThreadId() {
     return next_thread_id++;
 }
 
-Thread::Thread() {}
-Thread::~Thread() {}
+Thread::Thread() {
+}
+Thread::~Thread() {
+}
 
 Thread* GetCurrentThread() {
     return current_thread;
@@ -103,7 +105,7 @@ void Thread::Stop() {
 
     // Clean up thread from ready queue
     // This is only needed when the thread is termintated forcefully (SVC TerminateProcess)
-    if (status == THREADSTATUS_READY){
+    if (status == THREADSTATUS_READY) {
         ready_queue.remove(current_priority, this);
     }
 
@@ -119,7 +121,8 @@ void Thread::Stop() {
 
     // Mark the TLS slot in the thread's page as free.
     u32 tls_page = (tls_address - Memory::TLS_AREA_VADDR) / Memory::PAGE_SIZE;
-    u32 tls_slot = ((tls_address - Memory::TLS_AREA_VADDR) % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
+    u32 tls_slot =
+        ((tls_address - Memory::TLS_AREA_VADDR) % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
     Kernel::g_current_process->tls_slots[tls_page].reset(tls_slot);
 
     HLE::Reschedule(__func__);
@@ -137,7 +140,7 @@ Thread* ArbitrateHighestPriorityThread(u32 address) {
         if (thread == nullptr)
             continue;
 
-        if(thread->current_priority <= priority) {
+        if (thread->current_priority <= priority) {
             highest_priority_thread = thread.get();
             priority = thread->current_priority;
         }
@@ -170,7 +173,7 @@ static void PriorityBoostStarvedThreads() {
         // on hardware. However, this is almost certainly not perfect, and the real CTR OS scheduler
         // should probably be reversed to verify this.
 
-        const u64 boost_timeout = 2000000;  // Boost threads that have been ready for > this long
+        const u64 boost_timeout = 2000000; // Boost threads that have been ready for > this long
 
         u64 delta = current_ticks - thread->last_running_ticks;
 
@@ -193,10 +196,12 @@ static std::tuple<u32*, u32*> GetWaitSynchTimeoutParameterRegister(Thread* threa
 
     if ((thumb_mode && thumb_inst == 0xDF24) || (!thumb_mode && inst == 0x0F000024)) {
         // svc #0x24 (WaitSynchronization1)
-        return std::make_tuple(&thread->context.cpu_registers[2], &thread->context.cpu_registers[3]);
+        return std::make_tuple(&thread->context.cpu_registers[2],
+                               &thread->context.cpu_registers[3]);
     } else if ((thumb_mode && thumb_inst == 0xDF25) || (!thumb_mode && inst == 0x0F000025)) {
         // svc #0x25 (WaitSynchronizationN)
-        return std::make_tuple(&thread->context.cpu_registers[0], &thread->context.cpu_registers[4]);
+        return std::make_tuple(&thread->context.cpu_registers[0],
+                               &thread->context.cpu_registers[4]);
     }
 
     UNREACHABLE();
@@ -245,7 +250,8 @@ static void SwitchContext(Thread* new_thread) {
 
     // Load context of new thread
     if (new_thread) {
-        DEBUG_ASSERT_MSG(new_thread->status == THREADSTATUS_READY, "Thread must be ready to become running.");
+        DEBUG_ASSERT_MSG(new_thread->status == THREADSTATUS_READY,
+                         "Thread must be ready to become running.");
 
         // Cancel any outstanding wakeup events for this thread
         CoreTiming::UnscheduleEvent(ThreadWakeupEventType, new_thread->callback_handle);
@@ -263,7 +269,7 @@ static void SwitchContext(Thread* new_thread) {
             new_thread->context.pc -= thumb_mode ? 2 : 4;
 
             // Get the register for timeout parameter
-            u32* timeout_low, *timeout_high;
+            u32 *timeout_low, *timeout_high;
             std::tie(timeout_low, timeout_high) = GetWaitSynchTimeoutParameterRegister(new_thread);
 
             // Update the timeout parameter
@@ -307,7 +313,7 @@ static Thread* PopNextReadyThread() {
             // Otherwise just keep going with the current thread
             next = thread;
         }
-    } else  {
+    } else {
         next = ready_queue.pop_first();
     }
 
@@ -321,7 +327,8 @@ void WaitCurrentThread_Sleep() {
     HLE::Reschedule(__func__);
 }
 
-void WaitCurrentThread_WaitSynchronization(std::vector<SharedPtr<WaitObject>> wait_objects, bool wait_set_output, bool wait_all) {
+void WaitCurrentThread_WaitSynchronization(std::vector<SharedPtr<WaitObject>> wait_objects,
+                                           bool wait_set_output, bool wait_all) {
     Thread* thread = GetCurrentThread();
     thread->wait_set_output = wait_set_output;
     thread->wait_all = wait_all;
@@ -352,7 +359,8 @@ static void ThreadWakeupCallback(u64 thread_handle, int cycles_late) {
 
     if (thread->status == THREADSTATUS_WAIT_SYNCH || thread->status == THREADSTATUS_WAIT_ARB) {
         thread->SetWaitSynchronizationResult(ResultCode(ErrorDescription::Timeout, ErrorModule::OS,
-                                                        ErrorSummary::StatusChanged, ErrorLevel::Info));
+                                                        ErrorSummary::StatusChanged,
+                                                        ErrorLevel::Info));
 
         if (thread->wait_set_output)
             thread->SetWaitSynchronizationOutput(-1);
@@ -372,25 +380,25 @@ void Thread::WakeAfterDelay(s64 nanoseconds) {
 
 void Thread::ResumeFromWait() {
     switch (status) {
-        case THREADSTATUS_WAIT_SYNCH:
-        case THREADSTATUS_WAIT_ARB:
-        case THREADSTATUS_WAIT_SLEEP:
-            break;
+    case THREADSTATUS_WAIT_SYNCH:
+    case THREADSTATUS_WAIT_ARB:
+    case THREADSTATUS_WAIT_SLEEP:
+        break;
 
-        case THREADSTATUS_READY:
-            // If the thread is waiting on multiple wait objects, it might be awoken more than once
-            // before actually resuming. We can ignore subsequent wakeups if the thread status has
-            // already been set to THREADSTATUS_READY.
-            return;
+    case THREADSTATUS_READY:
+        // If the thread is waiting on multiple wait objects, it might be awoken more than once
+        // before actually resuming. We can ignore subsequent wakeups if the thread status has
+        // already been set to THREADSTATUS_READY.
+        return;
 
-        case THREADSTATUS_RUNNING:
-            DEBUG_ASSERT_MSG(false, "Thread with object id %u has already resumed.", GetObjectId());
-            return;
-        case THREADSTATUS_DEAD:
-            // This should never happen, as threads must complete before being stopped.
-            DEBUG_ASSERT_MSG(false, "Thread with object id %u cannot be resumed because it's DEAD.",
-                             GetObjectId());
-            return;
+    case THREADSTATUS_RUNNING:
+        DEBUG_ASSERT_MSG(false, "Thread with object id %u has already resumed.", GetObjectId());
+        return;
+    case THREADSTATUS_DEAD:
+        // This should never happen, as threads must complete before being stopped.
+        DEBUG_ASSERT_MSG(false, "Thread with object id %u cannot be resumed because it's DEAD.",
+                         GetObjectId());
+        return;
     }
 
     ready_queue.push_back(current_priority, this);
@@ -405,7 +413,8 @@ static void DebugThreadQueue() {
     if (!thread) {
         LOG_DEBUG(Kernel, "Current: NO CURRENT THREAD");
     } else {
-        LOG_DEBUG(Kernel, "0x%02X %u (current)", thread->current_priority, GetCurrentThread()->GetObjectId());
+        LOG_DEBUG(Kernel, "0x%02X %u (current)", thread->current_priority,
+                  GetCurrentThread()->GetObjectId());
     }
 
     for (auto& t : thread_list) {
@@ -448,7 +457,8 @@ std::tuple<u32, u32, bool> GetFreeThreadLocalSlot(std::vector<std::bitset<8>>& t
  * @param entry_point Address of entry point for execution
  * @param arg User argument for thread
  */
-static void ResetThreadContext(Core::ThreadContext& context, u32 stack_top, u32 entry_point, u32 arg) {
+static void ResetThreadContext(Core::ThreadContext& context, u32 stack_top, u32 entry_point,
+                               u32 arg) {
     memset(&context, 0, sizeof(Core::ThreadContext));
 
     context.cpu_registers[0] = arg;
@@ -458,11 +468,11 @@ static void ResetThreadContext(Core::ThreadContext& context, u32 stack_top, u32
 }
 
 ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point, s32 priority,
-        u32 arg, s32 processor_id, VAddr stack_top) {
+                                            u32 arg, s32 processor_id, VAddr stack_top) {
     if (priority < THREADPRIO_HIGHEST || priority > THREADPRIO_LOWEST) {
         s32 new_priority = MathUtil::Clamp<s32>(priority, THREADPRIO_HIGHEST, THREADPRIO_LOWEST);
-        LOG_WARNING(Kernel_SVC, "(name=%s): invalid priority=%d, clamping to %d",
-            name.c_str(), priority, new_priority);
+        LOG_WARNING(Kernel_SVC, "(name=%s): invalid priority=%d, clamping to %d", name.c_str(),
+                    priority, new_priority);
         // TODO(bunnei): Clamping to a valid priority is not necessarily correct behavior... Confirm
         // validity of this
         priority = new_priority;
@@ -472,7 +482,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
         LOG_ERROR(Kernel_SVC, "(name=%s): invalid entry %08x", name.c_str(), entry_point);
         // TODO: Verify error
         return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::Kernel,
-                ErrorSummary::InvalidArgument, ErrorLevel::Permanent);
+                          ErrorSummary::InvalidArgument, ErrorLevel::Permanent);
     }
 
     SharedPtr<Thread> thread(new Thread);
@@ -511,8 +521,10 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
     auto& linheap_memory = memory_region->linear_heap_memory;
 
     if (linheap_memory->size() + Memory::PAGE_SIZE > memory_region->size) {
-        LOG_ERROR(Kernel_SVC, "Not enough space in region to allocate a new TLS page for thread");
-        return ResultCode(ErrorDescription::OutOfMemory, ErrorModule::Kernel, ErrorSummary::OutOfResource, ErrorLevel::Permanent);
+        LOG_ERROR(Kernel_SVC,
+                  "Not enough space in region to allocate a new TLS page for thread");
+        return ResultCode(ErrorDescription::OutOfMemory, ErrorModule::Kernel,
+                          ErrorSummary::OutOfResource, ErrorLevel::Permanent);
     }
 
     u32 offset = linheap_memory->size();
@@ -537,7 +549,8 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
 
     // Mark the slot as used
     tls_slots[available_page].set(available_slot);
-    thread->tls_address = Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE + available_slot * Memory::TLS_ENTRY_SIZE;
+    thread->tls_address = Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE +
+                          available_slot * Memory::TLS_ENTRY_SIZE;
 
     // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
     // to initialize the context
@@ -551,10 +564,12 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
     return MakeResult<SharedPtr<Thread>>(std::move(thread));
 }
 
-// TODO(peachum): Remove this. Range checking should be done, and an appropriate error should be returned.
+// TODO(peachum): Remove this. Range checking should be done, and an appropriate error should be
+// returned.
 static void ClampPriority(const Thread* thread, s32* priority) {
     if (*priority < THREADPRIO_HIGHEST || *priority > THREADPRIO_LOWEST) {
-        DEBUG_ASSERT_MSG(false, "Application passed an out of range priority. An error should be returned.");
+        DEBUG_ASSERT_MSG(
+            false, "Application passed an out of range priority. An error should be returned.");
 
         s32 new_priority = MathUtil::Clamp<s32>(*priority, THREADPRIO_HIGHEST, THREADPRIO_LOWEST);
         LOG_WARNING(Kernel_SVC, "(name=%s): invalid priority=%d, clamping to %d",
@@ -586,12 +601,13 @@ SharedPtr<Thread> SetupMainThread(u32 entry_point, s32 priority) {
     DEBUG_ASSERT(!GetCurrentThread());
 
     // Initialize new "main" thread
-    auto thread_res = Thread::Create("main", entry_point, priority, 0,
-            THREADPROCESSORID_0, Memory::HEAP_VADDR_END);
+    auto thread_res = Thread::Create("main", entry_point, priority, 0, THREADPROCESSORID_0,
+                                     Memory::HEAP_VADDR_END);
 
     SharedPtr<Thread> thread = thread_res.MoveFrom();
 
-    thread->context.fpscr = FPSCR_DEFAULT_NAN | FPSCR_FLUSH_TO_ZERO | FPSCR_ROUND_TOZERO | FPSCR_IXC; // 0x03C00010
+    thread->context.fpscr =
+        FPSCR_DEFAULT_NAN | FPSCR_FLUSH_TO_ZERO | FPSCR_ROUND_TOZERO | FPSCR_IXC; // 0x03C00010
 
     // Run new "main" thread
     SwitchContext(thread.get());
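
Editor's note (not part of the patch): several of the rewrapped lines above are the TLS page/slot
bookkeeping in Thread::Stop and Thread::Create. The following is a minimal standalone sketch of
that arithmetic only; the constants are stand-ins chosen for illustration (the real values live in
core/memory.h), and the helper names are hypothetical.

    #include <cassert>
    #include <cstdint>

    // Stand-in constants for illustration; the actual definitions are in core/memory.h.
    constexpr uint32_t TLS_AREA_VADDR = 0x1FF82000; // assumed base of the TLS area
    constexpr uint32_t PAGE_SIZE = 0x1000;          // assumed 4 KiB pages
    constexpr uint32_t TLS_ENTRY_SIZE = 0x200;      // assumed 0x200-byte TLS slot per thread

    // Thread::Create: build a thread's TLS address from a free (page, slot) pair.
    constexpr uint32_t MakeTlsAddress(uint32_t page, uint32_t slot) {
        return TLS_AREA_VADDR + page * PAGE_SIZE + slot * TLS_ENTRY_SIZE;
    }

    // Thread::Stop: recover the (page, slot) pair so the slot can be marked free again.
    constexpr uint32_t TlsPage(uint32_t tls_address) {
        return (tls_address - TLS_AREA_VADDR) / PAGE_SIZE;
    }
    constexpr uint32_t TlsSlot(uint32_t tls_address) {
        return ((tls_address - TLS_AREA_VADDR) % PAGE_SIZE) / TLS_ENTRY_SIZE;
    }

    int main() {
        // Round trip: page 2, slot 5 -> address -> back to (2, 5).
        const uint32_t addr = MakeTlsAddress(2, 5);
        assert(TlsPage(addr) == 2);
        assert(TlsSlot(addr) == 5);
        return 0;
    }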