summaryrefslogtreecommitdiff
path: root/src/core/hle/kernel/scheduler.cpp
diff options
context:
space:
mode:
author: bunnei, 2018-02-18 14:58:40 -0500
committer: bunnei, 2018-02-18 14:58:40 -0500
commitc78d495161355277d19bd9debc64161b1d4bb949 (patch)
tree7de9d4d46450b78baa1c2153b929829a3f7eabc6 /src/core/hle/kernel/scheduler.cpp
parentcore: Use shared_ptr for cpu_core. (diff)
downloadyuzu-c78d495161355277d19bd9debc64161b1d4bb949.tar.gz
yuzu-c78d495161355277d19bd9debc64161b1d4bb949.tar.xz
yuzu-c78d495161355277d19bd9debc64161b1d4bb949.zip
kernel: Add Scheduler, which encapsulates the scheduling loading from Thread module.
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  134
1 file changed, 134 insertions(+), 0 deletions(-)
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
new file mode 100644
index 000000000..2fe0d5a38
--- /dev/null
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -0,0 +1,134 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
#include <algorithm>
#include <utility>

#include "core/core_timing.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/scheduler.h"
8
9namespace Kernel {
10
11Scheduler::Scheduler(std::shared_ptr<ARM_Interface> cpu_core) : cpu_core(cpu_core) {}
12
Scheduler::~Scheduler() {
    // Forcibly stop every thread this scheduler still holds a reference to,
    // so that no thread outlives the scheduler (and the CPU core) it ran on.
    for (auto& thread : thread_list) {
        thread->Stop();
    }
}
18
19bool Scheduler::HaveReadyThreads() {
20 return ready_queue.get_first() != nullptr;
21}
22
/// Returns the thread currently running on this scheduler's core, or nullptr
/// when the core is idle. The returned pointer is non-owning.
Thread* Scheduler::GetCurrentThread() const {
    return current_thread.get();
}
26
27Thread* Scheduler::PopNextReadyThread() {
28 Thread* next = nullptr;
29 Thread* thread = GetCurrentThread();
30
31 if (thread && thread->status == THREADSTATUS_RUNNING) {
32 // We have to do better than the current thread.
33 // This call returns null when that's not possible.
34 next = ready_queue.pop_first_better(thread->current_priority);
35 if (!next) {
36 // Otherwise just keep going with the current thread
37 next = thread;
38 }
39 } else {
40 next = ready_queue.pop_first();
41 }
42
43 return next;
44}
45
/// Switches the CPU core from the currently running thread (if any) to
/// new_thread. Passing nullptr idles the core. Statement order here is
/// load-bearing: the outgoing context must be saved before requeueing, and
/// the incoming thread must be removed from the ready queue only after it
/// becomes the current thread.
void Scheduler::SwitchContext(Thread* new_thread) {
    Thread* previous_thread = GetCurrentThread();

    // Save context for previous thread
    if (previous_thread) {
        // Stamp when the outgoing thread last ran, then snapshot its CPU state.
        previous_thread->last_running_ticks = CoreTiming::GetTicks();
        cpu_core->SaveContext(previous_thread->context);

        if (previous_thread->status == THREADSTATUS_RUNNING) {
            // This is only the case when a reschedule is triggered without the current thread
            // yielding execution (i.e. an event triggered, system core time-sliced, etc).
            // push_front (not back) so the preempted thread stays favored at its priority.
            ready_queue.push_front(previous_thread->current_priority, previous_thread);
            previous_thread->status = THREADSTATUS_READY;
        }
    }

    // Load context of new thread
    if (new_thread) {
        ASSERT_MSG(new_thread->status == THREADSTATUS_READY,
                   "Thread must be ready to become running.");

        // Cancel any outstanding wakeup events for this thread: it is about to
        // run, so a pending timeout wakeup would now be spurious.
        new_thread->CancelWakeupTimer();

        auto previous_process = Kernel::g_current_process;

        current_thread = new_thread;

        ready_queue.remove(new_thread->current_priority, new_thread);
        new_thread->status = THREADSTATUS_RUNNING;

        // Only swap the address space when actually crossing a process boundary.
        if (previous_process != current_thread->owner_process) {
            Kernel::g_current_process = current_thread->owner_process;
            SetCurrentPageTable(&Kernel::g_current_process->vm_manager.page_table);
        }

        cpu_core->LoadContext(new_thread->context);
        cpu_core->SetTlsAddress(new_thread->GetTLSAddress());
    } else {
        current_thread = nullptr;
        // Note: We do not reset the current process and current page table when idling because
        // technically we haven't changed processes, our threads are just paused.
    }
}
90
91void Scheduler::Reschedule() {
92 Thread* cur = GetCurrentThread();
93 Thread* next = PopNextReadyThread();
94
95 if (cur && next) {
96 LOG_TRACE(Kernel, "context switch %u -> %u", cur->GetObjectId(), next->GetObjectId());
97 } else if (cur) {
98 LOG_TRACE(Kernel, "context switch %u -> idle", cur->GetObjectId());
99 } else if (next) {
100 LOG_TRACE(Kernel, "context switch idle -> %u", next->GetObjectId());
101 }
102
103 SwitchContext(next);
104}
105
106void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) {
107 thread_list.push_back(thread);
108 ready_queue.prepare(priority);
109}
110
111void Scheduler::RemoveThread(Thread* thread) {
112 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
113 thread_list.end());
114}
115
/// Queues an already-READY thread at the back of its priority level's queue,
/// making it eligible to be picked by PopNextReadyThread.
void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
    ASSERT(thread->status == THREADSTATUS_READY);
    ready_queue.push_back(priority, thread);
}
120
/// Removes a READY thread from its priority level's queue (e.g. when it is
/// about to block). The caller supplies the priority it was queued under.
void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
    ASSERT(thread->status == THREADSTATUS_READY);
    ready_queue.remove(priority, thread);
}
125
126void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
127 // If thread was ready, adjust queues
128 if (thread->status == THREADSTATUS_READY)
129 ready_queue.move(thread, thread->current_priority, priority);
130 else
131 ready_queue.prepare(priority);
132}
133
134} // namespace Kernel