summary refs log tree commit diff
path: root/src/core/hle/kernel/kernel.cpp
diff options
context:
space:
mode:
author: Fernando Sahmkow 2020-02-14 09:30:53 -0400
committer: FernandoS27 2020-02-22 11:18:06 -0400
commit: 179bafa7cb1efae5405d38ea9b98dc6b3e1ec756 (patch)
treee26bc705bf9446b335c29e086b84c61916f44f32 /src/core/hle/kernel/kernel.cpp
parentKernel: Make global scheduler depend on KernelCore (diff)
downloadyuzu-179bafa7cb1efae5405d38ea9b98dc6b3e1ec756.tar.gz
yuzu-179bafa7cb1efae5405d38ea9b98dc6b3e1ec756.tar.xz
yuzu-179bafa7cb1efae5405d38ea9b98dc6b3e1ec756.zip
Kernel: Rename ThreadCallbackHandleTable and Setup Thread Ids on Kernel.
Diffstat (limited to 'src/core/hle/kernel/kernel.cpp')
-rw-r--r-- src/core/hle/kernel/kernel.cpp | 88
1 file changed, 76 insertions(+), 12 deletions(-)
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index d312ae31e..b3a5d7505 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -6,6 +6,8 @@
6#include <functional> 6#include <functional>
7#include <memory> 7#include <memory>
8#include <mutex> 8#include <mutex>
9#include <thread>
10#include <unordered_map>
9#include <utility> 11#include <utility>
10 12
11#include "common/assert.h" 13#include "common/assert.h"
@@ -44,7 +46,7 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
44 std::lock_guard lock{HLE::g_hle_lock}; 46 std::lock_guard lock{HLE::g_hle_lock};
45 47
46 std::shared_ptr<Thread> thread = 48 std::shared_ptr<Thread> thread =
47 system.Kernel().RetrieveThreadFromWakeupCallbackHandleTable(proper_handle); 49 system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
48 if (thread == nullptr) { 50 if (thread == nullptr) {
49 LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", proper_handle); 51 LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", proper_handle);
50 return; 52 return;
@@ -120,7 +122,7 @@ struct KernelCore::Impl {
120 122
121 system_resource_limit = nullptr; 123 system_resource_limit = nullptr;
122 124
123 thread_wakeup_callback_handle_table.Clear(); 125 global_handle_table.Clear();
124 thread_wakeup_event_type = nullptr; 126 thread_wakeup_event_type = nullptr;
125 preemption_event = nullptr; 127 preemption_event = nullptr;
126 128
@@ -138,8 +140,8 @@ struct KernelCore::Impl {
138 140
139 void InitializePhysicalCores() { 141 void InitializePhysicalCores() {
140 exclusive_monitor = 142 exclusive_monitor =
141 Core::MakeExclusiveMonitor(system.Memory(), global_scheduler.CpuCoresCount()); 143 Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
142 for (std::size_t i = 0; i < global_scheduler.CpuCoresCount(); i++) { 144 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
143 cores.emplace_back(system, i, *exclusive_monitor); 145 cores.emplace_back(system, i, *exclusive_monitor);
144 } 146 }
145 } 147 }
@@ -184,6 +186,48 @@ struct KernelCore::Impl {
184 system.Memory().SetCurrentPageTable(*process); 186 system.Memory().SetCurrentPageTable(*process);
185 } 187 }
186 188
189 void RegisterCoreThread(std::size_t core_id) {
190 const std::thread::id this_id = std::this_thread::get_id();
191 const auto it = host_thread_ids.find(this_id);
192 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
193 ASSERT(it == host_thread_ids.end());
194 ASSERT(!registered_core_threads[core_id]);
195 host_thread_ids[this_id] = static_cast<u32>(core_id);
196 registered_core_threads.set(core_id);
197 }
198
199 void RegisterHostThread() {
200 const std::thread::id this_id = std::this_thread::get_id();
201 const auto it = host_thread_ids.find(this_id);
202 ASSERT(it == host_thread_ids.end());
203 host_thread_ids[this_id] = registered_thread_ids++;
204 }
205
206 u32 GetCurrentHostThreadId() const {
207 const std::thread::id this_id = std::this_thread::get_id();
208 const auto it = host_thread_ids.find(this_id);
209 if (it == host_thread_ids.end()) {
210 return Core::INVALID_HOST_THREAD_ID;
211 }
212 return it->second;
213 }
214
215 Core::EmuThreadHandle GetCurrentEmuThreadId() const {
216 Core::EmuThreadHandle result = Core::EmuThreadHandle::InvalidHandle();
217 result.host_handle = GetCurrentHostThreadId();
218 if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) {
219 return result;
220 }
221 const Kernel::Scheduler& sched = cores[result.host_handle].Scheduler();
222 const Kernel::Thread* current = sched.GetCurrentThread();
223 if (current != nullptr) {
224 result.guest_handle = current->GetGlobalHandle();
225 } else {
226 result.guest_handle = InvalidHandle;
227 }
228 return result;
229 }
230
187 std::atomic<u32> next_object_id{0}; 231 std::atomic<u32> next_object_id{0};
188 std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin}; 232 std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin};
189 std::atomic<u64> next_user_process_id{Process::ProcessIDMin}; 233 std::atomic<u64> next_user_process_id{Process::ProcessIDMin};
@@ -202,7 +246,7 @@ struct KernelCore::Impl {
202 246
203 // TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future, 247 // TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future,
204 // allowing us to simply use a pool index or similar. 248 // allowing us to simply use a pool index or similar.
205 Kernel::HandleTable thread_wakeup_callback_handle_table; 249 Kernel::HandleTable global_handle_table;
206 250
207 /// Map of named ports managed by the kernel, which can be retrieved using 251 /// Map of named ports managed by the kernel, which can be retrieved using
208 /// the ConnectToPort SVC. 252 /// the ConnectToPort SVC.
@@ -211,6 +255,11 @@ struct KernelCore::Impl {
211 std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor; 255 std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
212 std::vector<Kernel::PhysicalCore> cores; 256 std::vector<Kernel::PhysicalCore> cores;
213 257
258 // 0-3 Ids represent core threads, >3 represent others
259 std::unordered_map<std::thread::id, u32> host_thread_ids;
260 u32 registered_thread_ids{Core::Hardware::NUM_CPU_CORES};
261 std::bitset<Core::Hardware::NUM_CPU_CORES> registered_core_threads{};
262
214 // System context 263 // System context
215 Core::System& system; 264 Core::System& system;
216}; 265};
@@ -232,9 +281,8 @@ std::shared_ptr<ResourceLimit> KernelCore::GetSystemResourceLimit() const {
232 return impl->system_resource_limit; 281 return impl->system_resource_limit;
233} 282}
234 283
235std::shared_ptr<Thread> KernelCore::RetrieveThreadFromWakeupCallbackHandleTable( 284std::shared_ptr<Thread> KernelCore::RetrieveThreadFromGlobalHandleTable(Handle handle) const {
236 Handle handle) const { 285 return impl->global_handle_table.Get<Thread>(handle);
237 return impl->thread_wakeup_callback_handle_table.Get<Thread>(handle);
238} 286}
239 287
240void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) { 288void KernelCore::AppendNewProcess(std::shared_ptr<Process> process) {
@@ -346,12 +394,28 @@ const std::shared_ptr<Core::Timing::EventType>& KernelCore::ThreadWakeupCallback
346 return impl->thread_wakeup_event_type; 394 return impl->thread_wakeup_event_type;
347} 395}
348 396
349Kernel::HandleTable& KernelCore::ThreadWakeupCallbackHandleTable() { 397Kernel::HandleTable& KernelCore::GlobalHandleTable() {
350 return impl->thread_wakeup_callback_handle_table; 398 return impl->global_handle_table;
399}
400
401const Kernel::HandleTable& KernelCore::GlobalHandleTable() const {
402 return impl->global_handle_table;
403}
404
405void KernelCore::RegisterCoreThread(std::size_t core_id) {
406 impl->RegisterCoreThread(core_id);
407}
408
409void KernelCore::RegisterHostThread() {
410 impl->RegisterHostThread();
411}
412
413u32 KernelCore::GetCurrentHostThreadId() const {
414 return impl->GetCurrentHostThreadId();
351} 415}
352 416
353const Kernel::HandleTable& KernelCore::ThreadWakeupCallbackHandleTable() const { 417Core::EmuThreadHandle KernelCore::GetCurrentEmuThreadId() const {
354 return impl->thread_wakeup_callback_handle_table; 418 return impl->GetCurrentEmuThreadId();
355} 419}
356 420
357} // namespace Kernel 421} // namespace Kernel