diff options
Diffstat (limited to 'src/core/hle/kernel/kernel.cpp')
| -rw-r--r-- | src/core/hle/kernel/kernel.cpp | 201 |
1 files changed, 115 insertions, 86 deletions
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index f2b0fe2fd..e8ece8164 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -7,15 +7,15 @@ | |||
| 7 | #include <bitset> | 7 | #include <bitset> |
| 8 | #include <functional> | 8 | #include <functional> |
| 9 | #include <memory> | 9 | #include <memory> |
| 10 | #include <mutex> | ||
| 11 | #include <thread> | 10 | #include <thread> |
| 12 | #include <unordered_map> | 11 | #include <unordered_set> |
| 13 | #include <utility> | 12 | #include <utility> |
| 14 | 13 | ||
| 15 | #include "common/assert.h" | 14 | #include "common/assert.h" |
| 16 | #include "common/logging/log.h" | 15 | #include "common/logging/log.h" |
| 17 | #include "common/microprofile.h" | 16 | #include "common/microprofile.h" |
| 18 | #include "common/thread.h" | 17 | #include "common/thread.h" |
| 18 | #include "common/thread_worker.h" | ||
| 19 | #include "core/arm/arm_interface.h" | 19 | #include "core/arm/arm_interface.h" |
| 20 | #include "core/arm/cpu_interrupt_handler.h" | 20 | #include "core/arm/cpu_interrupt_handler.h" |
| 21 | #include "core/arm/exclusive_monitor.h" | 21 | #include "core/arm/exclusive_monitor.h" |
| @@ -28,6 +28,7 @@ | |||
| 28 | #include "core/hle/kernel/client_port.h" | 28 | #include "core/hle/kernel/client_port.h" |
| 29 | #include "core/hle/kernel/errors.h" | 29 | #include "core/hle/kernel/errors.h" |
| 30 | #include "core/hle/kernel/handle_table.h" | 30 | #include "core/hle/kernel/handle_table.h" |
| 31 | #include "core/hle/kernel/k_scheduler.h" | ||
| 31 | #include "core/hle/kernel/kernel.h" | 32 | #include "core/hle/kernel/kernel.h" |
| 32 | #include "core/hle/kernel/memory/memory_layout.h" | 33 | #include "core/hle/kernel/memory/memory_layout.h" |
| 33 | #include "core/hle/kernel/memory/memory_manager.h" | 34 | #include "core/hle/kernel/memory/memory_manager.h" |
| @@ -35,7 +36,7 @@ | |||
| 35 | #include "core/hle/kernel/physical_core.h" | 36 | #include "core/hle/kernel/physical_core.h" |
| 36 | #include "core/hle/kernel/process.h" | 37 | #include "core/hle/kernel/process.h" |
| 37 | #include "core/hle/kernel/resource_limit.h" | 38 | #include "core/hle/kernel/resource_limit.h" |
| 38 | #include "core/hle/kernel/scheduler.h" | 39 | #include "core/hle/kernel/service_thread.h" |
| 39 | #include "core/hle/kernel/shared_memory.h" | 40 | #include "core/hle/kernel/shared_memory.h" |
| 40 | #include "core/hle/kernel/synchronization.h" | 41 | #include "core/hle/kernel/synchronization.h" |
| 41 | #include "core/hle/kernel/thread.h" | 42 | #include "core/hle/kernel/thread.h" |
| @@ -50,17 +51,20 @@ namespace Kernel { | |||
| 50 | 51 | ||
| 51 | struct KernelCore::Impl { | 52 | struct KernelCore::Impl { |
| 52 | explicit Impl(Core::System& system, KernelCore& kernel) | 53 | explicit Impl(Core::System& system, KernelCore& kernel) |
| 53 | : global_scheduler{kernel}, synchronization{system}, time_manager{system}, | 54 | : synchronization{system}, time_manager{system}, global_handle_table{kernel}, system{ |
| 54 | global_handle_table{kernel}, system{system} {} | 55 | system} {} |
| 55 | 56 | ||
| 56 | void SetMulticore(bool is_multicore) { | 57 | void SetMulticore(bool is_multicore) { |
| 57 | this->is_multicore = is_multicore; | 58 | this->is_multicore = is_multicore; |
| 58 | } | 59 | } |
| 59 | 60 | ||
| 60 | void Initialize(KernelCore& kernel) { | 61 | void Initialize(KernelCore& kernel) { |
| 61 | Shutdown(); | ||
| 62 | RegisterHostThread(); | 62 | RegisterHostThread(); |
| 63 | 63 | ||
| 64 | global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel); | ||
| 65 | service_thread_manager = | ||
| 66 | std::make_unique<Common::ThreadWorker>(1, "yuzu:ServiceThreadManager"); | ||
| 67 | |||
| 64 | InitializePhysicalCores(); | 68 | InitializePhysicalCores(); |
| 65 | InitializeSystemResourceLimit(kernel); | 69 | InitializeSystemResourceLimit(kernel); |
| 66 | InitializeMemoryLayout(); | 70 | InitializeMemoryLayout(); |
| @@ -69,7 +73,19 @@ struct KernelCore::Impl { | |||
| 69 | InitializeSuspendThreads(); | 73 | InitializeSuspendThreads(); |
| 70 | } | 74 | } |
| 71 | 75 | ||
| 76 | void InitializeCores() { | ||
| 77 | for (auto& core : cores) { | ||
| 78 | core.Initialize(current_process->Is64BitProcess()); | ||
| 79 | } | ||
| 80 | } | ||
| 81 | |||
| 72 | void Shutdown() { | 82 | void Shutdown() { |
| 83 | process_list.clear(); | ||
| 84 | |||
| 85 | // Ensures all service threads shut down gracefully | ||
| 86 | service_thread_manager.reset(); | ||
| 87 | service_threads.clear(); | ||
| 88 | |||
| 73 | next_object_id = 0; | 89 | next_object_id = 0; |
| 74 | next_kernel_process_id = Process::InitialKIPIDMin; | 90 | next_kernel_process_id = Process::InitialKIPIDMin; |
| 75 | next_user_process_id = Process::ProcessIDMin; | 91 | next_user_process_id = Process::ProcessIDMin; |
| @@ -81,41 +97,30 @@ struct KernelCore::Impl { | |||
| 81 | } | 97 | } |
| 82 | } | 98 | } |
| 83 | 99 | ||
| 84 | for (std::size_t i = 0; i < cores.size(); i++) { | ||
| 85 | cores[i].Shutdown(); | ||
| 86 | schedulers[i].reset(); | ||
| 87 | } | ||
| 88 | cores.clear(); | 100 | cores.clear(); |
| 89 | 101 | ||
| 90 | registered_core_threads.reset(); | ||
| 91 | |||
| 92 | process_list.clear(); | ||
| 93 | current_process = nullptr; | 102 | current_process = nullptr; |
| 94 | 103 | ||
| 95 | system_resource_limit = nullptr; | 104 | system_resource_limit = nullptr; |
| 96 | 105 | ||
| 97 | global_handle_table.Clear(); | 106 | global_handle_table.Clear(); |
| 98 | preemption_event = nullptr; | ||
| 99 | 107 | ||
| 100 | global_scheduler.Shutdown(); | 108 | preemption_event = nullptr; |
| 101 | 109 | ||
| 102 | named_ports.clear(); | 110 | named_ports.clear(); |
| 103 | 111 | ||
| 104 | for (auto& core : cores) { | ||
| 105 | core.Shutdown(); | ||
| 106 | } | ||
| 107 | cores.clear(); | ||
| 108 | |||
| 109 | exclusive_monitor.reset(); | 112 | exclusive_monitor.reset(); |
| 110 | host_thread_ids.clear(); | 113 | |
| 114 | // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others | ||
| 115 | next_host_thread_id = Core::Hardware::NUM_CPU_CORES; | ||
| 111 | } | 116 | } |
| 112 | 117 | ||
| 113 | void InitializePhysicalCores() { | 118 | void InitializePhysicalCores() { |
| 114 | exclusive_monitor = | 119 | exclusive_monitor = |
| 115 | Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES); | 120 | Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES); |
| 116 | for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { | 121 | for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { |
| 117 | schedulers[i] = std::make_unique<Kernel::Scheduler>(system, i); | 122 | schedulers[i] = std::make_unique<Kernel::KScheduler>(system, i); |
| 118 | cores.emplace_back(system, i, *schedulers[i], interrupts[i]); | 123 | cores.emplace_back(i, system, *schedulers[i], interrupts); |
| 119 | } | 124 | } |
| 120 | } | 125 | } |
| 121 | 126 | ||
| @@ -147,8 +152,8 @@ struct KernelCore::Impl { | |||
| 147 | preemption_event = Core::Timing::CreateEvent( | 152 | preemption_event = Core::Timing::CreateEvent( |
| 148 | "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) { | 153 | "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) { |
| 149 | { | 154 | { |
| 150 | SchedulerLock lock(kernel); | 155 | KScopedSchedulerLock lock(kernel); |
| 151 | global_scheduler.PreemptThreads(); | 156 | global_scheduler_context->PreemptThreads(); |
| 152 | } | 157 | } |
| 153 | const auto time_interval = std::chrono::nanoseconds{ | 158 | const auto time_interval = std::chrono::nanoseconds{ |
| 154 | Core::Timing::msToCycles(std::chrono::milliseconds(10))}; | 159 | Core::Timing::msToCycles(std::chrono::milliseconds(10))}; |
| @@ -177,63 +182,62 @@ struct KernelCore::Impl { | |||
| 177 | 182 | ||
| 178 | void MakeCurrentProcess(Process* process) { | 183 | void MakeCurrentProcess(Process* process) { |
| 179 | current_process = process; | 184 | current_process = process; |
| 180 | |||
| 181 | if (process == nullptr) { | 185 | if (process == nullptr) { |
| 182 | return; | 186 | return; |
| 183 | } | 187 | } |
| 184 | 188 | ||
| 185 | u32 core_id = GetCurrentHostThreadID(); | 189 | const u32 core_id = GetCurrentHostThreadID(); |
| 186 | if (core_id < Core::Hardware::NUM_CPU_CORES) { | 190 | if (core_id < Core::Hardware::NUM_CPU_CORES) { |
| 187 | system.Memory().SetCurrentPageTable(*process, core_id); | 191 | system.Memory().SetCurrentPageTable(*process, core_id); |
| 188 | } | 192 | } |
| 189 | } | 193 | } |
| 190 | 194 | ||
| 195 | /// Creates a new host thread ID, should only be called by GetHostThreadId | ||
| 196 | u32 AllocateHostThreadId(std::optional<std::size_t> core_id) { | ||
| 197 | if (core_id) { | ||
| 198 | // The first four slots are reserved for CPU core threads | ||
| 199 | ASSERT(*core_id < Core::Hardware::NUM_CPU_CORES); | ||
| 200 | return static_cast<u32>(*core_id); | ||
| 201 | } else { | ||
| 202 | return next_host_thread_id++; | ||
| 203 | } | ||
| 204 | } | ||
| 205 | |||
| 206 | /// Gets the host thread ID for the caller, allocating a new one if this is the first time | ||
| 207 | u32 GetHostThreadId(std::optional<std::size_t> core_id = std::nullopt) { | ||
| 208 | const thread_local auto host_thread_id{AllocateHostThreadId(core_id)}; | ||
| 209 | return host_thread_id; | ||
| 210 | } | ||
| 211 | |||
| 212 | /// Registers a CPU core thread by allocating a host thread ID for it | ||
| 191 | void RegisterCoreThread(std::size_t core_id) { | 213 | void RegisterCoreThread(std::size_t core_id) { |
| 192 | std::unique_lock lock{register_thread_mutex}; | 214 | ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); |
| 215 | const auto this_id = GetHostThreadId(core_id); | ||
| 193 | if (!is_multicore) { | 216 | if (!is_multicore) { |
| 194 | single_core_thread_id = std::this_thread::get_id(); | 217 | single_core_thread_id = this_id; |
| 195 | } | 218 | } |
| 196 | const std::thread::id this_id = std::this_thread::get_id(); | ||
| 197 | const auto it = host_thread_ids.find(this_id); | ||
| 198 | ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); | ||
| 199 | ASSERT(it == host_thread_ids.end()); | ||
| 200 | ASSERT(!registered_core_threads[core_id]); | ||
| 201 | host_thread_ids[this_id] = static_cast<u32>(core_id); | ||
| 202 | registered_core_threads.set(core_id); | ||
| 203 | } | 219 | } |
| 204 | 220 | ||
| 221 | /// Registers a new host thread by allocating a host thread ID for it | ||
| 205 | void RegisterHostThread() { | 222 | void RegisterHostThread() { |
| 206 | std::unique_lock lock{register_thread_mutex}; | 223 | [[maybe_unused]] const auto this_id = GetHostThreadId(); |
| 207 | const std::thread::id this_id = std::this_thread::get_id(); | ||
| 208 | const auto it = host_thread_ids.find(this_id); | ||
| 209 | if (it != host_thread_ids.end()) { | ||
| 210 | return; | ||
| 211 | } | ||
| 212 | host_thread_ids[this_id] = registered_thread_ids++; | ||
| 213 | } | 224 | } |
| 214 | 225 | ||
| 215 | u32 GetCurrentHostThreadID() const { | 226 | [[nodiscard]] u32 GetCurrentHostThreadID() { |
| 216 | const std::thread::id this_id = std::this_thread::get_id(); | 227 | const auto this_id = GetHostThreadId(); |
| 217 | if (!is_multicore) { | 228 | if (!is_multicore && single_core_thread_id == this_id) { |
| 218 | if (single_core_thread_id == this_id) { | 229 | return static_cast<u32>(system.GetCpuManager().CurrentCore()); |
| 219 | return static_cast<u32>(system.GetCpuManager().CurrentCore()); | ||
| 220 | } | ||
| 221 | } | ||
| 222 | std::unique_lock lock{register_thread_mutex}; | ||
| 223 | const auto it = host_thread_ids.find(this_id); | ||
| 224 | if (it == host_thread_ids.end()) { | ||
| 225 | return Core::INVALID_HOST_THREAD_ID; | ||
| 226 | } | 230 | } |
| 227 | return it->second; | 231 | return this_id; |
| 228 | } | 232 | } |
| 229 | 233 | ||
| 230 | Core::EmuThreadHandle GetCurrentEmuThreadID() const { | 234 | [[nodiscard]] Core::EmuThreadHandle GetCurrentEmuThreadID() { |
| 231 | Core::EmuThreadHandle result = Core::EmuThreadHandle::InvalidHandle(); | 235 | Core::EmuThreadHandle result = Core::EmuThreadHandle::InvalidHandle(); |
| 232 | result.host_handle = GetCurrentHostThreadID(); | 236 | result.host_handle = GetCurrentHostThreadID(); |
| 233 | if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) { | 237 | if (result.host_handle >= Core::Hardware::NUM_CPU_CORES) { |
| 234 | return result; | 238 | return result; |
| 235 | } | 239 | } |
| 236 | const Kernel::Scheduler& sched = cores[result.host_handle].Scheduler(); | 240 | const Kernel::KScheduler& sched = cores[result.host_handle].Scheduler(); |
| 237 | const Kernel::Thread* current = sched.GetCurrentThread(); | 241 | const Kernel::Thread* current = sched.GetCurrentThread(); |
| 238 | if (current != nullptr && !current->IsPhantomMode()) { | 242 | if (current != nullptr && !current->IsPhantomMode()) { |
| 239 | result.guest_handle = current->GetGlobalHandle(); | 243 | result.guest_handle = current->GetGlobalHandle(); |
| @@ -302,7 +306,7 @@ struct KernelCore::Impl { | |||
| 302 | // Lists all processes that exist in the current session. | 306 | // Lists all processes that exist in the current session. |
| 303 | std::vector<std::shared_ptr<Process>> process_list; | 307 | std::vector<std::shared_ptr<Process>> process_list; |
| 304 | Process* current_process = nullptr; | 308 | Process* current_process = nullptr; |
| 305 | Kernel::GlobalScheduler global_scheduler; | 309 | std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context; |
| 306 | Kernel::Synchronization synchronization; | 310 | Kernel::Synchronization synchronization; |
| 307 | Kernel::TimeManager time_manager; | 311 | Kernel::TimeManager time_manager; |
| 308 | 312 | ||
| @@ -321,11 +325,8 @@ struct KernelCore::Impl { | |||
| 321 | std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor; | 325 | std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor; |
| 322 | std::vector<Kernel::PhysicalCore> cores; | 326 | std::vector<Kernel::PhysicalCore> cores; |
| 323 | 327 | ||
| 324 | // 0-3 IDs represent core threads, >3 represent others | 328 | // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others |
| 325 | std::unordered_map<std::thread::id, u32> host_thread_ids; | 329 | std::atomic<u32> next_host_thread_id{Core::Hardware::NUM_CPU_CORES}; |
| 326 | u32 registered_thread_ids{Core::Hardware::NUM_CPU_CORES}; | ||
| 327 | std::bitset<Core::Hardware::NUM_CPU_CORES> registered_core_threads; | ||
| 328 | mutable std::mutex register_thread_mutex; | ||
| 329 | 330 | ||
| 330 | // Kernel memory management | 331 | // Kernel memory management |
| 331 | std::unique_ptr<Memory::MemoryManager> memory_manager; | 332 | std::unique_ptr<Memory::MemoryManager> memory_manager; |
| @@ -337,12 +338,19 @@ struct KernelCore::Impl { | |||
| 337 | std::shared_ptr<Kernel::SharedMemory> irs_shared_mem; | 338 | std::shared_ptr<Kernel::SharedMemory> irs_shared_mem; |
| 338 | std::shared_ptr<Kernel::SharedMemory> time_shared_mem; | 339 | std::shared_ptr<Kernel::SharedMemory> time_shared_mem; |
| 339 | 340 | ||
| 341 | // Threads used for services | ||
| 342 | std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads; | ||
| 343 | |||
| 344 | // Service threads are managed by a worker thread, so that a calling service thread can queue up | ||
| 345 | // the release of itself | ||
| 346 | std::unique_ptr<Common::ThreadWorker> service_thread_manager; | ||
| 347 | |||
| 340 | std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{}; | 348 | std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{}; |
| 341 | std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{}; | 349 | std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{}; |
| 342 | std::array<std::unique_ptr<Kernel::Scheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{}; | 350 | std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{}; |
| 343 | 351 | ||
| 344 | bool is_multicore{}; | 352 | bool is_multicore{}; |
| 345 | std::thread::id single_core_thread_id{}; | 353 | u32 single_core_thread_id{}; |
| 346 | 354 | ||
| 347 | std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{}; | 355 | std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{}; |
| 348 | 356 | ||
| @@ -363,6 +371,10 @@ void KernelCore::Initialize() { | |||
| 363 | impl->Initialize(*this); | 371 | impl->Initialize(*this); |
| 364 | } | 372 | } |
| 365 | 373 | ||
| 374 | void KernelCore::InitializeCores() { | ||
| 375 | impl->InitializeCores(); | ||
| 376 | } | ||
| 377 | |||
| 366 | void KernelCore::Shutdown() { | 378 | void KernelCore::Shutdown() { |
| 367 | impl->Shutdown(); | 379 | impl->Shutdown(); |
| 368 | } | 380 | } |
| @@ -395,19 +407,19 @@ const std::vector<std::shared_ptr<Process>>& KernelCore::GetProcessList() const | |||
| 395 | return impl->process_list; | 407 | return impl->process_list; |
| 396 | } | 408 | } |
| 397 | 409 | ||
| 398 | Kernel::GlobalScheduler& KernelCore::GlobalScheduler() { | 410 | Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() { |
| 399 | return impl->global_scheduler; | 411 | return *impl->global_scheduler_context; |
| 400 | } | 412 | } |
| 401 | 413 | ||
| 402 | const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const { | 414 | const Kernel::GlobalSchedulerContext& KernelCore::GlobalSchedulerContext() const { |
| 403 | return impl->global_scheduler; | 415 | return *impl->global_scheduler_context; |
| 404 | } | 416 | } |
| 405 | 417 | ||
| 406 | Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) { | 418 | Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) { |
| 407 | return *impl->schedulers[id]; | 419 | return *impl->schedulers[id]; |
| 408 | } | 420 | } |
| 409 | 421 | ||
| 410 | const Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) const { | 422 | const Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) const { |
| 411 | return *impl->schedulers[id]; | 423 | return *impl->schedulers[id]; |
| 412 | } | 424 | } |
| 413 | 425 | ||
| @@ -431,16 +443,13 @@ const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const { | |||
| 431 | return impl->cores[core_id]; | 443 | return impl->cores[core_id]; |
| 432 | } | 444 | } |
| 433 | 445 | ||
| 434 | Kernel::Scheduler& KernelCore::CurrentScheduler() { | 446 | Kernel::KScheduler* KernelCore::CurrentScheduler() { |
| 435 | u32 core_id = impl->GetCurrentHostThreadID(); | 447 | u32 core_id = impl->GetCurrentHostThreadID(); |
| 436 | ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); | 448 | if (core_id >= Core::Hardware::NUM_CPU_CORES) { |
| 437 | return *impl->schedulers[core_id]; | 449 | // This is expected when not called from a guest thread |
| 438 | } | 450 | return {}; |
| 439 | 451 | } | |
| 440 | const Kernel::Scheduler& KernelCore::CurrentScheduler() const { | 452 | return impl->schedulers[core_id].get(); |
| 441 | u32 core_id = impl->GetCurrentHostThreadID(); | ||
| 442 | ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); | ||
| 443 | return *impl->schedulers[core_id]; | ||
| 444 | } | 453 | } |
| 445 | 454 | ||
| 446 | std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() { | 455 | std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() { |
| @@ -477,12 +486,17 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const { | |||
| 477 | } | 486 | } |
| 478 | 487 | ||
| 479 | void KernelCore::InvalidateAllInstructionCaches() { | 488 | void KernelCore::InvalidateAllInstructionCaches() { |
| 480 | auto& threads = GlobalScheduler().GetThreadList(); | 489 | for (auto& physical_core : impl->cores) { |
| 481 | for (auto& thread : threads) { | 490 | physical_core.ArmInterface().ClearInstructionCache(); |
| 482 | if (!thread->IsHLEThread()) { | 491 | } |
| 483 | auto& arm_interface = thread->ArmInterface(); | 492 | } |
| 484 | arm_interface.ClearInstructionCache(); | 493 | |
| 494 | void KernelCore::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) { | ||
| 495 | for (auto& physical_core : impl->cores) { | ||
| 496 | if (!physical_core.IsInitialized()) { | ||
| 497 | continue; | ||
| 485 | } | 498 | } |
| 499 | physical_core.ArmInterface().InvalidateCacheRange(addr, size); | ||
| 486 | } | 500 | } |
| 487 | } | 501 | } |
| 488 | 502 | ||
| @@ -598,7 +612,7 @@ const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const { | |||
| 598 | void KernelCore::Suspend(bool in_suspention) { | 612 | void KernelCore::Suspend(bool in_suspention) { |
| 599 | const bool should_suspend = exception_exited || in_suspention; | 613 | const bool should_suspend = exception_exited || in_suspention; |
| 600 | { | 614 | { |
| 601 | SchedulerLock lock(*this); | 615 | KScopedSchedulerLock lock(*this); |
| 602 | ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep; | 616 | ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep; |
| 603 | for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { | 617 | for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { |
| 604 | impl->suspend_threads[i]->SetStatus(status); | 618 | impl->suspend_threads[i]->SetStatus(status); |
| @@ -625,4 +639,19 @@ void KernelCore::ExitSVCProfile() { | |||
| 625 | MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]); | 639 | MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]); |
| 626 | } | 640 | } |
| 627 | 641 | ||
| 642 | std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) { | ||
| 643 | auto service_thread = std::make_shared<Kernel::ServiceThread>(*this, 1, name); | ||
| 644 | impl->service_thread_manager->QueueWork( | ||
| 645 | [this, service_thread] { impl->service_threads.emplace(service_thread); }); | ||
| 646 | return service_thread; | ||
| 647 | } | ||
| 648 | |||
| 649 | void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) { | ||
| 650 | impl->service_thread_manager->QueueWork([this, service_thread] { | ||
| 651 | if (auto strong_ptr = service_thread.lock()) { | ||
| 652 | impl->service_threads.erase(strong_ptr); | ||
| 653 | } | ||
| 654 | }); | ||
| 655 | } | ||
| 656 | |||
| 628 | } // namespace Kernel | 657 | } // namespace Kernel |