|  |  |
|---|---|
| author | 2020-07-04 00:59:40 -0400 |
| committer | 2020-07-04 00:59:40 -0400 |
| commit | f829932ed191ad469df01342191bf2725e8a20bb (patch) |
| tree | 0ae185ce3ef43ef9b085aae7b9ad5abb04e3d239 /src/core/hle/kernel/kernel.cpp |
| parent | Fix for always firing triggers on some controllers, trigger threshold more un... (diff) |
| parent | Merge pull request #4218 from ogniK5377/opus-external (diff) |
| download | yuzu-f829932ed191ad469df01342191bf2725e8a20bb.tar.gz yuzu-f829932ed191ad469df01342191bf2725e8a20bb.tar.xz yuzu-f829932ed191ad469df01342191bf2725e8a20bb.zip |
Fix merge conflicts?
Diffstat (limited to 'src/core/hle/kernel/kernel.cpp')

|  |  |  |
|---|---|---|
| -rw-r--r-- | src/core/hle/kernel/kernel.cpp | 255 |

1 file changed, 159 insertions, 96 deletions

```diff
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 7655382fa..1f2af7a1b 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -2,6 +2,7 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include <array>
 #include <atomic>
 #include <bitset>
 #include <functional>
@@ -13,11 +14,15 @@
 
 #include "common/assert.h"
 #include "common/logging/log.h"
+#include "common/microprofile.h"
+#include "common/thread.h"
 #include "core/arm/arm_interface.h"
+#include "core/arm/cpu_interrupt_handler.h"
 #include "core/arm/exclusive_monitor.h"
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/core_timing_util.h"
+#include "core/cpu_manager.h"
 #include "core/device_memory.h"
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/client_port.h"
@@ -39,85 +44,28 @@
 #include "core/hle/result.h"
 #include "core/memory.h"
 
-namespace Kernel {
-
-/**
- * Callback that will wake up the thread it was scheduled for
- * @param thread_handle The handle of the thread that's been awoken
- * @param cycles_late The number of CPU cycles that have passed since the desired wakeup time
- */
-static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
-    const auto proper_handle = static_cast<Handle>(thread_handle);
-    const auto& system = Core::System::GetInstance();
-
-    // Lock the global kernel mutex when we enter the kernel HLE.
-    std::lock_guard lock{HLE::g_hle_lock};
-
-    std::shared_ptr<Thread> thread =
-        system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
-    if (thread == nullptr) {
-        LOG_CRITICAL(Kernel, "Callback fired for invalid thread {:08X}", proper_handle);
-        return;
-    }
-
-    bool resume = true;
-
-    if (thread->GetStatus() == ThreadStatus::WaitSynch ||
-        thread->GetStatus() == ThreadStatus::WaitHLEEvent) {
-        // Remove the thread from each of its waiting objects' waitlists
-        for (const auto& object : thread->GetSynchronizationObjects()) {
-            object->RemoveWaitingThread(thread);
-        }
-        thread->ClearSynchronizationObjects();
-
-        // Invoke the wakeup callback before clearing the wait objects
-        if (thread->HasWakeupCallback()) {
-            resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Timeout, thread, nullptr, 0);
-        }
-    } else if (thread->GetStatus() == ThreadStatus::WaitMutex ||
-               thread->GetStatus() == ThreadStatus::WaitCondVar) {
-        thread->SetMutexWaitAddress(0);
-        thread->SetWaitHandle(0);
-        if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
-            thread->GetOwnerProcess()->RemoveConditionVariableThread(thread);
-            thread->SetCondVarWaitAddress(0);
-        }
-
-        auto* const lock_owner = thread->GetLockOwner();
-        // Threads waking up by timeout from WaitProcessWideKey do not perform priority inheritance
-        // and don't have a lock owner unless SignalProcessWideKey was called first and the thread
-        // wasn't awakened due to the mutex already being acquired.
-        if (lock_owner != nullptr) {
-            lock_owner->RemoveMutexWaiter(thread);
-        }
-    }
+MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70));
 
-    if (thread->GetStatus() == ThreadStatus::WaitArb) {
-        auto& address_arbiter = thread->GetOwnerProcess()->GetAddressArbiter();
-        address_arbiter.HandleWakeupThread(thread);
-    }
-
-    if (resume) {
-        if (thread->GetStatus() == ThreadStatus::WaitCondVar ||
-            thread->GetStatus() == ThreadStatus::WaitArb) {
-            thread->SetWaitSynchronizationResult(RESULT_TIMEOUT);
-        }
-        thread->ResumeFromWait();
-    }
-}
+namespace Kernel {
 
 struct KernelCore::Impl {
     explicit Impl(Core::System& system, KernelCore& kernel)
         : global_scheduler{kernel}, synchronization{system}, time_manager{system}, system{system} {}
 
+    void SetMulticore(bool is_multicore) {
+        this->is_multicore = is_multicore;
+    }
+
     void Initialize(KernelCore& kernel) {
         Shutdown();
+        RegisterHostThread();
 
         InitializePhysicalCores();
         InitializeSystemResourceLimit(kernel);
         InitializeMemoryLayout();
-        InitializeThreads();
-        InitializePreemption();
+        InitializePreemption(kernel);
+        InitializeSchedulers();
+        InitializeSuspendThreads();
     }
 
     void Shutdown() {
```
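
The hunk above deletes the timer-driven `ThreadWakeupCallback`. Its central pattern — a callback receives only a raw `u64` handle and must resolve it back to a live `Thread` through the global handle table, bailing out if the handle has gone stale — can be sketched standalone like this (illustrative only; `GlobalHandleTable`, `Retrieve`, and `WakeupCallback` here are hypothetical stand-ins, not yuzu's types):

```cpp
#include <cstdint>
#include <iostream>
#include <memory>
#include <unordered_map>

struct Thread {
    void Resume() {}
};

// Hypothetical stand-in for the kernel's global handle table: a stale handle
// simply fails the lookup instead of dereferencing a dead thread.
class GlobalHandleTable {
public:
    std::uint32_t Insert(std::shared_ptr<Thread> thread) {
        const std::uint32_t handle = next_handle++;
        threads[handle] = std::move(thread);
        return handle;
    }

    std::shared_ptr<Thread> Retrieve(std::uint32_t handle) const {
        const auto it = threads.find(handle);
        return it != threads.end() ? it->second : nullptr;
    }

private:
    std::uint32_t next_handle = 1;
    std::unordered_map<std::uint32_t, std::shared_ptr<Thread>> threads;
};

void WakeupCallback(GlobalHandleTable& table, std::uint64_t thread_handle) {
    const auto handle = static_cast<std::uint32_t>(thread_handle);
    if (auto thread = table.Retrieve(handle)) {
        thread->Resume();
    } else {
        // Mirrors the LOG_CRITICAL path in the removed callback.
        std::cerr << "Callback fired for invalid thread " << handle << '\n';
    }
}

int main() {
    GlobalHandleTable table;
    const auto handle = table.Insert(std::make_shared<Thread>());
    WakeupCallback(table, handle);  // resolves and resumes
    WakeupCallback(table, 0xdead);  // logs and returns
}
```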

```diff
@@ -126,13 +74,26 @@ struct KernelCore::Impl {
         next_user_process_id = Process::ProcessIDMin;
         next_thread_id = 1;
 
+        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+            if (suspend_threads[i]) {
+                suspend_threads[i].reset();
+            }
+        }
+
+        for (std::size_t i = 0; i < cores.size(); i++) {
+            cores[i].Shutdown();
+            schedulers[i].reset();
+        }
+        cores.clear();
+
+        registered_core_threads.reset();
+
         process_list.clear();
         current_process = nullptr;
 
         system_resource_limit = nullptr;
 
         global_handle_table.Clear();
-        thread_wakeup_event_type = nullptr;
         preemption_event = nullptr;
 
         global_scheduler.Shutdown();
@@ -145,13 +106,21 @@ struct KernelCore::Impl {
         cores.clear();
 
         exclusive_monitor.reset();
+        host_thread_ids.clear();
     }
 
     void InitializePhysicalCores() {
         exclusive_monitor =
             Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
         for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
-            cores.emplace_back(system, i, *exclusive_monitor);
+            schedulers[i] = std::make_unique<Kernel::Scheduler>(system, i);
+            cores.emplace_back(system, i, *schedulers[i], interrupts[i]);
+        }
+    }
+
+    void InitializeSchedulers() {
+        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+            cores[i].Scheduler().Initialize();
         }
     }
 
@@ -173,15 +142,13 @@ struct KernelCore::Impl {
         }
     }
 
-    void InitializeThreads() {
-        thread_wakeup_event_type =
-            Core::Timing::CreateEvent("ThreadWakeupCallback", ThreadWakeupCallback);
-    }
-
-    void InitializePreemption() {
-        preemption_event =
-            Core::Timing::CreateEvent("PreemptionCallback", [this](u64 userdata, s64 cycles_late) {
-                global_scheduler.PreemptThreads();
+    void InitializePreemption(KernelCore& kernel) {
+        preemption_event = Core::Timing::CreateEvent(
+            "PreemptionCallback", [this, &kernel](u64 userdata, s64 cycles_late) {
+                {
+                    SchedulerLock lock(kernel);
+                    global_scheduler.PreemptThreads();
+                }
                 s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
                 system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
             });
```
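
The rewritten `InitializePreemption` registers a `Core::Timing` event that takes the new `SchedulerLock` before calling `PreemptThreads()` and then re-arms itself every 10 ms. The same self-re-arming shape can be sketched with standard threads (a minimal sketch, not yuzu code; yuzu re-schedules through `CoreTiming().ScheduleEvent`, not a sleep loop):

```cpp
#include <atomic>
#include <chrono>
#include <functional>
#include <thread>

// Minimal self-re-arming timer: runs `callback` once per `interval` until
// destroyed, standing in for an event that re-schedules itself each time it fires.
class RepeatingEvent {
public:
    RepeatingEvent(std::chrono::milliseconds interval, std::function<void()> callback)
        : interval{interval}, callback{std::move(callback)}, worker{[this] { Run(); }} {}

    ~RepeatingEvent() {
        stop.store(true);
        worker.join();
    }

private:
    void Run() {
        while (!stop.load()) {
            std::this_thread::sleep_for(interval); // wait out one time slice
            callback();                            // e.g. preempt the scheduler
        }
    }

    std::chrono::milliseconds interval;
    std::function<void()> callback;
    std::atomic<bool> stop{false};
    std::thread worker; // declared last so every other member is live in Run()
};

int main() {
    RepeatingEvent preemption{std::chrono::milliseconds(10), [] {
        // Stand-in for: SchedulerLock lock(kernel); global_scheduler.PreemptThreads();
    }};
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
}
```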

```diff
@@ -190,6 +157,20 @@ struct KernelCore::Impl {
         system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
     }
 
+    void InitializeSuspendThreads() {
+        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+            std::string name = "Suspend Thread Id:" + std::to_string(i);
+            std::function<void(void*)> init_func =
+                system.GetCpuManager().GetSuspendThreadStartFunc();
+            void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
+            ThreadType type =
+                static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND);
+            auto thread_res = Thread::Create(system, type, name, 0, 0, 0, static_cast<u32>(i), 0,
+                                             nullptr, std::move(init_func), init_func_parameter);
+            suspend_threads[i] = std::move(thread_res).Unwrap();
+        }
+    }
+
     void MakeCurrentProcess(Process* process) {
         current_process = process;
 
```
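
`InitializeSuspendThreads` creates one kernel-owned HLE thread per core, and `KernelCore::Suspend` (later in this diff) parks or releases all of them at once by flipping their status. A condition-variable analogue of that park/release behavior, assuming four cores (illustrative only; the real suspend threads run a `CpuManager` start routine rather than waiting on a `std::condition_variable`):

```cpp
#include <array>
#include <condition_variable>
#include <mutex>
#include <thread>

constexpr std::size_t NUM_CPU_CORES = 4; // matches Core::Hardware::NUM_CPU_CORES

// Each worker parks until `suspended` flips, much like a per-core suspend
// thread takes over its core while emulation is paused.
class SuspendThreads {
public:
    SuspendThreads() {
        for (std::size_t i = 0; i < NUM_CPU_CORES; ++i) {
            workers[i] = std::thread{[this, i] { Park(i); }};
        }
    }

    ~SuspendThreads() {
        {
            std::lock_guard lock{mutex};
            quit = true;
        }
        cv.notify_all();
        for (auto& worker : workers) {
            worker.join();
        }
    }

    void SetSuspended(bool value) {
        {
            std::lock_guard lock{mutex};
            suspended = value;
        }
        cv.notify_all();
    }

private:
    void Park(std::size_t core) {
        static_cast<void>(core); // real suspend threads are pinned to this core index
        std::unique_lock lock{mutex};
        while (!quit) {
            // Wait until suspension is requested (or shutdown begins)...
            cv.wait(lock, [this] { return suspended || quit; });
            // ...then hold the core until resumed.
            cv.wait(lock, [this] { return !suspended || quit; });
        }
    }

    std::mutex mutex;
    std::condition_variable cv;
    bool suspended = false;
    bool quit = false;
    std::array<std::thread, NUM_CPU_CORES> workers;
};

int main() {
    SuspendThreads threads;
    threads.SetSuspended(true);  // emulation paused: cores parked
    threads.SetSuspended(false); // resumed
}
```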

```diff
@@ -197,15 +178,17 @@ struct KernelCore::Impl {
             return;
         }
 
-        for (auto& core : cores) {
-            core.SetIs64Bit(process->Is64BitProcess());
+        u32 core_id = GetCurrentHostThreadID();
+        if (core_id < Core::Hardware::NUM_CPU_CORES) {
+            system.Memory().SetCurrentPageTable(*process, core_id);
         }
-
-        system.Memory().SetCurrentPageTable(*process);
     }
 
     void RegisterCoreThread(std::size_t core_id) {
         std::unique_lock lock{register_thread_mutex};
+        if (!is_multicore) {
+            single_core_thread_id = std::this_thread::get_id();
+        }
         const std::thread::id this_id = std::this_thread::get_id();
         const auto it = host_thread_ids.find(this_id);
         ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
@@ -219,12 +202,19 @@ struct KernelCore::Impl {
         std::unique_lock lock{register_thread_mutex};
         const std::thread::id this_id = std::this_thread::get_id();
         const auto it = host_thread_ids.find(this_id);
-        ASSERT(it == host_thread_ids.end());
+        if (it != host_thread_ids.end()) {
+            return;
+        }
         host_thread_ids[this_id] = registered_thread_ids++;
     }
 
     u32 GetCurrentHostThreadID() const {
         const std::thread::id this_id = std::this_thread::get_id();
+        if (!is_multicore) {
+            if (single_core_thread_id == this_id) {
+                return static_cast<u32>(system.GetCpuManager().CurrentCore());
+            }
+        }
         const auto it = host_thread_ids.find(this_id);
         if (it == host_thread_ids.end()) {
             return Core::INVALID_HOST_THREAD_ID;
```
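
Both hunks above relax the host-thread bookkeeping: `RegisterHostThread` now tolerates double registration instead of asserting, and single-core mode short-circuits the lookup through `CpuManager::CurrentCore()`. Underneath, the registry is a mutex-guarded map from `std::thread::id` to a dense index, roughly like this (a sketch; the class and method names are hypothetical):

```cpp
#include <cassert>
#include <cstdint>
#include <mutex>
#include <thread>
#include <unordered_map>

constexpr std::uint32_t INVALID_HOST_THREAD_ID = 0xFFFFFFFF;

// std::thread::id is hashable but opaque, so each host thread is mapped to a
// small dense index on first registration.
class HostThreadRegistry {
public:
    // Idempotent, like the patched RegisterHostThread: re-registering the
    // same thread is a no-op instead of an assertion failure.
    void Register() {
        std::lock_guard lock{mutex};
        const auto this_id = std::this_thread::get_id();
        if (ids.find(this_id) != ids.end()) {
            return;
        }
        ids[this_id] = next_id++;
    }

    std::uint32_t Current() const {
        std::lock_guard lock{mutex};
        const auto it = ids.find(std::this_thread::get_id());
        return it != ids.end() ? it->second : INVALID_HOST_THREAD_ID;
    }

private:
    mutable std::mutex mutex;
    std::unordered_map<std::thread::id, std::uint32_t> ids;
    std::uint32_t next_id = 0;
};

int main() {
    HostThreadRegistry registry;
    registry.Register();
    registry.Register(); // second call is harmless
    assert(registry.Current() == 0);
}
```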

```diff
@@ -240,7 +230,7 @@ struct KernelCore::Impl {
         }
         const Kernel::Scheduler& sched = cores[result.host_handle].Scheduler();
         const Kernel::Thread* current = sched.GetCurrentThread();
-        if (current != nullptr) {
+        if (current != nullptr && !current->IsPhantomMode()) {
             result.guest_handle = current->GetGlobalHandle();
         } else {
             result.guest_handle = InvalidHandle;
@@ -313,7 +303,6 @@ struct KernelCore::Impl {
 
     std::shared_ptr<ResourceLimit> system_resource_limit;
 
-    std::shared_ptr<Core::Timing::EventType> thread_wakeup_event_type;
     std::shared_ptr<Core::Timing::EventType> preemption_event;
 
     // This is the kernel's handle table or supervisor handle table which
@@ -343,6 +332,15 @@ struct KernelCore::Impl {
     std::shared_ptr<Kernel::SharedMemory> irs_shared_mem;
     std::shared_ptr<Kernel::SharedMemory> time_shared_mem;
 
+    std::array<std::shared_ptr<Thread>, Core::Hardware::NUM_CPU_CORES> suspend_threads{};
+    std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
+    std::array<std::unique_ptr<Kernel::Scheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
+
+    bool is_multicore{};
+    std::thread::id single_core_thread_id{};
+
+    std::array<u64, Core::Hardware::NUM_CPU_CORES> svc_ticks{};
+
     // System context
     Core::System& system;
 };
@@ -352,6 +350,10 @@ KernelCore::~KernelCore() {
     Shutdown();
 }
 
+void KernelCore::SetMulticore(bool is_multicore) {
+    impl->SetMulticore(is_multicore);
+}
+
 void KernelCore::Initialize() {
     impl->Initialize(*this);
 }
@@ -397,11 +399,11 @@ const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const {
 }
 
 Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) {
-    return impl->cores[id].Scheduler();
+    return *impl->schedulers[id];
 }
 
 const Kernel::Scheduler& KernelCore::Scheduler(std::size_t id) const {
-    return impl->cores[id].Scheduler();
+    return *impl->schedulers[id];
 }
 
 Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) {
@@ -412,6 +414,39 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
     return impl->cores[id];
 }
 
+Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
+    u32 core_id = impl->GetCurrentHostThreadID();
+    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+    return impl->cores[core_id];
+}
+
+const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
+    u32 core_id = impl->GetCurrentHostThreadID();
+    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+    return impl->cores[core_id];
+}
+
+Kernel::Scheduler& KernelCore::CurrentScheduler() {
+    u32 core_id = impl->GetCurrentHostThreadID();
+    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+    return *impl->schedulers[core_id];
+}
+
+const Kernel::Scheduler& KernelCore::CurrentScheduler() const {
+    u32 core_id = impl->GetCurrentHostThreadID();
+    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+    return *impl->schedulers[core_id];
+}
+
+std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
+    return impl->interrupts;
+}
+
+const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts()
+    const {
+    return impl->interrupts;
+}
+
 Kernel::Synchronization& KernelCore::Synchronization() {
     return impl->synchronization;
 }
@@ -437,15 +472,17 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const {
 }
 
 void KernelCore::InvalidateAllInstructionCaches() {
-    for (std::size_t i = 0; i < impl->global_scheduler.CpuCoresCount(); i++) {
-        PhysicalCore(i).ArmInterface().ClearInstructionCache();
+    auto& threads = GlobalScheduler().GetThreadList();
+    for (auto& thread : threads) {
+        if (!thread->IsHLEThread()) {
+            auto& arm_interface = thread->ArmInterface();
+            arm_interface.ClearInstructionCache();
+        }
     }
 }
 
 void KernelCore::PrepareReschedule(std::size_t id) {
-    if (id < impl->global_scheduler.CpuCoresCount()) {
-        impl->cores[id].Stop();
-    }
+    // TODO: Reimplement, this
 }
 
 void KernelCore::AddNamedPort(std::string name, std::shared_ptr<ClientPort> port) {
@@ -481,10 +518,6 @@ u64 KernelCore::CreateNewUserProcessID() {
    return impl->next_user_process_id++;
 }
 
-const std::shared_ptr<Core::Timing::EventType>& KernelCore::ThreadWakeupCallbackEventType() const {
-    return impl->thread_wakeup_event_type;
-}
-
 Kernel::HandleTable& KernelCore::GlobalHandleTable() {
     return impl->global_handle_table;
 }
@@ -557,4 +590,34 @@ const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const {
     return *impl->time_shared_mem;
 }
 
+void KernelCore::Suspend(bool in_suspention) {
+    const bool should_suspend = exception_exited || in_suspention;
+    {
+        SchedulerLock lock(*this);
+        ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep;
+        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+            impl->suspend_threads[i]->SetStatus(status);
+        }
+    }
+}
+
+bool KernelCore::IsMulticore() const {
+    return impl->is_multicore;
+}
+
+void KernelCore::ExceptionalExit() {
+    exception_exited = true;
+    Suspend(true);
+}
+
+void KernelCore::EnterSVCProfile() {
+    std::size_t core = impl->GetCurrentHostThreadID();
+    impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
+}
+
+void KernelCore::ExitSVCProfile() {
+    std::size_t core = impl->GetCurrentHostThreadID();
+    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
+}
+
 } // namespace Kernel
```
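
The new `EnterSVCProfile`/`ExitSVCProfile` pair cannot use a scoped RAII timer because entry and exit happen in different functions, before and after the guest SVC handler runs, so the enter tick is stashed per core in `svc_ticks` and handed back on exit. A self-contained sketch of that split enter/leave shape (hypothetical `ProfileEnter`/`ProfileLeave` stand in for the MicroProfile calls):

```cpp
#include <array>
#include <chrono>
#include <cstdint>
#include <cstdio>

constexpr std::size_t NUM_CPU_CORES = 4;

// Hypothetical stand-in for MicroProfileEnter: returns an entry tick that the
// caller must hand back on exit.
std::uint64_t ProfileEnter() {
    return std::chrono::steady_clock::now().time_since_epoch().count();
}

// Hypothetical stand-in for MicroProfileLeave.
void ProfileLeave(std::uint64_t enter_tick) {
    const auto now = std::chrono::steady_clock::now().time_since_epoch().count();
    std::printf("SVC took %lld ticks\n", static_cast<long long>(now - enter_tick));
}

// Enter and exit happen in different scopes, so the tick cannot live on the
// stack; it is stored per core, exactly like impl->svc_ticks above.
std::array<std::uint64_t, NUM_CPU_CORES> svc_ticks{};

void EnterSVCProfile(std::size_t core) {
    svc_ticks[core] = ProfileEnter();
}

void ExitSVCProfile(std::size_t core) {
    ProfileLeave(svc_ticks[core]);
}

int main() {
    EnterSVCProfile(0);
    // ... the guest SVC handler would run here ...
    ExitSVCProfile(0);
}
```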