| author | 2022-07-07 20:06:46 -0400 |
|---|---|
| committer | 2022-07-25 12:14:15 -0400 |
| commit | 6523854dd6ac2d202dacb2110bc83b8e61621e9a |
| tree | d3d5c5d3048ef0d316692e5f74a78575666114de /src/core/hle/kernel/kernel.cpp |
| parent | Merge pull request #8549 from liamwhite/kscheduler-sc |
kernel: unlayer CPU interrupt handling
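In short, this commit removes the interrupt-handling layer that `KernelCore` kept in front of its cores: the `std::array<Core::CPUInterruptHandler, NUM_CPU_CORES> interrupts` member and the `Interrupts()` accessors are deleted, `InterruptAllPhysicalCores()` is dropped, `PhysicalCore` is constructed without the handler array, and the core container changes from `std::vector<Kernel::PhysicalCore>` to a fixed-size array of `std::unique_ptr`. The sketch below mirrors only the ownership side of that change, using stand-in types rather than the real `Kernel::PhysicalCore` and `KernelCore::Impl` (which take more constructor arguments and live in `src/core/hle/kernel/`).

```cpp
// Minimal, self-contained sketch of the new core storage with a stand-in
// PhysicalCore type; not yuzu's actual classes.
#include <array>
#include <cstddef>
#include <memory>

constexpr std::size_t NUM_CPU_CORES = 4; // stands in for Core::Hardware::NUM_CPU_CORES

class PhysicalCore {
public:
    explicit PhysicalCore(std::size_t index) : index_{index} {}
    void Initialize(bool is_64_bit) {
        initialized_ = true;
        is_64_bit_ = is_64_bit;
    }
    bool IsInitialized() const { return initialized_; }

private:
    std::size_t index_;
    bool initialized_{false};
    bool is_64_bit_{false};
};

struct Impl {
    // Before: std::vector<PhysicalCore> cores;   (filled with emplace_back)
    // After:  a fixed-size array of owning pointers, one slot per emulated core.
    std::array<std::unique_ptr<PhysicalCore>, NUM_CPU_CORES> cores{};

    void InitializePhysicalCores() {
        for (std::size_t i = 0; i < NUM_CPU_CORES; ++i) {
            cores[i] = std::make_unique<PhysicalCore>(i);
            cores[i]->Initialize(/*is_64_bit=*/true);
        }
    }

    void ReleaseCores() {
        // Replaces cores.clear(): the array keeps its size, the slots are freed.
        for (auto& core : cores) {
            core = nullptr;
        }
    }

    // Accessors now dereference the pointer instead of returning an element by reference.
    PhysicalCore& Core(std::size_t id) { return *cores[id]; }
};

int main() {
    Impl impl;
    impl.InitializePhysicalCores();
    const bool ok = impl.Core(0).IsInitialized();
    impl.ReleaseCores();
    return ok ? 0 : 1;
}
```

A plausible reason for the pointer-based array is that it keeps each `PhysicalCore` at a stable address and removes the copy/move requirements that a by-value `std::vector` imposes, which matters once a core owns its own synchronization state for interrupts.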
Diffstat (limited to 'src/core/hle/kernel/kernel.cpp')

| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | src/core/hle/kernel/kernel.cpp | 41 |

1 file changed, 13 insertions, 28 deletions
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index f4072e1c3..ce7fa8275 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -17,7 +17,6 @@
 #include "common/thread.h"
 #include "common/thread_worker.h"
 #include "core/arm/arm_interface.h"
-#include "core/arm/cpu_interrupt_handler.h"
 #include "core/arm/exclusive_monitor.h"
 #include "core/core.h"
 #include "core/core_timing.h"
@@ -82,7 +81,7 @@ struct KernelCore::Impl {

     void InitializeCores() {
         for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            cores[core_id].Initialize((*current_process).Is64BitProcess());
+            cores[core_id]->Initialize((*current_process).Is64BitProcess());
             system.Memory().SetCurrentPageTable(*current_process, core_id);
         }
     }
@@ -100,7 +99,9 @@ struct KernelCore::Impl {
         next_user_process_id = KProcess::ProcessIDMin;
         next_thread_id = 1;

-        cores.clear();
+        for (auto& core : cores) {
+            core = nullptr;
+        }

         global_handle_table->Finalize();
         global_handle_table.reset();
@@ -199,7 +200,7 @@ struct KernelCore::Impl {
             const s32 core{static_cast<s32>(i)};

             schedulers[i] = std::make_unique<Kernel::KScheduler>(system.Kernel());
-            cores.emplace_back(i, system, *schedulers[i], interrupts);
+            cores[i] = std::make_unique<Kernel::PhysicalCore>(i, system, *schedulers[i]);

             auto* main_thread{Kernel::KThread::Create(system.Kernel())};
             main_thread->SetName(fmt::format("MainThread:{}", core));
@@ -761,7 +762,7 @@ struct KernelCore::Impl {
     std::unordered_set<KAutoObject*> registered_in_use_objects;

     std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
-    std::vector<Kernel::PhysicalCore> cores;
+    std::array<std::unique_ptr<Kernel::PhysicalCore>, Core::Hardware::NUM_CPU_CORES> cores;

     // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others
     std::atomic<u32> next_host_thread_id{Core::Hardware::NUM_CPU_CORES};
@@ -785,7 +786,6 @@ struct KernelCore::Impl {
     Common::ThreadWorker service_threads_manager;

     std::array<KThread*, Core::Hardware::NUM_CPU_CORES> shutdown_threads;
-    std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
     std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};

     bool is_multicore{};
@@ -874,11 +874,11 @@ const Kernel::KScheduler& KernelCore::Scheduler(std::size_t id) const {
 }

 Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) {
-    return impl->cores[id];
+    return *impl->cores[id];
 }

 const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
-    return impl->cores[id];
+    return *impl->cores[id];
 }

 size_t KernelCore::CurrentPhysicalCoreIndex() const {
@@ -890,11 +890,11 @@ size_t KernelCore::CurrentPhysicalCoreIndex() const {
 }

 Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
-    return impl->cores[CurrentPhysicalCoreIndex()];
+    return *impl->cores[CurrentPhysicalCoreIndex()];
 }

 const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
-    return impl->cores[CurrentPhysicalCoreIndex()];
+    return *impl->cores[CurrentPhysicalCoreIndex()];
 }

 Kernel::KScheduler* KernelCore::CurrentScheduler() {
@@ -906,15 +906,6 @@ Kernel::KScheduler* KernelCore::CurrentScheduler() {
     return impl->schedulers[core_id].get();
 }

-std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts() {
-    return impl->interrupts;
-}
-
-const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& KernelCore::Interrupts()
-    const {
-    return impl->interrupts;
-}
-
 Kernel::TimeManager& KernelCore::TimeManager() {
     return impl->time_manager;
 }
@@ -939,24 +930,18 @@ const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const {
     return *impl->global_object_list_container;
 }

-void KernelCore::InterruptAllPhysicalCores() {
-    for (auto& physical_core : impl->cores) {
-        physical_core.Interrupt();
-    }
-}
-
 void KernelCore::InvalidateAllInstructionCaches() {
     for (auto& physical_core : impl->cores) {
-        physical_core.ArmInterface().ClearInstructionCache();
+        physical_core->ArmInterface().ClearInstructionCache();
     }
 }

 void KernelCore::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
     for (auto& physical_core : impl->cores) {
-        if (!physical_core.IsInitialized()) {
+        if (!physical_core->IsInitialized()) {
             continue;
         }
-        physical_core.ArmInterface().InvalidateCacheRange(addr, size);
+        physical_core->ArmInterface().InvalidateCacheRange(addr, size);
     }
 }