author    bunnei 2021-08-25 20:59:28 -0700
committer GitHub 2021-08-25 20:59:28 -0700
commit    0c8594b2251cf600afc3a89ecf1114eb3a25f700 (patch)
tree      d6f7d065cd8e2f00c85b9749076ef303a705c864 /src
parent    Merge pull request #6919 from ameerj/vk-int8-capability (diff)
Revert "kernel: Various improvements to scheduler"
Diffstat (limited to 'src')
-rw-r--r-- src/core/core.cpp                               |  6
-rw-r--r-- src/core/core.h                                 |  3
-rw-r--r-- src/core/cpu_manager.cpp                        | 40
-rw-r--r-- src/core/cpu_manager.h                          |  6
-rw-r--r-- src/core/hle/kernel/k_address_arbiter.cpp       |  4
-rw-r--r-- src/core/hle/kernel/k_auto_object.h             |  4
-rw-r--r-- src/core/hle/kernel/k_condition_variable.cpp    |  2
-rw-r--r-- src/core/hle/kernel/k_handle_table.cpp          |  6
-rw-r--r-- src/core/hle/kernel/k_handle_table.h            |  2
-rw-r--r-- src/core/hle/kernel/k_process.cpp               |  1
-rw-r--r-- src/core/hle/kernel/k_scheduler.cpp             | 85
-rw-r--r-- src/core/hle/kernel/k_scheduler.h               |  2
-rw-r--r-- src/core/hle/kernel/k_thread.cpp                | 21
-rw-r--r-- src/core/hle/kernel/k_thread.h                  | 36
-rw-r--r-- src/core/hle/kernel/kernel.cpp                  | 57
-rw-r--r-- src/core/hle/kernel/kernel.h                    |  3
-rw-r--r-- src/core/hle/kernel/svc.cpp                     |  2
-rw-r--r-- src/core/hle/service/nvflinger/buffer_queue.cpp | 25
-rw-r--r-- src/core/hle/service/nvflinger/buffer_queue.h   | 11
-rw-r--r-- src/core/hle/service/nvflinger/nvflinger.cpp    | 15
-rw-r--r-- src/core/hle/service/nvflinger/nvflinger.h      |  3
-rw-r--r-- src/core/hle/service/vi/display/vi_display.cpp  | 17
-rw-r--r-- src/core/hle/service/vi/display/vi_display.h    | 13
23 files changed, 140 insertions, 224 deletions
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 5893a86bf..ba4629993 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -507,6 +507,12 @@ const ARM_Interface& System::CurrentArmInterface() const {
     return impl->kernel.CurrentPhysicalCore().ArmInterface();
 }
 
+std::size_t System::CurrentCoreIndex() const {
+    std::size_t core = impl->kernel.GetCurrentHostThreadID();
+    ASSERT(core < Core::Hardware::NUM_CPU_CORES);
+    return core;
+}
+
 Kernel::PhysicalCore& System::CurrentPhysicalCore() {
     return impl->kernel.CurrentPhysicalCore();
 }
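Note on the reverted helper: System::CurrentCoreIndex() resolves the calling host thread to an emulated core index via a per-thread ID. A rough, self-contained sketch of that idiom follows (hypothetical RegisterCoreThread/CurrentCoreIndex free functions; the thread_local mirrors the host_thread_id member visible in the kernel.cpp hunk further down):

#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr std::size_t NUM_CPU_CORES = 4;

// Each emulated-core host thread stores its own core index; every other host
// thread keeps the sentinel and therefore fails the bounds check below.
thread_local std::size_t host_thread_id = SIZE_MAX;

void RegisterCoreThread(std::size_t core) {
    host_thread_id = core; // Called once when a core's host thread starts.
}

std::size_t CurrentCoreIndex() {
    assert(host_thread_id < NUM_CPU_CORES); // Only valid on a core thread.
    return host_thread_id;
}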
diff --git a/src/core/core.h b/src/core/core.h
index f9116ebb6..715ab88e7 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -205,6 +205,9 @@ public:
     /// Gets an ARM interface to the CPU core that is currently running
     [[nodiscard]] const ARM_Interface& CurrentArmInterface() const;
 
+    /// Gets the index of the currently running CPU core
+    [[nodiscard]] std::size_t CurrentCoreIndex() const;
+
     /// Gets the physical core for the CPU core that is currently running
     [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore();
 
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index de2e5563e..7e195346b 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -21,25 +21,34 @@ namespace Core {
 CpuManager::CpuManager(System& system_) : system{system_} {}
 CpuManager::~CpuManager() = default;
 
-void CpuManager::ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager,
-                             std::size_t core) {
-    cpu_manager.RunThread(stop_token, core);
-}
+void CpuManager::ThreadStart(CpuManager& cpu_manager, std::size_t core) {
+    cpu_manager.RunThread(core);
+}
 
 void CpuManager::Initialize() {
     running_mode = true;
     if (is_multicore) {
         for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
-            core_data[core].host_thread = std::jthread(ThreadStart, std::ref(*this), core);
+            core_data[core].host_thread =
+                std::make_unique<std::thread>(ThreadStart, std::ref(*this), core);
         }
     } else {
-        core_data[0].host_thread = std::jthread(ThreadStart, std::ref(*this), 0);
+        core_data[0].host_thread = std::make_unique<std::thread>(ThreadStart, std::ref(*this), 0);
     }
 }
 
 void CpuManager::Shutdown() {
     running_mode = false;
     Pause(false);
+    if (is_multicore) {
+        for (auto& data : core_data) {
+            data.host_thread->join();
+            data.host_thread.reset();
+        }
+    } else {
+        core_data[0].host_thread->join();
+        core_data[0].host_thread.reset();
+    }
 }
 
 std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() {
@@ -118,18 +127,17 @@ void CpuManager::MultiCoreRunGuestLoop() {
             physical_core = &kernel.CurrentPhysicalCore();
         }
         system.ExitDynarmicProfile();
-        {
-            Kernel::KScopedDisableDispatch dd(kernel);
-            physical_core->ArmInterface().ClearExclusiveState();
-        }
+        physical_core->ArmInterface().ClearExclusiveState();
+        kernel.CurrentScheduler()->RescheduleCurrentCore();
     }
 }
 
 void CpuManager::MultiCoreRunIdleThread() {
     auto& kernel = system.Kernel();
     while (true) {
-        Kernel::KScopedDisableDispatch dd(kernel);
-        kernel.CurrentPhysicalCore().Idle();
+        auto& physical_core = kernel.CurrentPhysicalCore();
+        physical_core.Idle();
+        kernel.CurrentScheduler()->RescheduleCurrentCore();
    }
 }
 
@@ -137,12 +145,12 @@ void CpuManager::MultiCoreRunSuspendThread() {
     auto& kernel = system.Kernel();
     kernel.CurrentScheduler()->OnThreadStart();
     while (true) {
-        auto core = kernel.CurrentPhysicalCoreIndex();
+        auto core = kernel.GetCurrentHostThreadID();
         auto& scheduler = *kernel.CurrentScheduler();
         Kernel::KThread* current_thread = scheduler.GetCurrentThread();
         Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
         ASSERT(scheduler.ContextSwitchPending());
-        ASSERT(core == kernel.CurrentPhysicalCoreIndex());
+        ASSERT(core == kernel.GetCurrentHostThreadID());
         scheduler.RescheduleCurrentCore();
     }
 }
@@ -309,7 +317,7 @@ void CpuManager::Pause(bool paused) {
     }
 }
 
-void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
+void CpuManager::RunThread(std::size_t core) {
     /// Initialization
     system.RegisterCoreThread(core);
     std::string name;
@@ -348,8 +356,8 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
         sc_sync_first_use = false;
     }
 
-    // Emulation was stopped
-    if (stop_token.stop_requested()) {
+    // Abort if emulation was killed before the session really starts
+    if (!system.IsPoweredOn()) {
         return;
     }
 
diff --git a/src/core/cpu_manager.h b/src/core/cpu_manager.h
index 9d92d4af0..140263b09 100644
--- a/src/core/cpu_manager.h
+++ b/src/core/cpu_manager.h
@@ -78,9 +78,9 @@ private:
     void SingleCoreRunSuspendThread();
     void SingleCorePause(bool paused);
 
-    static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core);
+    static void ThreadStart(CpuManager& cpu_manager, std::size_t core);
 
-    void RunThread(std::stop_token stop_token, std::size_t core);
+    void RunThread(std::size_t core);
 
     struct CoreData {
         std::shared_ptr<Common::Fiber> host_context;
@@ -89,7 +89,7 @@ private:
         std::atomic<bool> is_running;
         std::atomic<bool> is_paused;
         std::atomic<bool> initialized;
-        std::jthread host_thread;
+        std::unique_ptr<std::thread> host_thread;
     };
 
     std::atomic<bool> running_mode{};
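Context for the thread-type swap above: std::jthread (C++20) hands its callable a std::stop_token and joins automatically on destruction, while a plain std::thread must be signalled and joined by hand — which is why the revert reintroduces the explicit join()/reset() loop in CpuManager::Shutdown() and the IsPoweredOn() check in RunThread(). A minimal stand-alone sketch of the two styles (not yuzu code):

#include <atomic>
#include <chrono>
#include <thread>

std::atomic<bool> running{true};

// jthread style: cooperative cancellation arrives through the stop_token.
void JthreadWorker(std::stop_token stop_token) {
    while (!stop_token.stop_requested()) {
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
}

// Plain-thread style: shutdown is signalled through shared state instead.
void PlainWorker() {
    while (running.load()) {
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
}

int main() {
    {
        std::jthread worker{JthreadWorker};
    } // Destructor requests stop and joins automatically.

    std::thread worker{PlainWorker};
    running = false; // Equivalent of running_mode = false in Shutdown().
    worker.join();   // Must join explicitly, as the reverted Shutdown() does.
}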
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 6771ef621..1b429bc1e 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -28,7 +28,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
 
 bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
+    const auto current_core = system.CurrentCoreIndex();
 
     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
@@ -58,7 +58,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
 
 bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
+    const auto current_core = system.CurrentCoreIndex();
 
     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index 165b76747..e4fcdbc67 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -170,10 +170,6 @@ public:
         }
     }
 
-    const std::string& GetName() const {
-        return name;
-    }
-
 private:
     void RegisterWithKernel();
     void UnregisterWithKernel();
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 4174f35fd..ef14ad1d2 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -35,7 +35,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
 bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
                       u32 new_orr_mask) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
+    const auto current_core = system.CurrentCoreIndex();
 
     // Load the value from the address.
     const auto expected = monitor.ExclusiveRead32(current_core, address);
diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp
index d720c2dda..6a420d5b0 100644
--- a/src/core/hle/kernel/k_handle_table.cpp
+++ b/src/core/hle/kernel/k_handle_table.cpp
@@ -13,7 +13,6 @@ ResultCode KHandleTable::Finalize() {
     // Get the table and clear our record of it.
     u16 saved_table_size = 0;
     {
-        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         std::swap(m_table_size, saved_table_size);
@@ -44,7 +43,6 @@ bool KHandleTable::Remove(Handle handle) {
     // Find the object and free the entry.
     KAutoObject* obj = nullptr;
     {
-        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if (this->IsValidHandle(handle)) {
@@ -63,7 +61,6 @@ bool KHandleTable::Remove(Handle handle) {
 }
 
 ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
-    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.
@@ -86,7 +83,6 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
 }
 
 ResultCode KHandleTable::Reserve(Handle* out_handle) {
-    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Never exceed our capacity.
@@ -97,7 +93,6 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) {
 }
 
 void KHandleTable::Unreserve(Handle handle) {
-    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.
@@ -116,7 +111,6 @@ void KHandleTable::Unreserve(Handle handle) {
 }
 
 void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
-    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);
 
     // Unpack the handle.
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index 75dcec7df..2ff6aa160 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
@@ -69,7 +69,6 @@ public:
     template <typename T = KAutoObject>
     KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
         // Lock and look up in table.
-        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
 
         if constexpr (std::is_same_v<T, KAutoObject>) {
@@ -124,7 +123,6 @@ public:
         size_t num_opened;
         {
             // Lock the table.
-            KScopedDisableDispatch dd(kernel);
             KScopedSpinLock lk(m_lock);
             for (num_opened = 0; num_opened < num_handles; num_opened++) {
                 // Get the current handle.
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 3d7e6707e..8ead1a769 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -59,7 +59,6 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
     thread->GetContext64().cpu_registers[0] = 0;
     thread->GetContext32().cpu_registers[1] = thread_handle;
     thread->GetContext64().cpu_registers[1] = thread_handle;
-    thread->DisableDispatch();
 
     auto& kernel = system.Kernel();
     // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 6ddbae52c..6a7d80d03 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -376,18 +376,20 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
 }
 
 void KScheduler::DisableScheduling(KernelCore& kernel) {
-    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
-    GetCurrentThreadPointer(kernel)->DisableDispatch();
+    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
+        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
+        scheduler->GetCurrentThread()->DisableDispatch();
+    }
 }
 
 void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
-    ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 1);
-
-    if (GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() > 1) {
-        GetCurrentThreadPointer(kernel)->EnableDispatch();
-    } else {
-        RescheduleCores(kernel, cores_needing_scheduling);
+    if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
+        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
+        if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
+            scheduler->GetCurrentThread()->EnableDispatch();
+        }
     }
+    RescheduleCores(kernel, cores_needing_scheduling);
 }
 
 u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -615,17 +617,13 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
     state.highest_priority_thread = nullptr;
 }
 
-void KScheduler::Finalize() {
+KScheduler::~KScheduler() {
     if (idle_thread) {
         idle_thread->Close();
         idle_thread = nullptr;
     }
 }
 
-KScheduler::~KScheduler() {
-    ASSERT(!idle_thread);
-}
-
 KThread* KScheduler::GetCurrentThread() const {
     if (auto result = current_thread.load(); result) {
         return result;
@@ -644,12 +642,10 @@ void KScheduler::RescheduleCurrentCore() {
     if (phys_core.IsInterrupted()) {
         phys_core.ClearInterrupt();
     }
-
     guard.Lock();
     if (state.needs_scheduling.load()) {
         Schedule();
     } else {
-        GetCurrentThread()->EnableDispatch();
         guard.Unlock();
     }
 }
@@ -659,33 +655,26 @@ void KScheduler::OnThreadStart() {
 }
 
 void KScheduler::Unload(KThread* thread) {
-    ASSERT(thread);
-
     LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
 
-    if (thread->IsCallingSvc()) {
-        thread->ClearIsCallingSvc();
-    }
-
-    auto& physical_core = system.Kernel().PhysicalCore(core_id);
-    if (!physical_core.IsInitialized()) {
-        return;
-    }
-
-    Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
-    cpu_core.SaveContext(thread->GetContext32());
-    cpu_core.SaveContext(thread->GetContext64());
-    // Save the TPIDR_EL0 system register in case it was modified.
-    thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
-    cpu_core.ClearExclusiveState();
-
-    if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
-        prev_thread = thread;
-    } else {
-        prev_thread = nullptr;
+    if (thread) {
+        if (thread->IsCallingSvc()) {
+            thread->ClearIsCallingSvc();
+        }
+        if (!thread->IsTerminationRequested()) {
+            prev_thread = thread;
+
+            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+            cpu_core.SaveContext(thread->GetContext32());
+            cpu_core.SaveContext(thread->GetContext64());
+            // Save the TPIDR_EL0 system register in case it was modified.
+            thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+            cpu_core.ClearExclusiveState();
+        } else {
+            prev_thread = nullptr;
+        }
+        thread->context_guard.Unlock();
     }
-
-    thread->context_guard.Unlock();
 }
 
 void KScheduler::Reload(KThread* thread) {
@@ -694,6 +683,11 @@ void KScheduler::Reload(KThread* thread) {
     if (thread) {
         ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
 
+        auto* const thread_owner_process = thread->GetOwnerProcess();
+        if (thread_owner_process != nullptr) {
+            system.Kernel().MakeCurrentProcess(thread_owner_process);
+        }
+
         Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
         cpu_core.LoadContext(thread->GetContext32());
         cpu_core.LoadContext(thread->GetContext64());
@@ -711,7 +705,7 @@ void KScheduler::SwitchContextStep2() {
 }
 
 void KScheduler::ScheduleImpl() {
-    KThread* previous_thread = GetCurrentThread();
+    KThread* previous_thread = current_thread.load();
     KThread* next_thread = state.highest_priority_thread;
 
     state.needs_scheduling = false;
@@ -723,15 +717,10 @@ void KScheduler::ScheduleImpl() {
 
     // If we're not actually switching thread, there's nothing to do.
     if (next_thread == current_thread.load()) {
-        previous_thread->EnableDispatch();
         guard.Unlock();
         return;
     }
 
-    if (next_thread->GetCurrentCore() != core_id) {
-        next_thread->SetCurrentCore(core_id);
-    }
-
     current_thread.store(next_thread);
 
     KProcess* const previous_process = system.Kernel().CurrentProcess();
@@ -742,7 +731,11 @@ void KScheduler::ScheduleImpl() {
     Unload(previous_thread);
 
     std::shared_ptr<Common::Fiber>* old_context;
-    old_context = &previous_thread->GetHostContext();
+    if (previous_thread != nullptr) {
+        old_context = &previous_thread->GetHostContext();
+    } else {
+        old_context = &idle_thread->GetHostContext();
+    }
     guard.Unlock();
 
     Common::Fiber::YieldTo(*old_context, *switch_fiber);
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 516e0cdba..12cfae919 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -33,8 +33,6 @@ public:
     explicit KScheduler(Core::System& system_, s32 core_id_);
     ~KScheduler();
 
-    void Finalize();
-
     /// Reschedules to the next available thread (call after current thread is suspended)
     void RescheduleCurrentCore();
 
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 0f6808ade..9f1d3156b 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -14,7 +14,6 @@
 #include "common/fiber.h"
 #include "common/logging/log.h"
 #include "common/scope_exit.h"
-#include "common/settings.h"
 #include "common/thread_queue_list.h"
 #include "core/core.h"
 #include "core/cpu_manager.h"
@@ -189,7 +188,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
     // Setup the stack parameters.
     StackParameters& sp = GetStackParameters();
     sp.cur_thread = this;
-    sp.disable_count = 0;
+    sp.disable_count = 1;
     SetInExceptionHandler();
 
     // Set thread ID.
@@ -216,10 +215,9 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
     // Initialize the thread.
     R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
 
-    // Initialize emulation parameters.
+    // Initialize host context.
     thread->host_context =
         std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
-    thread->is_single_core = !Settings::values.use_multi_core.GetValue();
 
     return ResultSuccess;
 }
@@ -972,9 +970,6 @@ ResultCode KThread::Run() {
 
         // Set our state and finish.
         SetState(ThreadState::Runnable);
-
-        DisableDispatch();
-
         return ResultSuccess;
     }
 }
@@ -1059,16 +1054,4 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
     return GetCurrentThread(kernel).GetCurrentCore();
 }
 
-KScopedDisableDispatch::~KScopedDisableDispatch() {
-    if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
-        auto scheduler = kernel.CurrentScheduler();
-
-        if (scheduler) {
-            scheduler->RescheduleCurrentCore();
-        }
-    } else {
-        GetCurrentThread(kernel).EnableDispatch();
-    }
-}
-
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index e4c4c877d..c77f44ad4 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -450,39 +450,16 @@ public:
         sleeping_queue = q;
     }
 
-    [[nodiscard]] bool IsKernelThread() const {
-        return GetActiveCore() == 3;
-    }
-
-    [[nodiscard]] bool IsDispatchTrackingDisabled() const {
-        return is_single_core || IsKernelThread();
-    }
-
     [[nodiscard]] s32 GetDisableDispatchCount() const {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return 1;
-        }
-
         return this->GetStackParameters().disable_count;
     }
 
     void DisableDispatch() {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return;
-        }
-
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
         this->GetStackParameters().disable_count++;
     }
 
     void EnableDispatch() {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return;
-        }
-
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
         this->GetStackParameters().disable_count--;
     }
@@ -731,7 +708,6 @@ private:
 
     // For emulation
     std::shared_ptr<Common::Fiber> host_context{};
-    bool is_single_core{};
 
     // For debugging
    std::vector<KSynchronizationObject*> wait_objects_for_debugging;
@@ -776,16 +752,4 @@ public:
     }
 };
 
-class KScopedDisableDispatch {
-public:
-    [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
-        GetCurrentThread(kernel).DisableDispatch();
-    }
-
-    ~KScopedDisableDispatch();
-
-private:
-    KernelCore& kernel;
-};
-
 } // namespace Kernel
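For readers tracing the removals above: KScopedDisableDispatch was an RAII guard that bumped the current thread's disable_count on construction and dropped it (or triggered a reschedule) on destruction. A generic sketch of that guard pattern follows (hypothetical DispatchTracker type; deliberately simpler than the reverted implementation, which rescheduled when the count reached zero):

#include <cassert>

struct DispatchTracker {
    int disable_count{0}; // Analogous to StackParameters::disable_count.
};

class ScopedDisableDispatch {
public:
    explicit ScopedDisableDispatch(DispatchTracker& tracker_) : tracker{tracker_} {
        assert(tracker.disable_count >= 0);
        ++tracker.disable_count; // Dispatch stays disabled while we exist.
    }
    ~ScopedDisableDispatch() {
        assert(tracker.disable_count > 0);
        --tracker.disable_count; // Re-enabled on every exit path of the scope.
    }
    ScopedDisableDispatch(const ScopedDisableDispatch&) = delete;
    ScopedDisableDispatch& operator=(const ScopedDisableDispatch&) = delete;

private:
    DispatchTracker& tracker;
};

int main() {
    DispatchTracker tracker;
    {
        ScopedDisableDispatch dd{tracker};
        assert(tracker.disable_count == 1);
    } // Guard destroyed here, even if the block returned early or threw.
    assert(tracker.disable_count == 0);
}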
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 8fdab44e4..bea945301 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -85,9 +85,8 @@ struct KernelCore::Impl {
     }
 
     void InitializeCores() {
-        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            cores[core_id].Initialize(current_process->Is64BitProcess());
-            system.Memory().SetCurrentPageTable(*current_process, core_id);
+        for (auto& core : cores) {
+            core.Initialize(current_process->Is64BitProcess());
         }
     }
 
@@ -132,6 +131,15 @@ struct KernelCore::Impl {
         next_user_process_id = KProcess::ProcessIDMin;
         next_thread_id = 1;
 
+        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
+            if (suspend_threads[core_id]) {
+                suspend_threads[core_id]->Close();
+                suspend_threads[core_id] = nullptr;
+            }
+
+            schedulers[core_id].reset();
+        }
+
         cores.clear();
 
         global_handle_table->Finalize();
@@ -159,16 +167,6 @@ struct KernelCore::Impl {
         CleanupObject(time_shared_mem);
         CleanupObject(system_resource_limit);
 
-        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            if (suspend_threads[core_id]) {
-                suspend_threads[core_id]->Close();
-                suspend_threads[core_id] = nullptr;
-            }
-
-            schedulers[core_id]->Finalize();
-            schedulers[core_id].reset();
-        }
-
         // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others
         next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
 
@@ -259,6 +257,14 @@ struct KernelCore::Impl {
 
     void MakeCurrentProcess(KProcess* process) {
         current_process = process;
+        if (process == nullptr) {
+            return;
+        }
+
+        const u32 core_id = GetCurrentHostThreadID();
+        if (core_id < Core::Hardware::NUM_CPU_CORES) {
+            system.Memory().SetCurrentPageTable(*process, core_id);
+        }
     }
 
     static inline thread_local u32 host_thread_id = UINT32_MAX;
@@ -821,20 +827,16 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
     return impl->cores[id];
 }
 
-size_t KernelCore::CurrentPhysicalCoreIndex() const {
-    const u32 core_id = impl->GetCurrentHostThreadID();
-    if (core_id >= Core::Hardware::NUM_CPU_CORES) {
-        return Core::Hardware::NUM_CPU_CORES - 1;
-    }
-    return core_id;
-}
-
 Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
-    return impl->cores[CurrentPhysicalCoreIndex()];
+    u32 core_id = impl->GetCurrentHostThreadID();
+    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+    return impl->cores[core_id];
 }
 
 const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
-    return impl->cores[CurrentPhysicalCoreIndex()];
+    u32 core_id = impl->GetCurrentHostThreadID();
+    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+    return impl->cores[core_id];
 }
 
 Kernel::KScheduler* KernelCore::CurrentScheduler() {
@@ -1027,9 +1029,6 @@ void KernelCore::Suspend(bool in_suspention) {
             impl->suspend_threads[core_id]->SetState(state);
             impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
                 ThreadWaitReasonForDebugging::Suspended);
-            if (!should_suspend) {
-                impl->suspend_threads[core_id]->DisableDispatch();
-            }
         }
     }
 }
@@ -1044,11 +1043,13 @@ void KernelCore::ExceptionalExit() {
 }
 
 void KernelCore::EnterSVCProfile() {
-    impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
+    std::size_t core = impl->GetCurrentHostThreadID();
+    impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
 }
 
 void KernelCore::ExitSVCProfile() {
-    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
+    std::size_t core = impl->GetCurrentHostThreadID();
+    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
 }
 
 std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
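The MakeCurrentProcess() hunk above restores switching the per-core page table whenever the current process changes, instead of doing it once per core in InitializeCores(). A toy sketch of that pattern (hypothetical Process/PageTable types, not yuzu's memory API):

#include <array>
#include <cstddef>

constexpr std::size_t NUM_CPU_CORES = 4;

struct PageTable {}; // Stand-in for a process's address-space mapping.

struct Process {
    PageTable page_table;
};

// Each core tracks which page table its memory accesses go through.
std::array<PageTable*, NUM_CPU_CORES> current_page_table{};

thread_local std::size_t host_thread_id = NUM_CPU_CORES; // Set per core thread.

Process* current_process = nullptr;

void MakeCurrentProcess(Process* process) {
    current_process = process;
    if (process == nullptr) {
        return;
    }
    // Only switch the page table when called from an emulated-core thread.
    if (host_thread_id < NUM_CPU_CORES) {
        current_page_table[host_thread_id] = &process->page_table;
    }
}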
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 57535433b..3a6db0b1c 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -146,9 +146,6 @@ public:
     /// Gets the an instance of the respective physical CPU core.
     const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
 
-    /// Gets the current physical core index for the running host thread.
-    std::size_t CurrentPhysicalCoreIndex() const;
-
     /// Gets the sole instance of the Scheduler at the current running core.
     Kernel::KScheduler* CurrentScheduler();
 
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 890c52198..62fb06c45 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -877,7 +877,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
             const u64 thread_ticks = current_thread->GetCpuTime();
 
             out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
-        } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
+        } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
             out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
         }
 
diff --git a/src/core/hle/service/nvflinger/buffer_queue.cpp b/src/core/hle/service/nvflinger/buffer_queue.cpp
index b4c3a6099..59ddf6298 100644
--- a/src/core/hle/service/nvflinger/buffer_queue.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue.cpp
@@ -9,20 +9,17 @@
 #include "core/core.h"
 #include "core/hle/kernel/k_writable_event.h"
 #include "core/hle/kernel/kernel.h"
-#include "core/hle/service/kernel_helpers.h"
 #include "core/hle/service/nvflinger/buffer_queue.h"
 
 namespace Service::NVFlinger {
 
-BufferQueue::BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_,
-                         KernelHelpers::ServiceContext& service_context_)
-    : id(id_), layer_id(layer_id_), service_context{service_context_} {
-    buffer_wait_event = service_context.CreateEvent("BufferQueue:WaitEvent");
+BufferQueue::BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_)
+    : id(id_), layer_id(layer_id_), buffer_wait_event{kernel} {
+    Kernel::KAutoObject::Create(std::addressof(buffer_wait_event));
+    buffer_wait_event.Initialize("BufferQueue:WaitEvent");
 }
 
-BufferQueue::~BufferQueue() {
-    service_context.CloseEvent(buffer_wait_event);
-}
+BufferQueue::~BufferQueue() = default;
 
 void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer) {
     ASSERT(slot < buffer_slots);
@@ -44,7 +41,7 @@ void BufferQueue::SetPreallocatedBuffer(u32 slot, const IGBPBuffer& igbp_buffer)
         .multi_fence = {},
     };
 
-    buffer_wait_event->GetWritableEvent().Signal();
+    buffer_wait_event.GetWritableEvent().Signal();
 }
 
 std::optional<std::pair<u32, Service::Nvidia::MultiFence*>> BufferQueue::DequeueBuffer(u32 width,
@@ -122,7 +119,7 @@ void BufferQueue::CancelBuffer(u32 slot, const Service::Nvidia::MultiFence& mult
     }
     free_buffers_condition.notify_one();
 
-    buffer_wait_event->GetWritableEvent().Signal();
+    buffer_wait_event.GetWritableEvent().Signal();
 }
 
 std::optional<std::reference_wrapper<const BufferQueue::Buffer>> BufferQueue::AcquireBuffer() {
@@ -157,7 +154,7 @@ void BufferQueue::ReleaseBuffer(u32 slot) {
     }
     free_buffers_condition.notify_one();
 
-    buffer_wait_event->GetWritableEvent().Signal();
+    buffer_wait_event.GetWritableEvent().Signal();
 }
 
 void BufferQueue::Connect() {
@@ -172,7 +169,7 @@ void BufferQueue::Disconnect() {
         std::unique_lock lock{queue_sequence_mutex};
         queue_sequence.clear();
     }
-    buffer_wait_event->GetWritableEvent().Signal();
+    buffer_wait_event.GetWritableEvent().Signal();
     is_connect = false;
     free_buffers_condition.notify_one();
 }
@@ -192,11 +189,11 @@ u32 BufferQueue::Query(QueryType type) {
 }
 
 Kernel::KWritableEvent& BufferQueue::GetWritableBufferWaitEvent() {
-    return buffer_wait_event->GetWritableEvent();
+    return buffer_wait_event.GetWritableEvent();
 }
 
 Kernel::KReadableEvent& BufferQueue::GetBufferWaitEvent() {
-    return buffer_wait_event->GetReadableEvent();
+    return buffer_wait_event.GetReadableEvent();
 }
 
 } // namespace Service::NVFlinger
diff --git a/src/core/hle/service/nvflinger/buffer_queue.h b/src/core/hle/service/nvflinger/buffer_queue.h
index 759247eb0..61e337ac5 100644
--- a/src/core/hle/service/nvflinger/buffer_queue.h
+++ b/src/core/hle/service/nvflinger/buffer_queue.h
@@ -24,10 +24,6 @@ class KReadableEvent;
 class KWritableEvent;
 } // namespace Kernel
 
-namespace Service::KernelHelpers {
-class ServiceContext;
-} // namespace Service::KernelHelpers
-
 namespace Service::NVFlinger {
 
 constexpr u32 buffer_slots = 0x40;
@@ -58,8 +54,7 @@ public:
         NativeWindowFormat = 2,
     };
 
-    explicit BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_,
-                         KernelHelpers::ServiceContext& service_context_);
+    explicit BufferQueue(Kernel::KernelCore& kernel, u32 id_, u64 layer_id_);
     ~BufferQueue();
 
     enum class BufferTransformFlags : u32 {
@@ -135,14 +130,12 @@ private:
     std::list<u32> free_buffers;
     std::array<Buffer, buffer_slots> buffers;
    std::list<u32> queue_sequence;
-    Kernel::KEvent* buffer_wait_event{};
+    Kernel::KEvent buffer_wait_event;
 
     std::mutex free_buffers_mutex;
    std::condition_variable free_buffers_condition;
 
     std::mutex queue_sequence_mutex;
-
-    KernelHelpers::ServiceContext& service_context;
 };
 
 } // namespace Service::NVFlinger
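Both the BufferQueue and Display hunks revert from a ServiceContext-created Kernel::KEvent* back to an in-place KEvent member, trading factory-managed lifetime for plain member lifetime. A language-level sketch of that difference (hypothetical Event/EventFactory types, not the real kernel API):

#include <string>

struct Event {
    explicit Event(std::string name_) : name{std::move(name_)} {}
    std::string name;
};

// Factory style: the owner stores Event* and must pair Create with Close.
class EventFactory {
public:
    Event* CreateEvent(std::string name) { return new Event(std::move(name)); }
    void CloseEvent(Event* event) { delete event; }
};

class PointerOwner {
public:
    explicit PointerOwner(EventFactory& factory_) : factory{factory_} {
        event = factory.CreateEvent("WaitEvent");
    }
    ~PointerOwner() { factory.CloseEvent(event); } // Must remember to close.

private:
    EventFactory& factory;
    Event* event{};
};

// Member style (what this revert restores): the event is constructed and
// destroyed together with its owner, with no factory bookkeeping.
class MemberOwner {
public:
    MemberOwner() : event{"WaitEvent"} {}

private:
    Event event;
};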
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index 00bff8caf..941748970 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -61,13 +61,12 @@ void NVFlinger::SplitVSync() {
     }
 }
 
-NVFlinger::NVFlinger(Core::System& system_)
-    : system(system_), service_context(system_, "nvflinger") {
-    displays.emplace_back(0, "Default", service_context, system);
-    displays.emplace_back(1, "External", service_context, system);
-    displays.emplace_back(2, "Edid", service_context, system);
-    displays.emplace_back(3, "Internal", service_context, system);
-    displays.emplace_back(4, "Null", service_context, system);
+NVFlinger::NVFlinger(Core::System& system_) : system(system_) {
+    displays.emplace_back(0, "Default", system);
+    displays.emplace_back(1, "External", system);
+    displays.emplace_back(2, "Edid", system);
+    displays.emplace_back(3, "Internal", system);
+    displays.emplace_back(4, "Null", system);
     guard = std::make_shared<std::mutex>();
 
     // Schedule the screen composition events
@@ -147,7 +146,7 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
 void NVFlinger::CreateLayerAtId(VI::Display& display, u64 layer_id) {
     const u32 buffer_queue_id = next_buffer_queue_id++;
     buffer_queues.emplace_back(
-        std::make_unique<BufferQueue>(system.Kernel(), buffer_queue_id, layer_id, service_context));
+        std::make_unique<BufferQueue>(system.Kernel(), buffer_queue_id, layer_id));
     display.CreateLayer(layer_id, *buffer_queues.back());
 }
 
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h
index 6d84cafb4..d80fd07ef 100644
--- a/src/core/hle/service/nvflinger/nvflinger.h
+++ b/src/core/hle/service/nvflinger/nvflinger.h
@@ -15,7 +15,6 @@
 #include <vector>
 
 #include "common/common_types.h"
-#include "core/hle/service/kernel_helpers.h"
 
 namespace Common {
 class Event;
@@ -136,8 +135,6 @@ private:
     std::unique_ptr<std::thread> vsync_thread;
     std::unique_ptr<Common::Event> wait_event;
     std::atomic<bool> is_running{};
-
-    KernelHelpers::ServiceContext service_context;
 };
 
 } // namespace Service::NVFlinger
diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp
index b7705c02a..0dd342dbf 100644
--- a/src/core/hle/service/vi/display/vi_display.cpp
+++ b/src/core/hle/service/vi/display/vi_display.cpp
@@ -12,21 +12,18 @@
 #include "core/hle/kernel/k_event.h"
 #include "core/hle/kernel/k_readable_event.h"
 #include "core/hle/kernel/k_writable_event.h"
-#include "core/hle/service/kernel_helpers.h"
 #include "core/hle/service/vi/display/vi_display.h"
 #include "core/hle/service/vi/layer/vi_layer.h"
 
 namespace Service::VI {
 
-Display::Display(u64 id, std::string name_, KernelHelpers::ServiceContext& service_context_,
-                 Core::System& system_)
-    : display_id{id}, name{std::move(name_)}, service_context{service_context_} {
-    vsync_event = service_context.CreateEvent(fmt::format("Display VSync Event {}", id));
+Display::Display(u64 id, std::string name_, Core::System& system)
+    : display_id{id}, name{std::move(name_)}, vsync_event{system.Kernel()} {
+    Kernel::KAutoObject::Create(std::addressof(vsync_event));
+    vsync_event.Initialize(fmt::format("Display VSync Event {}", id));
 }
 
-Display::~Display() {
-    service_context.CloseEvent(vsync_event);
-}
+Display::~Display() = default;
 
 Layer& Display::GetLayer(std::size_t index) {
     return *layers.at(index);
@@ -37,11 +34,11 @@ const Layer& Display::GetLayer(std::size_t index) const {
 }
 
 Kernel::KReadableEvent& Display::GetVSyncEvent() {
-    return vsync_event->GetReadableEvent();
+    return vsync_event.GetReadableEvent();
 }
 
 void Display::SignalVSyncEvent() {
-    vsync_event->GetWritableEvent().Signal();
+    vsync_event.GetWritableEvent().Signal();
 }
 
 void Display::CreateLayer(u64 layer_id, NVFlinger::BufferQueue& buffer_queue) {
diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h
index 0979fc421..166f2a4cc 100644
--- a/src/core/hle/service/vi/display/vi_display.h
+++ b/src/core/hle/service/vi/display/vi_display.h
@@ -18,9 +18,6 @@ class KEvent;
 namespace Service::NVFlinger {
 class BufferQueue;
 }
-namespace Service::KernelHelpers {
-class ServiceContext;
-} // namespace Service::KernelHelpers
 
 namespace Service::VI {
 
@@ -34,13 +31,10 @@ class Display {
 public:
     /// Constructs a display with a given unique ID and name.
     ///
     /// @param id The unique ID for this display.
-    /// @param service_context_ The ServiceContext for the owning service.
     /// @param name_ The name for this display.
-    /// @param system_ The global system instance.
     ///
-    Display(u64 id, std::string name_, KernelHelpers::ServiceContext& service_context_,
-            Core::System& system_);
+    Display(u64 id, std::string name_, Core::System& system);
     ~Display();
 
     /// Gets the unique ID assigned to this display.
@@ -104,10 +98,9 @@ public:
 private:
     u64 display_id;
     std::string name;
-    KernelHelpers::ServiceContext& service_context;
 
     std::vector<std::shared_ptr<Layer>> layers;
-    Kernel::KEvent* vsync_event{};
+    Kernel::KEvent vsync_event;
 };
 
 } // namespace Service::VI