author     Liam	2022-07-05 23:27:25 -0400
committer  Liam	2022-07-14 22:47:18 -0400
commit     21945ae127480c8332c1110ceada2df4a42a5848 (patch)
tree       a385c64a14b0d8e8dd71410eaa47575462f8f368 /src/core/hle/kernel
parent     kernel: use KScheduler from mesosphere (diff)
download   yuzu-21945ae127480c8332c1110ceada2df4a42a5848.tar.gz
           yuzu-21945ae127480c8332c1110ceada2df4a42a5848.tar.xz
           yuzu-21945ae127480c8332c1110ceada2df4a42a5848.zip
kernel: fix issues with single core mode
Diffstat (limited to 'src/core/hle/kernel')
 -rw-r--r--  src/core/hle/kernel/global_scheduler_context.cpp |   5
 -rw-r--r--  src/core/hle/kernel/k_scheduler.cpp              | 173
 -rw-r--r--  src/core/hle/kernel/k_scheduler.h                |  24
 -rw-r--r--  src/core/hle/kernel/k_thread.cpp                 |   5
 -rw-r--r--  src/core/hle/kernel/k_thread.h                   |  24
 -rw-r--r--  src/core/hle/kernel/kernel.cpp                   |  19
 -rw-r--r--  src/core/hle/kernel/physical_core.cpp            |   1
 7 files changed, 120 insertions(+), 131 deletions(-)
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index 21fd5cb67..65576b8c4 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -42,11 +42,6 @@ void GlobalSchedulerContext::PreemptThreads() {
     for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
         const u32 priority = preemption_priorities[core_id];
         KScheduler::RotateScheduledQueue(kernel, core_id, priority);
-
-        // Signal an interrupt occurred. For core 3, this is a certainty, as preemption will result
-        // in the rotator thread being scheduled. For cores 0-2, this is to simulate or system
-        // interrupts that may have occurred.
-        kernel.PhysicalCore(core_id).Interrupt();
     }
 }
 
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 13915dbd9..cac96a780 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -28,9 +28,9 @@ static void IncrementScheduledCount(Kernel::KThread* thread) {
 }
 
 KScheduler::KScheduler(KernelCore& kernel_) : kernel{kernel_} {
-    m_idle_stack = std::make_shared<Common::Fiber>([this] {
+    m_switch_fiber = std::make_shared<Common::Fiber>([this] {
         while (true) {
-            ScheduleImplOffStack();
+            ScheduleImplFiber();
         }
     });
 
@@ -60,9 +60,9 @@ void KScheduler::DisableScheduling(KernelCore& kernel) {
 void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
     ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 1);
 
-    auto* scheduler = kernel.CurrentScheduler();
+    auto* scheduler{kernel.CurrentScheduler()};
 
-    if (!scheduler) {
+    if (!scheduler || kernel.IsPhantomModeForSingleCore()) {
         // HACK: we cannot schedule from this thread, it is not a core thread
         RescheduleCores(kernel, cores_needing_scheduling);
         if (GetCurrentThread(kernel).GetDisableDispatchCount() == 1) {
@@ -125,9 +125,9 @@ void KScheduler::RescheduleCurrentCoreImpl() {
     }
 }
 
-void KScheduler::Initialize(KThread* idle_thread) {
+void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id) {
     // Set core ID/idle thread/interrupt task manager.
-    m_core_id = GetCurrentCoreId(kernel);
+    m_core_id = core_id;
     m_idle_thread = idle_thread;
     // m_state.idle_thread_stack = m_idle_thread->GetStackTop();
     // m_state.interrupt_task_manager = &kernel.GetInterruptTaskManager();
@@ -142,10 +142,10 @@ void KScheduler::Initialize(KThread* idle_thread) {
     // Bind interrupt handler.
     // kernel.GetInterruptManager().BindHandler(
     //    GetSchedulerInterruptHandler(kernel), KInterruptName::Scheduler, m_core_id,
-    //    KInterruptController::PriorityLevel_Scheduler, false, false);
+    //    KInterruptController::PriorityLevel::Scheduler, false, false);
 
     // Set the current thread.
-    m_current_thread = GetCurrentThreadPointer(kernel);
+    m_current_thread = main_thread;
 }
 
 void KScheduler::Activate() {
@@ -156,6 +156,10 @@ void KScheduler::Activate() {
     RescheduleCurrentCore();
 }
 
+void KScheduler::OnThreadStart() {
+    GetCurrentThread(kernel).EnableDispatch();
+}
+
 u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
     if (KThread* prev_highest_thread = m_state.highest_priority_thread;
         prev_highest_thread != highest_thread) [[likely]] {
@@ -372,37 +376,30 @@ void KScheduler::ScheduleImpl() {
     }
 
     // The highest priority thread is not the same as the current thread.
-    // Switch to the idle thread stack and continue executing from there.
-    m_idle_cur_thread = cur_thread;
-    m_idle_highest_priority_thread = highest_priority_thread;
-    Common::Fiber::YieldTo(cur_thread->host_context, *m_idle_stack);
+    // Jump to the switcher and continue executing from there.
+    m_switch_cur_thread = cur_thread;
+    m_switch_highest_priority_thread = highest_priority_thread;
+    m_switch_from_schedule = true;
+    Common::Fiber::YieldTo(cur_thread->host_context, *m_switch_fiber);
 
     // Returning from ScheduleImpl occurs after this thread has been scheduled again.
 }
 
-void KScheduler::ScheduleImplOffStack() {
-    KThread* const cur_thread{m_idle_cur_thread};
-    KThread* highest_priority_thread{m_idle_highest_priority_thread};
+void KScheduler::ScheduleImplFiber() {
+    KThread* const cur_thread{m_switch_cur_thread};
+    KThread* highest_priority_thread{m_switch_highest_priority_thread};
 
-    // Get a reference to the current thread's stack parameters.
-    auto& sp{cur_thread->GetStackParameters()};
-
-    // Save the original thread context.
-    {
-        auto& physical_core = kernel.System().CurrentPhysicalCore();
-        auto& cpu_core = physical_core.ArmInterface();
-        cpu_core.SaveContext(cur_thread->GetContext32());
-        cpu_core.SaveContext(cur_thread->GetContext64());
-        // Save the TPIDR_EL0 system register in case it was modified.
-        cur_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
-        cpu_core.ClearExclusiveState();
+    // If we're not coming from scheduling (i.e., we came from SC preemption),
+    // we should restart the scheduling loop directly. Not accurate to HOS.
+    if (!m_switch_from_schedule) {
+        goto retry;
     }
 
-    // Check if the thread is terminated by checking the DPC flags.
-    if ((sp.dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) {
-        // The thread isn't terminated, so we want to unlock it.
-        sp.m_lock.store(false, std::memory_order_seq_cst);
-    }
+    // Mark that we are not coming from scheduling anymore.
+    m_switch_from_schedule = false;
+
+    // Save the original thread context.
+    Unload(cur_thread);
 
     // The current thread's context has been entirely taken care of.
     // Now we want to loop until we successfully switch the thread context.
@@ -411,62 +408,39 @@ void KScheduler::ScheduleImplOffStack() {
         // Check if the highest priority thread is null.
         if (!highest_priority_thread) {
             // The next thread is nullptr!
-            // Switch to nullptr. This will actually switch to the idle thread.
-            SwitchThread(nullptr);
-
-            // We've switched to the idle thread, so we want to process interrupt tasks until we
-            // schedule a non-idle thread.
-            while (!m_state.interrupt_task_runnable) {
-                // Check if we need scheduling.
-                if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
-                    goto retry;
-                }
 
-                // Clear the previous thread.
-                m_state.prev_thread = nullptr;
+            // Switch to the idle thread. Note: HOS treats idling as a special case for
+            // performance. This is not *required* for yuzu's purposes, and for singlecore
+            // compatibility, we can just move the logic that would go here into the execution
+            // of the idle thread. If we ever remove singlecore, we should implement this
+            // accurately to HOS.
+            highest_priority_thread = m_idle_thread;
+        }
 
-            // Wait for an interrupt before checking again.
-            kernel.System().GetCpuManager().WaitForAndHandleInterrupt();
+        // We want to try to lock the highest priority thread's context.
+        // Try to take it.
+        while (!highest_priority_thread->context_guard.try_lock()) {
+            // The highest priority thread's context is already locked.
+            // Check if we need scheduling. If we don't, we can retry directly.
+            if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
+                // If we do, another core is interfering, and we must start again.
+                goto retry;
             }
+        }
 
-            // Execute any pending interrupt tasks.
-            // m_state.interrupt_task_manager->DoTasks();
-
-            // Clear the interrupt task thread as runnable.
-            m_state.interrupt_task_runnable = false;
+        // It's time to switch the thread.
+        // Switch to the highest priority thread.
+        SwitchThread(highest_priority_thread);
 
-            // Retry the scheduling loop.
+        // Check if we need scheduling. If we do, then we can't complete the switch and should
+        // retry.
+        if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
+            // Our switch failed.
+            // We should unlock the thread context, and then retry.
+            highest_priority_thread->context_guard.unlock();
             goto retry;
         } else {
-            // We want to try to lock the highest priority thread's context.
-            // Try to take it.
-            bool expected{false};
-            while (!highest_priority_thread->stack_parameters.m_lock.compare_exchange_strong(
-                expected, true, std::memory_order_seq_cst)) {
-                // The highest priority thread's context is already locked.
-                // Check if we need scheduling. If we don't, we can retry directly.
-                if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
-                    // If we do, another core is interfering, and we must start again.
-                    goto retry;
-                }
-                expected = false;
-            }
-
-            // It's time to switch the thread.
-            // Switch to the highest priority thread.
-            SwitchThread(highest_priority_thread);
-
-            // Check if we need scheduling. If we do, then we can't complete the switch and should
-            // retry.
-            if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
-                // Our switch failed.
-                // We should unlock the thread context, and then retry.
-                highest_priority_thread->stack_parameters.m_lock.store(false,
-                                                                       std::memory_order_seq_cst);
-                goto retry;
-            } else {
-                break;
-            }
+            break;
         }
 
     retry:
@@ -480,18 +454,35 @@ void KScheduler::ScheduleImplOffStack() {
     }
 
     // Reload the guest thread context.
-    {
-        auto& cpu_core = kernel.System().CurrentArmInterface();
-        cpu_core.LoadContext(highest_priority_thread->GetContext32());
-        cpu_core.LoadContext(highest_priority_thread->GetContext64());
-        cpu_core.SetTlsAddress(highest_priority_thread->GetTLSAddress());
-        cpu_core.SetTPIDR_EL0(highest_priority_thread->GetTPIDR_EL0());
-        cpu_core.LoadWatchpointArray(highest_priority_thread->GetOwnerProcess()->GetWatchpoints());
-        cpu_core.ClearExclusiveState();
-    }
+    Reload(highest_priority_thread);
 
     // Reload the host thread.
-    Common::Fiber::YieldTo(m_idle_stack, *highest_priority_thread->host_context);
+    Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->host_context);
+}
+
+void KScheduler::Unload(KThread* thread) {
+    auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+    cpu_core.SaveContext(thread->GetContext32());
+    cpu_core.SaveContext(thread->GetContext64());
+    // Save the TPIDR_EL0 system register in case it was modified.
+    thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+    cpu_core.ClearExclusiveState();
+
+    // Check if the thread is terminated by checking the DPC flags.
+    if ((thread->GetStackParameters().dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) {
+        // The thread isn't terminated, so we want to unlock it.
+        thread->context_guard.unlock();
+    }
+}
+
+void KScheduler::Reload(KThread* thread) {
+    auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+    cpu_core.LoadContext(thread->GetContext32());
+    cpu_core.LoadContext(thread->GetContext64());
+    cpu_core.SetTlsAddress(thread->GetTLSAddress());
+    cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+    cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
+    cpu_core.ClearExclusiveState();
 }
 
 void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) {
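
Note: the core of the new ScheduleImplFiber is its locking protocol — a core may only load a thread's context after winning that thread's context_guard, and it must abandon the attempt whenever needs_scheduling is raised by another core. A minimal host-side sketch of that protocol follows; FakeThread, TryAcquireContext, and the free-standing needs_scheduling flag are illustrative stand-ins, not yuzu APIs:

    #include <atomic>
    #include <iostream>
    #include <mutex>

    // Stand-in for KThread: only the context guard matters here.
    struct FakeThread {
        std::mutex context_guard;
    };

    // Stand-in for m_state.needs_scheduling on this core's scheduler.
    std::atomic<bool> needs_scheduling{false};

    // Mirrors the retry protocol in ScheduleImplFiber: spin on try_lock, but
    // bail out (the `goto retry` path) if another core requests scheduling,
    // and give the lock back if the request arrives after we won it.
    bool TryAcquireContext(FakeThread& t) {
        while (!t.context_guard.try_lock()) {
            if (needs_scheduling.load(std::memory_order_seq_cst)) {
                return false; // another core is interfering; retry from the top
            }
        }
        if (needs_scheduling.load(std::memory_order_seq_cst)) {
            t.context_guard.unlock(); // our switch failed; unlock and retry
            return false;
        }
        return true; // context locked; the switch may proceed
    }

    int main() {
        FakeThread thread;
        if (TryAcquireContext(thread)) {
            std::cout << "context acquired; switch would proceed\n";
            thread.context_guard.unlock();
        }
        return 0;
    }

The matching release side is Unload() above: the previous thread's guard is only dropped once its context has been saved, which is what keeps another core from reloading a half-saved context.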
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 8f4eebf6a..91e870933 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -41,8 +41,11 @@ public:
     explicit KScheduler(KernelCore& kernel);
     ~KScheduler();
 
-    void Initialize(KThread* idle_thread);
+    void Initialize(KThread* main_thread, KThread* idle_thread, s32 core_id);
     void Activate();
+    void OnThreadStart();
+    void Unload(KThread* thread);
+    void Reload(KThread* thread);
 
     void SetInterruptTaskRunnable();
     void RequestScheduleOnInterrupt();
@@ -55,6 +58,14 @@ public:
         return m_idle_thread;
     }
 
+    bool IsIdle() const {
+        return m_current_thread.load() == m_idle_thread;
+    }
+
+    std::shared_ptr<Common::Fiber> GetSwitchFiber() {
+        return m_switch_fiber;
+    }
+
     KThread* GetPreviousThread() const {
         return m_state.prev_thread;
     }
@@ -69,7 +80,7 @@ public:
 
     // Static public API.
     static bool CanSchedule(KernelCore& kernel) {
-        return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() == 0;
+        return GetCurrentThread(kernel).GetDisableDispatchCount() == 0;
     }
     static bool IsSchedulerLockedByCurrentThread(KernelCore& kernel) {
         return kernel.GlobalSchedulerContext().scheduler_lock.IsLockedByCurrentThread();
@@ -113,7 +124,7 @@ private:
 
     // Instanced private API.
     void ScheduleImpl();
-    void ScheduleImplOffStack();
+    void ScheduleImplFiber();
     void SwitchThread(KThread* next_thread);
 
     void Schedule();
@@ -147,9 +158,10 @@ private:
     KThread* m_idle_thread{nullptr};
     std::atomic<KThread*> m_current_thread{nullptr};
 
-    std::shared_ptr<Common::Fiber> m_idle_stack{};
-    KThread* m_idle_cur_thread{};
-    KThread* m_idle_highest_priority_thread{};
+    std::shared_ptr<Common::Fiber> m_switch_fiber{};
+    KThread* m_switch_cur_thread{};
+    KThread* m_switch_highest_priority_thread{};
+    bool m_switch_from_schedule{};
 };
 
 class KScopedSchedulerLock : public KScopedLock<KScheduler::LockType> {
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 9daa589b5..d5d390f04 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -268,7 +268,7 @@ Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32
 
 Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
     return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
-                            abort);
+                            system.GetCpuManager().GetIdleThreadStartFunc());
 }
 
 Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
@@ -1204,8 +1204,9 @@ KScopedDisableDispatch::~KScopedDisableDispatch() {
         return;
     }
 
-    // Skip the reschedule if single-core, as dispatch tracking is disabled here.
+    // Skip the reschedule if single-core.
     if (!Settings::values.use_multi_core.GetValue()) {
+        GetCurrentThread(kernel).EnableDispatch();
         return;
     }
 
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index 416a861a9..1fc8f5f3e 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -439,7 +439,6 @@ public:
         bool is_pinned;
         s32 disable_count;
         KThread* cur_thread;
-        std::atomic<bool> m_lock;
     };
 
     [[nodiscard]] StackParameters& GetStackParameters() {
@@ -485,39 +484,16 @@ public:
         return per_core_priority_queue_entry[core];
     }
 
-    [[nodiscard]] bool IsKernelThread() const {
-        return GetActiveCore() == 3;
-    }
-
-    [[nodiscard]] bool IsDispatchTrackingDisabled() const {
-        return is_single_core || IsKernelThread();
-    }
-
     [[nodiscard]] s32 GetDisableDispatchCount() const {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return 1;
-        }
-
         return this->GetStackParameters().disable_count;
     }
 
     void DisableDispatch() {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return;
-        }
-
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
         this->GetStackParameters().disable_count++;
     }
 
     void EnableDispatch() {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return;
-        }
-
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
         this->GetStackParameters().disable_count--;
     }
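
Note: with the single-core and kernel-thread special cases removed, dispatch tracking reduces to a plain per-thread counter — DisableDispatch/EnableDispatch adjust disable_count, and the scheduler may only run when it is zero (see CanSchedule above). A self-contained sketch of this counter plus a KScopedDisableDispatch-style RAII guard; all names here are illustrative stand-ins, not the real kernel types:

    #include <cassert>

    // Stand-in for a thread's stack parameters: just the dispatch counter.
    struct ThreadState {
        int disable_count{0};

        void DisableDispatch() {
            assert(disable_count >= 0);
            ++disable_count;
        }
        void EnableDispatch() {
            assert(disable_count > 0);
            --disable_count;
        }
        bool CanSchedule() const {
            return disable_count == 0; // mirrors KScheduler::CanSchedule
        }
    };

    // RAII wrapper in the spirit of KScopedDisableDispatch: dispatch is
    // re-enabled automatically when the scope exits.
    class ScopedDisableDispatch {
    public:
        explicit ScopedDisableDispatch(ThreadState& t) : thread{t} {
            thread.DisableDispatch();
        }
        ~ScopedDisableDispatch() {
            thread.EnableDispatch();
        }

    private:
        ThreadState& thread;
    };

    int main() {
        ThreadState t;
        {
            ScopedDisableDispatch guard{t};
            assert(!t.CanSchedule()); // no rescheduling inside the scope
        }
        assert(t.CanSchedule()); // balanced again on scope exit
        return 0;
    }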
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 10e1f47f6..926c6dc84 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -64,8 +64,6 @@ struct KernelCore::Impl {
 
         is_phantom_mode_for_singlecore = false;
 
-        InitializePhysicalCores();
-
         // Derive the initial memory layout from the emulated board
         Init::InitializeSlabResourceCounts(kernel);
         DeriveInitialMemoryLayout();
@@ -77,6 +75,7 @@ struct KernelCore::Impl {
         Init::InitializeKPageBufferSlabHeap(system);
         InitializeShutdownThreads();
         InitializePreemption(kernel);
+        InitializePhysicalCores();
 
         RegisterHostThread();
     }
@@ -193,8 +192,21 @@ struct KernelCore::Impl {
         exclusive_monitor =
             Core::MakeExclusiveMonitor(system.Memory(), Core::Hardware::NUM_CPU_CORES);
         for (u32 i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
+            const s32 core{static_cast<s32>(i)};
+
             schedulers[i] = std::make_unique<Kernel::KScheduler>(system.Kernel());
             cores.emplace_back(i, system, *schedulers[i], interrupts);
+
+            auto* main_thread{Kernel::KThread::Create(system.Kernel())};
+            main_thread->SetName(fmt::format("MainThread:{}", core));
+            main_thread->SetCurrentCore(core);
+            ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess());
+
+            auto* idle_thread{Kernel::KThread::Create(system.Kernel())};
+            idle_thread->SetCurrentCore(core);
+            ASSERT(Kernel::KThread::InitializeIdleThread(system, idle_thread, core).IsSuccess());
+
+            schedulers[i]->Initialize(main_thread, idle_thread, core);
         }
     }
 
@@ -1093,10 +1105,11 @@ void KernelCore::Suspend(bool suspended) {
 }
 
 void KernelCore::ShutdownCores() {
+    KScopedSchedulerLock lk{*this};
+
     for (auto* thread : impl->shutdown_threads) {
         void(thread->Run());
     }
-    InterruptAllPhysicalCores();
 }
 
 bool KernelCore::IsMulticore() const {
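
Note: ShutdownCores now takes the scheduler lock before waking the shutdown threads, so the wakeups are published as one scheduling operation rather than interrupting each core individually. A minimal sketch of that scoped-lock pattern, under the assumption that releasing the lock is what triggers rescheduling; FakeKernel and its members are stand-ins, not the real KernelCore API:

    #include <mutex>
    #include <vector>

    struct FakeKernel {
        std::mutex scheduler_lock;
        std::vector<int> shutdown_threads{0, 1, 2, 3};

        // Stand-in for KThread::Run() on a shutdown thread.
        void RunThread(int /*core*/) {}

        void ShutdownCores() {
            // Like `KScopedSchedulerLock lk{*this};`: hold the lock for the
            // whole loop so no core can reschedule against a half-woken set.
            std::scoped_lock lk{scheduler_lock};
            for (int core : shutdown_threads) {
                RunThread(core);
            }
        } // lock released here; rescheduling would happen once, not per core
    };

    int main() {
        FakeKernel kernel;
        kernel.ShutdownCores();
        return 0;
    }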
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index a5b16ae2e..6e7dacf97 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -43,6 +43,7 @@ void PhysicalCore::Initialize([[maybe_unused]] bool is_64_bit) {
 
 void PhysicalCore::Run() {
     arm_interface->Run();
+    arm_interface->ClearExclusiveState();
 }
 
 void PhysicalCore::Idle() {