summary | refs | log | tree | commit | diff
path: root/src/core/hle/kernel
diff options
context:
space:
mode:
author:    bunnei <2021-11-10 22:46:07 -0800>
committer: bunnei <2021-12-06 16:39:17 -0800>
commit:    f62c7091a2bb0c70211b7424c4285906f5dccf4b (patch)
tree:      91e3b6171b8e59f74cfe7fcfa01ea6b2b9fd9d3b /src/core/hle/kernel
parent:    hle: kernel: KThread: Migrate to updated KThreadQueue (part 1). (diff)
download:  yuzu-f62c7091a2bb0c70211b7424c4285906f5dccf4b.tar.gz
           yuzu-f62c7091a2bb0c70211b7424c4285906f5dccf4b.tar.xz
           yuzu-f62c7091a2bb0c70211b7424c4285906f5dccf4b.zip
hle: kernel: KThread: Migrate to updated KThreadQueue (part 2).
Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/k_thread.cpp | 48
1 file changed, 19 insertions(+), 29 deletions(-)
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 995f0ca50..7ef52a240 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -504,30 +504,33 @@ ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_m
504 504
505 return ResultSuccess; 505 return ResultSuccess;
506} 506}
507ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) { 507
508ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
508 ASSERT(parent != nullptr); 509 ASSERT(parent != nullptr);
509 ASSERT(v_affinity_mask != 0); 510 ASSERT(v_affinity_mask != 0);
510 KScopedLightLock lk{activity_pause_lock}; 511 KScopedLightLock lk(activity_pause_lock);
511 512
512 // Set the core mask. 513 // Set the core mask.
513 u64 p_affinity_mask = 0; 514 u64 p_affinity_mask = 0;
514 { 515 {
515 KScopedSchedulerLock sl{kernel}; 516 KScopedSchedulerLock sl(kernel);
516 ASSERT(num_core_migration_disables >= 0); 517 ASSERT(num_core_migration_disables >= 0);
517 518
518 // If the core id is no-update magic, preserve the ideal core id. 519 // If we're updating, set our ideal virtual core.
519 if (cpu_core_id == Svc::IdealCoreNoUpdate) { 520 if (core_id_ != Svc::IdealCoreNoUpdate) {
520 cpu_core_id = virtual_ideal_core_id; 521 virtual_ideal_core_id = core_id_;
521 R_UNLESS(((1ULL << cpu_core_id) & v_affinity_mask) != 0, ResultInvalidCombination); 522 } else {
523 // Preserve our ideal core id.
524 core_id_ = virtual_ideal_core_id;
525 R_UNLESS(((1ULL << core_id_) & v_affinity_mask) != 0, ResultInvalidCombination);
522 } 526 }
523 527
524 // Set the virtual core/affinity mask. 528 // Set our affinity mask.
525 virtual_ideal_core_id = cpu_core_id;
526 virtual_affinity_mask = v_affinity_mask; 529 virtual_affinity_mask = v_affinity_mask;
527 530
528 // Translate the virtual core to a physical core. 531 // Translate the virtual core to a physical core.
529 if (cpu_core_id >= 0) { 532 if (core_id_ >= 0) {
530 cpu_core_id = Core::Hardware::VirtualToPhysicalCoreMap[cpu_core_id]; 533 core_id_ = Core::Hardware::VirtualToPhysicalCoreMap[core_id_];
531 } 534 }
532 535
533 // Translate the virtual affinity mask to a physical one. 536 // Translate the virtual affinity mask to a physical one.
@@ -542,7 +545,7 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
542 const KAffinityMask old_mask = physical_affinity_mask; 545 const KAffinityMask old_mask = physical_affinity_mask;
543 546
544 // Set our new ideals. 547 // Set our new ideals.
545 physical_ideal_core_id = cpu_core_id; 548 physical_ideal_core_id = core_id_;
546 physical_affinity_mask.SetAffinityMask(p_affinity_mask); 549 physical_affinity_mask.SetAffinityMask(p_affinity_mask);
547 550
548 if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { 551 if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
@@ -560,18 +563,18 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
560 } 563 }
561 } else { 564 } else {
562 // Otherwise, we edit the original affinity for restoration later. 565 // Otherwise, we edit the original affinity for restoration later.
563 original_physical_ideal_core_id = cpu_core_id; 566 original_physical_ideal_core_id = core_id_;
564 original_physical_affinity_mask.SetAffinityMask(p_affinity_mask); 567 original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
565 } 568 }
566 } 569 }
567 570
568 // Update the pinned waiter list. 571 // Update the pinned waiter list.
572 ThreadQueueImplForKThreadSetProperty wait_queue(kernel, std::addressof(pinned_waiter_list));
569 { 573 {
570 bool retry_update{}; 574 bool retry_update{};
571 bool thread_is_pinned{};
572 do { 575 do {
573 // Lock the scheduler. 576 // Lock the scheduler.
574 KScopedSchedulerLock sl{kernel}; 577 KScopedSchedulerLock sl(kernel);
575 578
576 // Don't do any further management if our termination has been requested. 579 // Don't do any further management if our termination has been requested.
577 R_SUCCEED_IF(IsTerminationRequested()); 580 R_SUCCEED_IF(IsTerminationRequested());
@@ -599,12 +602,9 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
599 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), 602 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
600 ResultTerminationRequested); 603 ResultTerminationRequested);
601 604
602 // Note that the thread was pinned.
603 thread_is_pinned = true;
604
605 // Wait until the thread isn't pinned any more. 605 // Wait until the thread isn't pinned any more.
606 pinned_waiter_list.push_back(GetCurrentThread(kernel)); 606 pinned_waiter_list.push_back(GetCurrentThread(kernel));
607 GetCurrentThread(kernel).SetState(ThreadState::Waiting); 607 GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue));
608 } else { 608 } else {
609 // If the thread isn't pinned, release the scheduler lock and retry until it's 609 // If the thread isn't pinned, release the scheduler lock and retry until it's
610 // not current. 610 // not current.
@@ -612,16 +612,6 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
612 } 612 }
613 } 613 }
614 } while (retry_update); 614 } while (retry_update);
615
616 // If the thread was pinned, it no longer is, and we should remove the current thread from
617 // our waiter list.
618 if (thread_is_pinned) {
619 // Lock the scheduler.
620 KScopedSchedulerLock sl{kernel};
621
622 // Remove from the list.
623 pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
624 }
625 } 615 }
626 616
627 return ResultSuccess; 617 return ResultSuccess;