diff options
Diffstat (limited to 'src/core/hle/kernel/thread.h')
| -rw-r--r-- | src/core/hle/kernel/thread.h | 107 |
1 file changed, 80 insertions, 27 deletions
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index 5192ecff1..f1aa358a4 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h | |||
| @@ -28,10 +28,10 @@ class System; | |||
| 28 | 28 | ||
| 29 | namespace Kernel { | 29 | namespace Kernel { |
| 30 | 30 | ||
| 31 | class GlobalScheduler; | 31 | class GlobalSchedulerContext; |
| 32 | class KernelCore; | 32 | class KernelCore; |
| 33 | class Process; | 33 | class Process; |
| 34 | class Scheduler; | 34 | class KScheduler; |
| 35 | 35 | ||
| 36 | enum ThreadPriority : u32 { | 36 | enum ThreadPriority : u32 { |
| 37 | THREADPRIO_HIGHEST = 0, ///< Highest thread priority | 37 | THREADPRIO_HIGHEST = 0, ///< Highest thread priority |
| @@ -346,8 +346,11 @@ public: | |||
| 346 | 346 | ||
| 347 | void SetStatus(ThreadStatus new_status); | 347 | void SetStatus(ThreadStatus new_status); |
| 348 | 348 | ||
| 349 | u64 GetLastRunningTicks() const { | 349 | constexpr s64 GetLastScheduledTick() const { |
| 350 | return last_running_ticks; | 350 | return this->last_scheduled_tick; |
| 351 | } | ||
| 352 | constexpr void SetLastScheduledTick(s64 tick) { | ||
| 353 | this->last_scheduled_tick = tick; | ||
| 351 | } | 354 | } |
| 352 | 355 | ||
| 353 | u64 GetTotalCPUTimeTicks() const { | 356 | u64 GetTotalCPUTimeTicks() const { |
| @@ -362,10 +365,18 @@ public: | |||
| 362 | return processor_id; | 365 | return processor_id; |
| 363 | } | 366 | } |
| 364 | 367 | ||
| 368 | s32 GetActiveCore() const { | ||
| 369 | return GetProcessorID(); | ||
| 370 | } | ||
| 371 | |||
| 365 | void SetProcessorID(s32 new_core) { | 372 | void SetProcessorID(s32 new_core) { |
| 366 | processor_id = new_core; | 373 | processor_id = new_core; |
| 367 | } | 374 | } |
| 368 | 375 | ||
| 376 | void SetActiveCore(s32 new_core) { | ||
| 377 | processor_id = new_core; | ||
| 378 | } | ||
| 379 | |||
| 369 | Process* GetOwnerProcess() { | 380 | Process* GetOwnerProcess() { |
| 370 | return owner_process; | 381 | return owner_process; |
| 371 | } | 382 | } |
| @@ -479,21 +490,11 @@ public: | |||
| 479 | /// Sleeps this thread for the given amount of nanoseconds. | 490 | /// Sleeps this thread for the given amount of nanoseconds. |
| 480 | ResultCode Sleep(s64 nanoseconds); | 491 | ResultCode Sleep(s64 nanoseconds); |
| 481 | 492 | ||
| 482 | /// Yields this thread without rebalancing loads. | 493 | constexpr s64 GetYieldScheduleCount() const { |
| 483 | std::pair<ResultCode, bool> YieldSimple(); | 494 | return this->schedule_count; |
| 484 | |||
| 485 | /// Yields this thread and does a load rebalancing. | ||
| 486 | std::pair<ResultCode, bool> YieldAndBalanceLoad(); | ||
| 487 | |||
| 488 | /// Yields this thread and if the core is left idle, loads are rebalanced | ||
| 489 | std::pair<ResultCode, bool> YieldAndWaitForLoadBalancing(); | ||
| 490 | |||
| 491 | void IncrementYieldCount() { | ||
| 492 | yield_count++; | ||
| 493 | } | 495 | } |
| 494 | 496 | constexpr void SetYieldScheduleCount(s64 count) { | |
| 495 | u64 GetYieldCount() const { | 497 | this->schedule_count = count; |
| 496 | return yield_count; | ||
| 497 | } | 498 | } |
| 498 | 499 | ||
| 499 | ThreadSchedStatus GetSchedulingStatus() const { | 500 | ThreadSchedStatus GetSchedulingStatus() const { |
| @@ -569,9 +570,62 @@ public: | |||
| 569 | return has_exited; | 570 | return has_exited; |
| 570 | } | 571 | } |
| 571 | 572 | ||
| 573 | struct QueueEntry { | ||
| 574 | private: | ||
| 575 | Thread* prev; | ||
| 576 | Thread* next; | ||
| 577 | |||
| 578 | public: | ||
| 579 | constexpr QueueEntry() : prev(nullptr), next(nullptr) { /* ... */ | ||
| 580 | } | ||
| 581 | |||
| 582 | constexpr void Initialize() { | ||
| 583 | this->prev = nullptr; | ||
| 584 | this->next = nullptr; | ||
| 585 | } | ||
| 586 | |||
| 587 | constexpr Thread* GetPrev() const { | ||
| 588 | return this->prev; | ||
| 589 | } | ||
| 590 | constexpr Thread* GetNext() const { | ||
| 591 | return this->next; | ||
| 592 | } | ||
| 593 | constexpr void SetPrev(Thread* t) { | ||
| 594 | this->prev = t; | ||
| 595 | } | ||
| 596 | constexpr void SetNext(Thread* t) { | ||
| 597 | this->next = t; | ||
| 598 | } | ||
| 599 | }; | ||
| 600 | |||
| 601 | constexpr QueueEntry& GetPriorityQueueEntry(s32 core) { | ||
| 602 | return this->per_core_priority_queue_entry[core]; | ||
| 603 | } | ||
| 604 | constexpr const QueueEntry& GetPriorityQueueEntry(s32 core) const { | ||
| 605 | return this->per_core_priority_queue_entry[core]; | ||
| 606 | } | ||
| 607 | |||
| 608 | s32 GetDisableDispatchCount() const { | ||
| 609 | return disable_count; | ||
| 610 | } | ||
| 611 | |||
| 612 | void DisableDispatch() { | ||
| 613 | ASSERT(GetDisableDispatchCount() >= 0); | ||
| 614 | disable_count++; | ||
| 615 | } | ||
| 616 | |||
| 617 | void EnableDispatch() { | ||
| 618 | ASSERT(GetDisableDispatchCount() > 0); | ||
| 619 | disable_count--; | ||
| 620 | } | ||
| 621 | |||
| 622 | ThreadStatus status = ThreadStatus::Dormant; | ||
| 623 | u32 scheduling_state = 0; | ||
| 624 | |||
| 572 | private: | 625 | private: |
| 573 | friend class GlobalScheduler; | 626 | friend class GlobalSchedulerContext; |
| 574 | friend class Scheduler; | 627 | friend class KScheduler; |
| 628 | friend class Process; | ||
| 575 | 629 | ||
| 576 | void SetSchedulingStatus(ThreadSchedStatus new_status); | 630 | void SetSchedulingStatus(ThreadSchedStatus new_status); |
| 577 | void AddSchedulingFlag(ThreadSchedFlags flag); | 631 | void AddSchedulingFlag(ThreadSchedFlags flag); |
| @@ -586,10 +640,9 @@ private: | |||
| 586 | 640 | ||
| 587 | u64 thread_id = 0; | 641 | u64 thread_id = 0; |
| 588 | 642 | ||
| 589 | ThreadStatus status = ThreadStatus::Dormant; | ||
| 590 | |||
| 591 | VAddr entry_point = 0; | 643 | VAddr entry_point = 0; |
| 592 | VAddr stack_top = 0; | 644 | VAddr stack_top = 0; |
| 645 | std::atomic_int disable_count = 0; | ||
| 593 | 646 | ||
| 594 | ThreadType type; | 647 | ThreadType type; |
| 595 | 648 | ||
| @@ -603,9 +656,8 @@ private: | |||
| 603 | u32 current_priority = 0; | 656 | u32 current_priority = 0; |
| 604 | 657 | ||
| 605 | u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks. | 658 | u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks. |
| 606 | u64 last_running_ticks = 0; ///< CPU tick when thread was last running | 659 | s64 schedule_count{}; |
| 607 | u64 yield_count = 0; ///< Number of redundant yields carried by this thread. | 660 | s64 last_scheduled_tick{}; |
| 608 | ///< a redundant yield is one where no scheduling is changed | ||
| 609 | 661 | ||
| 610 | s32 processor_id = 0; | 662 | s32 processor_id = 0; |
| 611 | 663 | ||
| @@ -647,7 +699,9 @@ private: | |||
| 647 | Handle hle_time_event; | 699 | Handle hle_time_event; |
| 648 | SynchronizationObject* hle_object; | 700 | SynchronizationObject* hle_object; |
| 649 | 701 | ||
| 650 | Scheduler* scheduler = nullptr; | 702 | KScheduler* scheduler = nullptr; |
| 703 | |||
| 704 | QueueEntry per_core_priority_queue_entry[Core::Hardware::NUM_CPU_CORES]{}; | ||
| 651 | 705 | ||
| 652 | u32 ideal_core{0xFFFFFFFF}; | 706 | u32 ideal_core{0xFFFFFFFF}; |
| 653 | KAffinityMask affinity_mask{}; | 707 | KAffinityMask affinity_mask{}; |
| @@ -655,7 +709,6 @@ private: | |||
| 655 | s32 ideal_core_override = -1; | 709 | s32 ideal_core_override = -1; |
| 656 | u32 affinity_override_count = 0; | 710 | u32 affinity_override_count = 0; |
| 657 | 711 | ||
| 658 | u32 scheduling_state = 0; | ||
| 659 | u32 pausing_state = 0; | 712 | u32 pausing_state = 0; |
| 660 | bool is_running = false; | 713 | bool is_running = false; |
| 661 | bool is_waiting_on_sync = false; | 714 | bool is_waiting_on_sync = false; |