| author | 2020-03-06 14:56:05 -0400 |
|---|---|
| committer | 2020-06-27 11:35:29 -0400 |
| commit | a33fbaddec5d516328d7cd179114dcf0b93cfb69 (patch) |
| tree | 3c4a4ffaae67165e5d4fc380c3f6f899d73f8dac /src |
| parent | Scheduler: Release old thread fiber before trying to switch to the next threa... (diff) |
| download | yuzu-a33fbaddec5d516328d7cd179114dcf0b93cfb69.tar.gz, yuzu-a33fbaddec5d516328d7cd179114dcf0b93cfb69.tar.xz, yuzu-a33fbaddec5d516328d7cd179114dcf0b93cfb69.zip |
Core: Correct rebase.
Diffstat (limited to 'src')

| file | lines changed |
|---|---|
| src/core/arm/dynarmic/arm_dynarmic_32.cpp | 18 |
| src/core/hle/kernel/scheduler.cpp | 11 |

2 files changed, 11 insertions, 18 deletions
```diff
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 0b7aa6a69..30bf62ac1 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -8,6 +8,7 @@
 #include <dynarmic/A32/config.h>
 #include <dynarmic/A32/context.h>
 #include "common/microprofile.h"
+#include "core/arm/cpu_interrupt_handler.h"
 #include "core/arm/dynarmic/arm_dynarmic_32.h"
 #include "core/arm/dynarmic/arm_dynarmic_64.h"
 #include "core/arm/dynarmic/arm_dynarmic_cp15.h"
@@ -72,20 +73,13 @@ public:
     }
 
     void AddTicks(u64 ticks) override {
-        // Divide the number of ticks by the amount of CPU cores. TODO(Subv): This yields only a
-        // rough approximation of the amount of executed ticks in the system, it may be thrown off
-        // if not all cores are doing a similar amount of work. Instead of doing this, we should
-        // device a way so that timing is consistent across all cores without increasing the ticks 4
-        // times.
-        u64 amortized_ticks = (ticks - num_interpreted_instructions) / Core::NUM_CPU_CORES;
-        // Always execute at least one tick.
-        amortized_ticks = std::max<u64>(amortized_ticks, 1);
-
-        parent.system.CoreTiming().AddTicks(amortized_ticks);
-        num_interpreted_instructions = 0;
+        /// We are using host timing, NOP
     }
     u64 GetTicksRemaining() override {
-        return std::max(parent.system.CoreTiming().GetDowncount(), {});
+        if (!parent.interrupt_handler.IsInterrupted()) {
+            return 1000ULL;
+        }
+        return 0ULL;
     }
 
     ARM_Dynarmic_32& parent;
```
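For context on the `GetTicksRemaining` change: under the new host-timing scheme the JIT no longer drains the core-timing downcount. It asks for a fixed 1000-tick slice each time, and gets 0 back once an interrupt is pending, so it halts at the next safe point. Below is a minimal, self-contained sketch of that pattern; `InterruptHandler` and `SetInterrupt` here are hypothetical stand-ins, not the actual API from `core/arm/cpu_interrupt_handler.h`.

```cpp
#include <atomic>
#include <cstdint>
#include <iostream>

// Hypothetical stand-in for the flag behind
// parent.interrupt_handler.IsInterrupted(); not yuzu's real class.
class InterruptHandler {
public:
    bool IsInterrupted() const {
        return interrupted.load(std::memory_order_acquire);
    }
    void SetInterrupt(bool value) {
        interrupted.store(value, std::memory_order_release);
    }

private:
    std::atomic<bool> interrupted{false};
};

// Same shape as the patched callback: hand out a flat 1000-tick slice
// while no interrupt is pending, and 0 once one arrives so the JIT
// stops running guest code promptly.
std::uint64_t GetTicksRemaining(const InterruptHandler& handler) {
    if (!handler.IsInterrupted()) {
        return 1000ULL;
    }
    return 0ULL;
}

int main() {
    InterruptHandler handler;
    std::cout << GetTicksRemaining(handler) << '\n'; // 1000
    handler.SetInterrupt(true);
    std::cout << GetTicksRemaining(handler) << '\n'; // 0
}
```

Likewise, `AddTicks` can become a no-op because host wall-clock time, not a count of emulated ticks, now drives core timing; the old per-core amortization was only ever a rough approximation, as its own removed comment admitted.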
```diff
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index aa1f1a305..ae89e908f 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -552,8 +552,7 @@ void GlobalScheduler::Unlock() {
     EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread);
 }
 
-Scheduler::Scheduler(Core::System& system, std::size_t core_id)
-    : system(system), core_id(core_id) {
+Scheduler::Scheduler(Core::System& system, std::size_t core_id) : system(system), core_id(core_id) {
     switch_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>(OnSwitch), this);
 }
 
@@ -601,9 +600,10 @@ void Scheduler::SwitchContextStep2() {
 
     // Load context of new thread
     Process* const previous_process =
         previous_thread != nullptr ? previous_thread->GetOwnerProcess() : nullptr;
 
     if (new_thread) {
+        auto& cpu_core = system.ArmInterface(core_id);
         new_thread->context_guard.lock();
         cpu_core.Lock();
         ASSERT_MSG(new_thread->GetProcessorID() == s32(this->core_id),
@@ -619,7 +619,6 @@ void Scheduler::SwitchContextStep2() {
             system.Kernel().MakeCurrentProcess(thread_owner_process);
         }
         if (!new_thread->IsHLEThread()) {
-            auto& cpu_core = system.ArmInterface(core_id);
             cpu_core.LoadContext(new_thread->GetContext32());
             cpu_core.LoadContext(new_thread->GetContext64());
             cpu_core.SetTlsAddress(new_thread->GetTLSAddress());
@@ -651,12 +650,12 @@ void Scheduler::SwitchContext() {
 
     // Save context for previous thread
     if (previous_thread) {
+        auto& cpu_core = system.ArmInterface(core_id);
         if (!previous_thread->IsHLEThread()) {
-            auto& cpu_core = system.ArmInterface(core_id);
             cpu_core.SaveContext(previous_thread->GetContext32());
             cpu_core.SaveContext(previous_thread->GetContext64());
             // Save the TPIDR_EL0 system register in case it was modified.
             previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
             cpu_core.ClearExclusiveState();
         }
         if (previous_thread->GetStatus() == ThreadStatus::Running) {
```
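The `scheduler.cpp` hunks are the rebase correction itself: the parent commit added a use of `cpu_core` outside the `IsHLEThread()` branch (the `cpu_core.Lock()` call visible above), while the `auto& cpu_core = system.ArmInterface(core_id);` declaration still lived inside that branch, so the name was out of scope. The fix hoists the declaration one level up. A reduced sketch of the fixed shape, with hypothetical `Cpu`/`System` stand-ins for yuzu's ARM interface:

```cpp
#include <cstddef>
#include <iostream>

// Hypothetical stand-ins for yuzu's ARM core interface and system object.
struct Cpu {
    void Lock() { std::cout << "core locked\n"; }
    void LoadContext(int /*context*/) { std::cout << "context loaded\n"; }
};

struct System {
    Cpu& ArmInterface(std::size_t /*core_id*/) { return cpu; }
    Cpu cpu;
};

void SwitchContextStep2(System& system, std::size_t core_id, bool is_hle_thread) {
    // After the fix: the reference is taken in the outer scope, so the
    // unconditional Lock() added by the parent commit can see it.
    auto& cpu_core = system.ArmInterface(core_id);
    cpu_core.Lock();
    if (!is_hle_thread) {
        // Before the fix, cpu_core was declared only inside this branch,
        // leaving the outer Lock() call referring to an undeclared name.
        cpu_core.LoadContext(0);
    }
}

int main() {
    System system;
    SwitchContextStep2(system, 0, false);
}
```

The one-line constructor change in the first hunk is purely cosmetic: the member-initializer list moves onto the same line as the signature.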