| author | 2020-02-11 19:56:24 -0400 |
|---|---|
| committer | 2020-02-11 20:19:11 -0400 |
| commit | 1e6f8aba04b7be0f90b97aed2527558c755935d6 (patch) |
| tree | dea6dc5efd324eb6bc8667a114e20c6e91a28195 /src/core/hle/kernel/thread.cpp |
| parent | Kernel: Refactor synchronization to better match RE (diff) |
Core: Set all hardware emulation constants in a single file.
Diffstat (limited to 'src/core/hle/kernel/thread.cpp')
-rw-r--r-- src/core/hle/kernel/thread.cpp | 15
1 file changed, 8 insertions(+), 7 deletions(-)
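The hunks below replace the scheduler-owned constant GlobalScheduler::NUM_CPU_CORES with Core::Hardware::NUM_CPU_CORES, provided by the newly included core/hardware_properties.h. That header's contents are not part of this diff; the following is only a rough sketch of what such a centralized constants header might declare, with the core count assumed from the Switch's four application CPU cores:

```cpp
// Hypothetical sketch of core/hardware_properties.h -- not taken from this commit.
// The real header presumably gathers hardware emulation constants in one place so
// callers no longer reach into GlobalScheduler for them.
#pragma once

#include "common/common_types.h" // yuzu's fixed-width aliases (u32, u64, ...)

namespace Core::Hardware {

// Assumed value: the emulated Switch exposes four application CPU cores.
constexpr u32 NUM_CPU_CORES = 4;

} // namespace Core::Hardware
```

With the constant defined once in a header like this, the scheduling helpers in thread.cpp can iterate over cores via Core::Hardware::NUM_CPU_CORES instead of depending on the GlobalScheduler class.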
```diff
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index ee9ea7d67..43b30dd3d 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -15,6 +15,7 @@
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/core_timing_util.h"
+#include "core/hardware_properties.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/kernel.h"
@@ -431,7 +432,7 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
     const s32 old_core = processor_id;
     if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
         if (static_cast<s32>(ideal_core) < 0) {
-            processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES);
+            processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES);
         } else {
             processor_id = ideal_core;
         }
@@ -455,7 +456,7 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
         scheduler.Unschedule(current_priority, static_cast<u32>(processor_id), this);
     }

-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
             scheduler.Unsuggest(current_priority, core, this);
         }
@@ -466,7 +467,7 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
         scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this);
     }

-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
             scheduler.Suggest(current_priority, core, this);
         }
@@ -485,7 +486,7 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
         scheduler.Unschedule(old_priority, static_cast<u32>(processor_id), this);
     }

-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
             scheduler.Unsuggest(old_priority, core, this);
         }
@@ -502,7 +503,7 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
         }
     }

-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
             scheduler.Suggest(current_priority, core, this);
         }
@@ -518,7 +519,7 @@ void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
         return;
     }

-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (((old_affinity_mask >> core) & 1) != 0) {
             if (core == static_cast<u32>(old_core)) {
                 scheduler.Unschedule(current_priority, core, this);
@@ -528,7 +529,7 @@ void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
         }
     }

-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (((affinity_mask >> core) & 1) != 0) {
             if (core == static_cast<u32>(processor_id)) {
                 scheduler.Schedule(current_priority, core, this);
```