Diffstat (limited to 'src/core/hle/kernel/thread.cpp')
-rw-r--r--  src/core/hle/kernel/thread.cpp | 37
1 file changed, 22 insertions(+), 15 deletions(-)
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index ad464e03b..ae5f2c8bd 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -15,6 +15,7 @@
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/core_timing_util.h"
+#include "core/hardware_properties.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/kernel.h"
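
The new include supplies Core::Hardware::NUM_CPU_CORES, the constant that the scheduling hunks below substitute for GlobalScheduler::NUM_CPU_CORES.
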
@@ -31,11 +32,15 @@ bool Thread::ShouldWait(const Thread* thread) const {
     return status != ThreadStatus::Dead;
 }

+bool Thread::IsSignaled() const {
+    return status == ThreadStatus::Dead;
+}
+
 void Thread::Acquire(Thread* thread) {
     ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
 }

-Thread::Thread(KernelCore& kernel) : WaitObject{kernel} {}
+Thread::Thread(KernelCore& kernel) : SynchronizationObject{kernel} {}
 Thread::~Thread() = default;

 void Thread::Stop() {
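
This hunk swaps Thread's base class from WaitObject to SynchronizationObject and adds IsSignaled(), the exact complement of ShouldWait(): a thread object becomes signaled once it is dead. A hypothetical caller-side sketch of how a multi-object wait could use the new predicate (illustrative only; FindSignaledIndex is not part of this change):

    #include <cstddef>
    #include <memory>
    #include <optional>
    #include <vector>

    // Hypothetical helper (not in this diff): scan a wait list and report
    // the first object whose wait condition is already satisfied.
    std::optional<std::size_t> FindSignaledIndex(
        const std::vector<std::shared_ptr<SynchronizationObject>>& objects) {
        for (std::size_t i = 0; i < objects.size(); ++i) {
            if (objects[i]->IsSignaled()) {
                return i; // e.g. a Thread that has reached ThreadStatus::Dead
            }
        }
        return std::nullopt; // nothing signaled yet; the caller would block
    }
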
@@ -45,7 +50,7 @@ void Thread::Stop() {
     kernel.ThreadWakeupCallbackHandleTable().Close(callback_handle);
     callback_handle = 0;
     SetStatus(ThreadStatus::Dead);
-    WakeupAllWaitingThreads();
+    Signal();

     // Clean up any dangling references in objects that this thread was waiting for
     for (auto& wait_object : wait_objects) {
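
Stop() previously woke its waiters directly via WakeupAllWaitingThreads(); it now routes through the SynchronizationObject interface. The ordering matters: the status flips to Dead first, so IsSignaled() already holds by the time any waiter wakes. A condensed sketch of the assumed sequence (Signal()'s body lives outside this diff; the comment on its behaviour is an assumption):

    // Condensed sketch of the sequence this hunk establishes.
    SetStatus(ThreadStatus::Dead); // IsSignaled() is true from here on
    Signal();                      // assumed to wake all threads blocked on this object
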
@@ -215,7 +220,7 @@ void Thread::SetWaitSynchronizationOutput(s32 output) {
     context.cpu_registers[1] = output;
 }

-s32 Thread::GetWaitObjectIndex(std::shared_ptr<WaitObject> object) const {
+s32 Thread::GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const {
     ASSERT_MSG(!wait_objects.empty(), "Thread is not waiting for anything");
     const auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object);
     return static_cast<s32>(std::distance(match, wait_objects.rend()) - 1);
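
Only the name and parameter type change here, but the index math is worth unpacking: the list is searched from the back, so a handle that appears more than once resolves to its last occurrence, and the reverse iterator is then converted back to a forward index. A standalone worked example, with plain ints standing in for the shared_ptr handles:

    #include <algorithm>
    #include <iterator>
    #include <vector>

    // Same arithmetic as GetSynchronizationObjectIndex, on plain ints.
    int IndexOfLastOccurrence(const std::vector<int>& objects, int object) {
        const auto match = std::find(objects.rbegin(), objects.rend(), object);
        // For {7, 8, 9} and object == 8: distance(match, rend()) == 2,
        // and 2 - 1 == 1 -- the forward index of 8. Searching from the back
        // means duplicated entries resolve to their last forward position.
        return static_cast<int>(std::distance(match, objects.rend()) - 1);
    }
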
@@ -336,14 +341,16 @@ void Thread::ChangeCore(u32 core, u64 mask) {
     SetCoreAndAffinityMask(core, mask);
 }

-bool Thread::AllWaitObjectsReady() const {
-    return std::none_of(
-        wait_objects.begin(), wait_objects.end(),
-        [this](const std::shared_ptr<WaitObject>& object) { return object->ShouldWait(this); });
+bool Thread::AllSynchronizationObjectsReady() const {
+    return std::none_of(wait_objects.begin(), wait_objects.end(),
+                        [this](const std::shared_ptr<SynchronizationObject>& object) {
+                            return object->ShouldWait(this);
+                        });
 }

 bool Thread::InvokeWakeupCallback(ThreadWakeupReason reason, std::shared_ptr<Thread> thread,
-                                  std::shared_ptr<WaitObject> object, std::size_t index) {
+                                  std::shared_ptr<SynchronizationObject> object,
+                                  std::size_t index) {
     ASSERT(wakeup_callback);
     return wakeup_callback(reason, std::move(thread), std::move(object), index);
 }
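
AllSynchronizationObjectsReady() keeps its std::none_of formulation through the rename: the thread may wake only when no object it waits on still demands a wait. The equivalent explicit loop, for readers less used to the algorithm header:

    #include <memory>
    #include <vector>

    // Loop equivalent of the std::none_of call above (sketch; same semantics).
    bool AllReady(const std::vector<std::shared_ptr<SynchronizationObject>>& objs,
                  const Thread* self) {
        for (const auto& object : objs) {
            if (object->ShouldWait(self)) {
                return false; // at least one object is still unavailable
            }
        }
        return true;
    }
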
@@ -425,7 +432,7 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
     const s32 old_core = processor_id;
     if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
         if (static_cast<s32>(ideal_core) < 0) {
-            processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES);
+            processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES);
         } else {
             processor_id = ideal_core;
         }
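
From here on, the diff is a mechanical retarget of NUM_CPU_CORES from GlobalScheduler to Core::Hardware. The surrounding logic repairs an invalid core assignment: if the current core is no longer permitted by the new affinity mask and no ideal core is set, the thread falls back to the highest allowed core. A plausible sketch of the HighestSetCore helper it calls (the real definition is elsewhere in this file and not shown in the diff):

    // Sketch under the assumption that HighestSetCore scans from the top
    // core downward and returns the first core the mask allows.
    static s32 HighestSetCore(u64 mask, u32 max_cores) {
        for (s32 core = static_cast<s32>(max_cores) - 1; core >= 0; core--) {
            if (((mask >> core) & 1) != 0) {
                return core;
            }
        }
        return -1; // hypothetical empty-mask fallback
    }
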
@@ -449,7 +456,7 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
         scheduler.Unschedule(current_priority, static_cast<u32>(processor_id), this);
     }

-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
             scheduler.Unsuggest(current_priority, core, this);
         }
@@ -460,7 +467,7 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
         scheduler.Schedule(current_priority, static_cast<u32>(processor_id), this);
     }

-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
             scheduler.Suggest(current_priority, core, this);
         }
@@ -479,7 +486,7 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
         scheduler.Unschedule(old_priority, static_cast<u32>(processor_id), this);
     }

-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
             scheduler.Unsuggest(old_priority, core, this);
         }
@@ -496,7 +503,7 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
         }
     }

-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (core != static_cast<u32>(processor_id) && ((affinity_mask >> core) & 1) != 0) {
             scheduler.Suggest(current_priority, core, this);
         }
@@ -512,7 +519,7 @@ void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
         return;
     }

-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (((old_affinity_mask >> core) & 1) != 0) {
             if (core == static_cast<u32>(old_core)) {
                 scheduler.Unschedule(current_priority, core, this);
@@ -522,7 +529,7 @@ void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
         }
     }

-    for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (((affinity_mask >> core) & 1) != 0) {
             if (core == static_cast<u32>(processor_id)) {
                 scheduler.Schedule(current_priority, core, this);
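
Every loop touched by the remaining hunks shares one shape: the thread is scheduled (or unscheduled) on the core it actually occupies and merely suggested (or unsuggested) on every other core its affinity mask permits, marking it as a migration candidate there. A condensed sketch of that shared pattern, using the scheduler calls visible in the diff (the wrapper function itself is hypothetical):

    // Condensed sketch of the per-core pattern behind the hunks above.
    void AddToSchedulers(GlobalScheduler& scheduler, Thread* thread, u32 priority,
                         u64 affinity_mask, u32 current_core) {
        for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
            if (((affinity_mask >> core) & 1) == 0) {
                continue; // affinity mask forbids this core
            }
            if (core == current_core) {
                scheduler.Schedule(priority, core, thread); // runnable here
            } else {
                scheduler.Suggest(priority, core, thread); // migration candidate
            }
        }
    }
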