| | |
|---|---|
| author | 2021-01-10 22:09:56 -0700 |
| committer | 2021-01-10 22:09:56 -0700 |
| commit | 7a3c884e39fccfbb498b855080bffabc9ce2e7f1 (patch) |
| tree | 5056f9406dec188439cb0deb87603498243a9412 /src/core/hle/kernel/thread.cpp |
| parent | More forgetting... duh (diff) |
| parent | Merge pull request #5229 from Morph1984/fullscreen-opt (diff) |
| download | yuzu-7a3c884e39fccfbb498b855080bffabc9ce2e7f1.tar.gz yuzu-7a3c884e39fccfbb498b855080bffabc9ce2e7f1.tar.xz yuzu-7a3c884e39fccfbb498b855080bffabc9ce2e7f1.zip |
Merge remote-tracking branch 'upstream/master' into int-flags
Diffstat (limited to 'src/core/hle/kernel/thread.cpp')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | src/core/hle/kernel/thread.cpp | 120 |

1 file changed, 29 insertions(+), 91 deletions(-)
```diff
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index d132aba34..a4f9e0d97 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -12,17 +12,16 @@
 #include "common/fiber.h"
 #include "common/logging/log.h"
 #include "common/thread_queue_list.h"
-#include "core/arm/arm_interface.h"
-#include "core/arm/unicorn/arm_unicorn.h"
 #include "core/core.h"
 #include "core/cpu_manager.h"
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/process.h"
-#include "core/hle/kernel/scheduler.h"
 #include "core/hle/kernel/thread.h"
 #include "core/hle/kernel/time_manager.h"
 #include "core/hle/result.h"
@@ -52,7 +51,7 @@ Thread::~Thread() = default;
 
 void Thread::Stop() {
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         SetStatus(ThreadStatus::Dead);
         Signal();
         kernel.GlobalHandleTable().Close(global_handle);
```
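The recurring change in this file is the rename of the scoped scheduler guard: `SchedulerLock` becomes `KScopedSchedulerLock`, provided by the newly included `k_scheduler.h`. A minimal sketch of the RAII shape such a guard takes (here `KernelCore` is a hypothetical stand-in, not yuzu's real type):

```cpp
#include <mutex>

// Hypothetical stand-in for the kernel object the guard locks.
struct KernelCore {
    std::recursive_mutex scheduler_mutex;
    void SelectThreads() { /* choose the next thread for each core */ }
};

// RAII guard: lock the global scheduler on construction, reschedule and
// unlock on destruction. No early return, exception, or forgotten unlock
// can leave the scheduler locked.
class KScopedSchedulerLock {
public:
    explicit KScopedSchedulerLock(KernelCore& kernel) : kernel{kernel} {
        kernel.scheduler_mutex.lock();
    }
    ~KScopedSchedulerLock() {
        kernel.SelectThreads();
        kernel.scheduler_mutex.unlock();
    }
    // Tied to one scope: copying or moving the guard makes no sense.
    KScopedSchedulerLock(const KScopedSchedulerLock&) = delete;
    KScopedSchedulerLock& operator=(const KScopedSchedulerLock&) = delete;

private:
    KernelCore& kernel;
};
```

Because the unlock lives in the destructor, the early `return` paths in `ResumeFromWait` and `CancelWait` below release the scheduler correctly with no extra code.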
```diff
@@ -63,14 +62,13 @@ void Thread::Stop() {
             // Mark the TLS slot in the thread's page as free.
             owner_process->FreeTLSRegion(tls_address);
         }
-        arm_interface.reset();
         has_exited = true;
     }
     global_handle = 0;
 }
 
 void Thread::ResumeFromWait() {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     switch (status) {
     case ThreadStatus::Paused:
     case ThreadStatus::WaitSynch:
@@ -91,10 +89,6 @@ void Thread::ResumeFromWait() {
         // before actually resuming. We can ignore subsequent wakeups if the thread status has
         // already been set to ThreadStatus::Ready.
         return;
-
-    case ThreadStatus::Running:
-        DEBUG_ASSERT_MSG(false, "Thread with object id {} has already resumed.", GetObjectId());
-        return;
     case ThreadStatus::Dead:
         // This should never happen, as threads must complete before being stopped.
         DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.",
@@ -106,19 +100,18 @@ void Thread::ResumeFromWait() {
 }
 
 void Thread::OnWakeUp() {
-    SchedulerLock lock(kernel);
-
+    KScopedSchedulerLock lock(kernel);
     SetStatus(ThreadStatus::Ready);
 }
 
 ResultCode Thread::Start() {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     SetStatus(ThreadStatus::Ready);
     return RESULT_SUCCESS;
 }
 
 void Thread::CancelWait() {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) {
         is_sync_cancelled = true;
         return;
@@ -193,12 +186,14 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
     thread->status = ThreadStatus::Dormant;
     thread->entry_point = entry_point;
     thread->stack_top = stack_top;
+    thread->disable_count = 1;
     thread->tpidr_el0 = 0;
     thread->nominal_priority = thread->current_priority = priority;
-    thread->last_running_ticks = 0;
+    thread->schedule_count = -1;
+    thread->last_scheduled_tick = 0;
     thread->processor_id = processor_id;
     thread->ideal_core = processor_id;
-    thread->affinity_mask = 1ULL << processor_id;
+    thread->affinity_mask.SetAffinity(processor_id, true);
     thread->wait_objects = nullptr;
     thread->mutex_wait_address = 0;
     thread->condvar_wait_address = 0;
```
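`Thread::Create` also stops treating `affinity_mask` as a raw `u64` (`1ULL << processor_id`) and goes through `SetAffinity` instead; later hunks use `SetAffinityMask`, `GetAffinity`, and `GetAffinityMask` as well. A sketch of a one-bit-per-core wrapper with that interface, assuming a plain 64-bit backing store (illustrative; yuzu's actual `KAffinityMask` lives in the kernel headers):

```cpp
#include <cstdint>

// One bit per CPU core: bit N set means the thread may run on core N.
class KAffinityMask {
public:
    constexpr std::uint64_t GetAffinityMask() const { return mask; }
    constexpr void SetAffinityMask(std::uint64_t new_mask) { mask = new_mask; }

    constexpr bool GetAffinity(std::int32_t core) const {
        return ((mask >> core) & 1) != 0;
    }
    constexpr void SetAffinity(std::int32_t core, bool allowed) {
        if (allowed) {
            mask |= std::uint64_t{1} << core;
        } else {
            mask &= ~(std::uint64_t{1} << core);
        }
    }

private:
    std::uint64_t mask{};
};
```

Wrapping the mask keeps the bit fiddling in one place; the call sites in `SetCoreAndAffinityMask` below then read as intent (`GetAffinity(processor_id)`) rather than shifts.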
```diff
@@ -208,7 +203,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
     thread->owner_process = owner_process;
     thread->type = type_flags;
     if ((type_flags & THREADTYPE_IDLE) == 0) {
-        auto& scheduler = kernel.GlobalScheduler();
+        auto& scheduler = kernel.GlobalSchedulerContext();
         scheduler.AddThread(thread);
     }
     if (owner_process) {
@@ -217,33 +212,10 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
     } else {
         thread->tls_address = 0;
     }
+
     // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
     // to initialize the context
-    thread->arm_interface.reset();
     if ((type_flags & THREADTYPE_HLE) == 0) {
-#ifdef ARCHITECTURE_x86_64
-        if (owner_process && !owner_process->Is64BitProcess()) {
-            thread->arm_interface = std::make_unique<Core::ARM_Dynarmic_32>(
-                system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(),
-                processor_id);
-        } else {
-            thread->arm_interface = std::make_unique<Core::ARM_Dynarmic_64>(
-                system, kernel.Interrupts(), kernel.IsMulticore(), kernel.GetExclusiveMonitor(),
-                processor_id);
-        }
-
-#else
-        if (owner_process && !owner_process->Is64BitProcess()) {
-            thread->arm_interface = std::make_shared<Core::ARM_Unicorn>(
-                system, kernel.Interrupts(), kernel.IsMulticore(), ARM_Unicorn::Arch::AArch32,
-                processor_id);
-        } else {
-            thread->arm_interface = std::make_shared<Core::ARM_Unicorn>(
-                system, kernel.Interrupts(), kernel.IsMulticore(), ARM_Unicorn::Arch::AArch64,
-                processor_id);
-        }
-        LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
-#endif
         ResetThreadContext32(thread->context_32, static_cast<u32>(stack_top),
                              static_cast<u32>(entry_point), static_cast<u32>(arg));
         ResetThreadContext64(thread->context_64, stack_top, entry_point, arg);
```
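The hunk above removes the last per-thread CPU backend: `Thread::Create` no longer constructs an `ARM_Dynarmic_32/64` or `ARM_Unicorn` interface per thread (hence the dropped `arm_interface.h`/`arm_unicorn.h` includes at the top). A new thread now only gets its register context reset. The gist of such a reset, sketched against a hypothetical `ThreadContext64` (yuzu's real struct belongs to the ARM interface):

```cpp
#include <array>
#include <cstdint>

// Hypothetical AArch64 thread context; the real one carries more state.
struct ThreadContext64 {
    std::array<std::uint64_t, 31> cpu_registers{}; // X0..X30
    std::uint64_t sp = 0;
    std::uint64_t pc = 0;
};

// Zero everything, then seed only what a fresh thread needs: the entry
// argument in X0, the program counter, and the stack pointer.
void ResetThreadContext64(ThreadContext64& context, std::uint64_t stack_top,
                          std::uint64_t entry_point, std::uint64_t arg) {
    context = {};
    context.cpu_registers[0] = arg;
    context.pc = entry_point;
    context.sp = stack_top;
}
```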
```diff
@@ -255,7 +227,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
 }
 
 void Thread::SetPriority(u32 priority) {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
                "Invalid priority value.");
     nominal_priority = priority;
@@ -279,14 +251,6 @@ VAddr Thread::GetCommandBufferAddress() const {
     return GetTLSAddress() + command_header_offset;
 }
 
-Core::ARM_Interface& Thread::ArmInterface() {
-    return *arm_interface;
-}
-
-const Core::ARM_Interface& Thread::ArmInterface() const {
-    return *arm_interface;
-}
-
 void Thread::SetStatus(ThreadStatus new_status) {
     if (new_status == status) {
         return;
@@ -294,7 +258,6 @@ void Thread::SetStatus(ThreadStatus new_status) {
 
     switch (new_status) {
     case ThreadStatus::Ready:
-    case ThreadStatus::Running:
         SetSchedulingStatus(ThreadSchedStatus::Runnable);
         break;
     case ThreadStatus::Dormant:
@@ -401,7 +364,7 @@ bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
 }
 
 ResultCode Thread::SetActivity(ThreadActivity value) {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
 
     auto sched_status = GetSchedulingStatus();
 
@@ -430,7 +393,7 @@ ResultCode Thread::SetActivity(ThreadActivity value) {
 ResultCode Thread::Sleep(s64 nanoseconds) {
     Handle event_handle{};
     {
-        SchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
+        KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
         SetStatus(ThreadStatus::WaitSleep);
     }
 
@@ -441,39 +404,12 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
     return RESULT_SUCCESS;
 }
 
-std::pair<ResultCode, bool> Thread::YieldSimple() {
-    bool is_redundant = false;
-    {
-        SchedulerLock lock(kernel);
-        is_redundant = kernel.GlobalScheduler().YieldThread(this);
-    }
-    return {RESULT_SUCCESS, is_redundant};
-}
-
-std::pair<ResultCode, bool> Thread::YieldAndBalanceLoad() {
-    bool is_redundant = false;
-    {
-        SchedulerLock lock(kernel);
-        is_redundant = kernel.GlobalScheduler().YieldThreadAndBalanceLoad(this);
-    }
-    return {RESULT_SUCCESS, is_redundant};
-}
-
-std::pair<ResultCode, bool> Thread::YieldAndWaitForLoadBalancing() {
-    bool is_redundant = false;
-    {
-        SchedulerLock lock(kernel);
-        is_redundant = kernel.GlobalScheduler().YieldThreadAndWaitForLoadBalancing(this);
-    }
-    return {RESULT_SUCCESS, is_redundant};
-}
-
 void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
     const u32 old_state = scheduling_state;
     pausing_state |= static_cast<u32>(flag);
     const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
     scheduling_state = base_scheduling | pausing_state;
-    kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
+    KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
 void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
```
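Two changes land above. First, the three `Yield*` wrappers disappear from `Thread`; yield behavior belongs to the rewritten scheduler rather than to the thread object. Second, instance calls like `kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state)` become static hooks: `KScheduler::OnThreadStateChanged(kernel, this, old_state)`. A simplified sketch of that static-hook pattern, with a single run queue standing in for yuzu's per-core structures:

```cpp
#include <cstdint>
#include <deque>

// Simplified stand-ins; the real types carry far more state.
struct Thread {
    std::uint32_t scheduling_state = 0;
};

struct KernelCore {
    std::deque<Thread*> runnable_queue;
};

constexpr std::uint32_t RunnableState = 1;

// Static hook: callers pass the kernel instead of holding a scheduler
// instance. The hook diffs the old state against the thread's current
// state and fixes up the run queue accordingly.
struct KScheduler {
    static void OnThreadStateChanged(KernelCore& kernel, Thread* thread,
                                     std::uint32_t old_state) {
        const bool was_runnable = old_state == RunnableState;
        const bool is_runnable = thread->scheduling_state == RunnableState;
        if (was_runnable && !is_runnable) {
            std::erase(kernel.runnable_queue, thread); // C++20 uniform erase
        } else if (!was_runnable && is_runnable) {
            kernel.runnable_queue.push_back(thread);
        }
    }
};
```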
```diff
@@ -481,23 +417,24 @@ void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
     pausing_state &= ~static_cast<u32>(flag);
     const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
     scheduling_state = base_scheduling | pausing_state;
-    kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
+    KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
 void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
     const u32 old_state = scheduling_state;
     scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
                        static_cast<u32>(new_status);
-    kernel.GlobalScheduler().AdjustSchedulingOnStatus(this, old_state);
+    KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
 void Thread::SetCurrentPriority(u32 new_priority) {
     const u32 old_priority = std::exchange(current_priority, new_priority);
-    kernel.GlobalScheduler().AdjustSchedulingOnPriority(this, old_priority);
+    KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(),
+                                        old_priority);
 }
 
 ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     const auto HighestSetCore = [](u64 mask, u32 max_cores) {
         for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
             if (((mask >> core) & 1) != 0) {
@@ -518,20 +455,21 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
     }
     if (use_override) {
         ideal_core_override = new_core;
-        affinity_mask_override = new_affinity_mask;
     } else {
-        const u64 old_affinity_mask = std::exchange(affinity_mask, new_affinity_mask);
+        const auto old_affinity_mask = affinity_mask;
+        affinity_mask.SetAffinityMask(new_affinity_mask);
         ideal_core = new_core;
-        if (old_affinity_mask != new_affinity_mask) {
+        if (old_affinity_mask.GetAffinityMask() != new_affinity_mask) {
             const s32 old_core = processor_id;
-            if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
+            if (processor_id >= 0 && !affinity_mask.GetAffinity(processor_id)) {
                 if (static_cast<s32>(ideal_core) < 0) {
-                    processor_id = HighestSetCore(affinity_mask, Core::Hardware::NUM_CPU_CORES);
+                    processor_id = HighestSetCore(affinity_mask.GetAffinityMask(),
+                                                  Core::Hardware::NUM_CPU_CORES);
                 } else {
                     processor_id = ideal_core;
                 }
             }
-            kernel.GlobalScheduler().AdjustSchedulingOnAffinity(this, old_affinity_mask, old_core);
+            KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_affinity_mask, old_core);
         }
     }
     return RESULT_SUCCESS;
```
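In this last hunk, when the new mask no longer contains the thread's current core and no ideal core is set, the thread migrates to the highest allowed core via the `HighestSetCore` lambda. The same scan can be written standalone with C++20's `<bit>`; a small sketch, assuming `max_cores` is below 64:

```cpp
#include <bit>
#include <cstdint>

// Highest set bit below max_cores, or -1 when no allowed core remains.
// Mirrors the behavior of the HighestSetCore lambda in the diff.
int HighestSetCore(std::uint64_t mask, std::uint32_t max_cores) {
    mask &= (std::uint64_t{1} << max_cores) - 1;        // drop bits past the core count
    return static_cast<int>(std::bit_width(mask)) - 1;  // bit_width(0) - 1 == -1
}

int main() {
    // Cores {0, 2} allowed on a 4-core system: the thread lands on core 2.
    return HighestSetCore(0b0101, 4) == 2 ? 0 : 1;
}
```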