| author | 2019-03-29 17:01:46 -0400 |
|---|---|
| committer | 2019-10-15 11:55:06 -0400 |
| commit | a1ac0c6cb47e10863b0bfbb1a6aadc71ccc513ab (patch) |
| tree | d4476f115b69c74f543f7992006f8e5548cd7f54 /src |
| parent | Implement a new Core Scheduler (diff) |
Adapt thread class to the new Scheduler
Diffstat (limited to 'src')
| -rw-r--r-- | src/core/hle/kernel/thread.cpp | 242 |
| -rw-r--r-- | src/core/hle/kernel/thread.h | 55 |
2 files changed, 237 insertions, 60 deletions
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index ec529e7f2..d0fa7b370 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
| @@ -45,15 +45,7 @@ void Thread::Stop() { | |||
| 45 | callback_handle); | 45 | callback_handle); |
| 46 | kernel.ThreadWakeupCallbackHandleTable().Close(callback_handle); | 46 | kernel.ThreadWakeupCallbackHandleTable().Close(callback_handle); |
| 47 | callback_handle = 0; | 47 | callback_handle = 0; |
| 48 | 48 | SetStatus(ThreadStatus::Dead); | |
| 49 | // Clean up thread from ready queue | ||
| 50 | // This is only needed when the thread is terminated forcefully (SVC TerminateProcess) | ||
| 51 | if (status == ThreadStatus::Ready || status == ThreadStatus::Paused) { | ||
| 52 | scheduler->UnscheduleThread(this, current_priority); | ||
| 53 | } | ||
| 54 | |||
| 55 | status = ThreadStatus::Dead; | ||
| 56 | |||
| 57 | WakeupAllWaitingThreads(); | 49 | WakeupAllWaitingThreads(); |
| 58 | 50 | ||
| 59 | // Clean up any dangling references in objects that this thread was waiting for | 51 | // Clean up any dangling references in objects that this thread was waiting for |
| @@ -132,13 +124,11 @@ void Thread::ResumeFromWait() { | |||
| 132 | wakeup_callback = nullptr; | 124 | wakeup_callback = nullptr; |
| 133 | 125 | ||
| 134 | if (activity == ThreadActivity::Paused) { | 126 | if (activity == ThreadActivity::Paused) { |
| 135 | status = ThreadStatus::Paused; | 127 | SetStatus(ThreadStatus::Paused); |
| 136 | return; | 128 | return; |
| 137 | } | 129 | } |
| 138 | 130 | ||
| 139 | status = ThreadStatus::Ready; | 131 | SetStatus(ThreadStatus::Ready); |
| 140 | |||
| 141 | ChangeScheduler(); | ||
| 142 | } | 132 | } |
| 143 | 133 | ||
| 144 | void Thread::CancelWait() { | 134 | void Thread::CancelWait() { |
| @@ -205,9 +195,9 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name | |||
| 205 | thread->name = std::move(name); | 195 | thread->name = std::move(name); |
| 206 | thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap(); | 196 | thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap(); |
| 207 | thread->owner_process = &owner_process; | 197 | thread->owner_process = &owner_process; |
| 198 | auto& scheduler = kernel.GlobalScheduler(); | ||
| 199 | scheduler.AddThread(thread); | ||
| 208 | thread->tls_address = thread->owner_process->CreateTLSRegion(); | 200 | thread->tls_address = thread->owner_process->CreateTLSRegion(); |
| 209 | thread->scheduler = &system.Scheduler(processor_id); | ||
| 210 | thread->scheduler->AddThread(thread); | ||
| 211 | 201 | ||
| 212 | thread->owner_process->RegisterThread(thread.get()); | 202 | thread->owner_process->RegisterThread(thread.get()); |
| 213 | 203 | ||
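With this hunk, a newly created thread is registered once with the kernel-wide GlobalScheduler instead of attaching itself to the per-core Scheduler selected by processor_id. The standalone sketch below models that ownership change under simplified, invented types (GlobalSched, IllustrativeThread, CreateThread); it illustrates the registration flow only and is not yuzu's actual API.

```cpp
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>

// Hypothetical stand-ins for illustration only; these are not yuzu's classes.
struct IllustrativeThread {
    std::string name;
    int32_t processor_id = 0;
};

class GlobalSched {
public:
    // Every thread in the system is registered here exactly once,
    // regardless of which core it will eventually run on.
    void AddThread(std::shared_ptr<IllustrativeThread> t) {
        all_threads.push_back(std::move(t));
    }
    std::size_t TrackedThreads() const {
        return all_threads.size();
    }

private:
    std::vector<std::shared_ptr<IllustrativeThread>> all_threads;
};

std::shared_ptr<IllustrativeThread> CreateThread(GlobalSched& scheduler, std::string name,
                                                 int32_t core) {
    auto thread = std::make_shared<IllustrativeThread>();
    thread->name = std::move(name);
    thread->processor_id = core;
    // Mirrors the new code path: register with the single global scheduler
    // instead of a per-core scheduler chosen by processor_id.
    scheduler.AddThread(thread);
    return thread;
}

int main() {
    GlobalSched scheduler;
    CreateThread(scheduler, "main", 0);
    CreateThread(scheduler, "audio", 2);
    return scheduler.TrackedThreads() == 2 ? 0 : 1;
}
```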
| @@ -250,6 +240,22 @@ void Thread::SetStatus(ThreadStatus new_status) { | |||
| 250 | return; | 240 | return; |
| 251 | } | 241 | } |
| 252 | 242 | ||
| 243 | switch (new_status) { | ||
| 244 | case ThreadStatus::Ready: | ||
| 245 | case ThreadStatus::Running: | ||
| 246 | SetSchedulingStatus(ThreadSchedStatus::Runnable); | ||
| 247 | break; | ||
| 248 | case ThreadStatus::Dormant: | ||
| 249 | SetSchedulingStatus(ThreadSchedStatus::None); | ||
| 250 | break; | ||
| 251 | case ThreadStatus::Dead: | ||
| 252 | SetSchedulingStatus(ThreadSchedStatus::Exited); | ||
| 253 | break; | ||
| 254 | default: | ||
| 255 | SetSchedulingStatus(ThreadSchedStatus::Paused); | ||
| 256 | break; | ||
| 257 | } | ||
| 258 | |||
| 253 | if (status == ThreadStatus::Running) { | 259 | if (status == ThreadStatus::Running) { |
| 254 | last_running_ticks = Core::System::GetInstance().CoreTiming().GetTicks(); | 260 | last_running_ticks = Core::System::GetInstance().CoreTiming().GetTicks(); |
| 255 | } | 261 | } |
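The switch added to SetStatus collapses the richer ThreadStatus enum into the four scheduling states the global scheduler tracks. Below is a minimal standalone version of that mapping; ThreadStatus is abbreviated to a few representative values, since everything that is not Ready/Running/Dormant/Dead falls into the default branch, exactly as in the hunk above.

```cpp
#include <cstdint>
#include <iostream>

// Values mirrored from this diff; ThreadStatus is abbreviated to the cases that
// matter for the mapping (every other waiting state behaves like the default branch).
enum class ThreadStatus { Ready, Running, Dormant, Dead, WaitSleep /* ... */ };
enum class ThreadSchedStatus : uint32_t { None = 0, Paused = 1, Runnable = 2, Exited = 3 };

ThreadSchedStatus ToSchedStatus(ThreadStatus status) {
    switch (status) {
    case ThreadStatus::Ready:
    case ThreadStatus::Running:
        return ThreadSchedStatus::Runnable;
    case ThreadStatus::Dormant:
        return ThreadSchedStatus::None;
    case ThreadStatus::Dead:
        return ThreadSchedStatus::Exited;
    default:
        // Every waiting/paused flavour of ThreadStatus maps to Paused.
        return ThreadSchedStatus::Paused;
    }
}

int main() {
    std::cout << static_cast<uint32_t>(ToSchedStatus(ThreadStatus::WaitSleep)) << '\n'; // 1
    std::cout << static_cast<uint32_t>(ToSchedStatus(ThreadStatus::Running)) << '\n';   // 2
}
```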
| @@ -311,8 +317,7 @@ void Thread::UpdatePriority() { | |||
| 311 | return; | 317 | return; |
| 312 | } | 318 | } |
| 313 | 319 | ||
| 314 | scheduler->SetThreadPriority(this, new_priority); | 320 | SetCurrentPriority(new_priority); |
| 315 | current_priority = new_priority; | ||
| 316 | 321 | ||
| 317 | if (!lock_owner) { | 322 | if (!lock_owner) { |
| 318 | return; | 323 | return; |
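UpdatePriority now funnels the change through SetCurrentPriority, which re-queues the thread at its new priority, and the part of the function beyond this hunk propagates the boost along the lock_owner chain. The sketch below is a generic, simplified model of that priority-inheritance walk (SimpleThread and its fields are invented for illustration); lower numbers mean higher priority, as in the HLE kernel.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Simplified model: lower number == higher priority.
struct SimpleThread {
    uint32_t nominal_priority;
    uint32_t current_priority;
    SimpleThread* lock_owner = nullptr;  // thread holding the mutex this one waits on
    std::vector<SimpleThread*> waiters;  // threads waiting on a mutex this one holds
};

void UpdatePriority(SimpleThread& t) {
    uint32_t best = t.nominal_priority;
    for (const SimpleThread* w : t.waiters) {
        best = std::min(best, w->current_priority);
    }
    if (best == t.current_priority) {
        return;                          // nothing changed, stop the walk
    }
    t.current_priority = best;           // stands in for SetCurrentPriority()
    if (t.lock_owner != nullptr) {
        UpdatePriority(*t.lock_owner);   // propagate the boost up the lock chain
    }
}

int main() {
    SimpleThread holder{40, 40};
    SimpleThread waiter{16, 16};
    holder.waiters.push_back(&waiter);
    waiter.lock_owner = &holder;
    UpdatePriority(holder);
    std::cout << holder.current_priority << '\n'; // 16, inherited from the waiter
}
```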
| @@ -328,47 +333,7 @@ void Thread::UpdatePriority() { | |||
| 328 | } | 333 | } |
| 329 | 334 | ||
| 330 | void Thread::ChangeCore(u32 core, u64 mask) { | 335 | void Thread::ChangeCore(u32 core, u64 mask) { |
| 331 | ideal_core = core; | 336 | SetCoreAndAffinityMask(core, mask); |
| 332 | affinity_mask = mask; | ||
| 333 | ChangeScheduler(); | ||
| 334 | } | ||
| 335 | |||
| 336 | void Thread::ChangeScheduler() { | ||
| 337 | if (status != ThreadStatus::Ready) { | ||
| 338 | return; | ||
| 339 | } | ||
| 340 | |||
| 341 | auto& system = Core::System::GetInstance(); | ||
| 342 | std::optional<s32> new_processor_id{GetNextProcessorId(affinity_mask)}; | ||
| 343 | |||
| 344 | if (!new_processor_id) { | ||
| 345 | new_processor_id = processor_id; | ||
| 346 | } | ||
| 347 | if (ideal_core != -1 && system.Scheduler(ideal_core).GetCurrentThread() == nullptr) { | ||
| 348 | new_processor_id = ideal_core; | ||
| 349 | } | ||
| 350 | |||
| 351 | ASSERT(*new_processor_id < 4); | ||
| 352 | |||
| 353 | // Add thread to new core's scheduler | ||
| 354 | auto& next_scheduler = system.Scheduler(*new_processor_id); | ||
| 355 | |||
| 356 | if (*new_processor_id != processor_id) { | ||
| 357 | // Remove thread from previous core's scheduler | ||
| 358 | scheduler->RemoveThread(this); | ||
| 359 | next_scheduler.AddThread(this); | ||
| 360 | } | ||
| 361 | |||
| 362 | processor_id = *new_processor_id; | ||
| 363 | |||
| 364 | // If the thread was ready, unschedule from the previous core and schedule on the new core | ||
| 365 | scheduler->UnscheduleThread(this, current_priority); | ||
| 366 | next_scheduler.ScheduleThread(this, current_priority); | ||
| 367 | |||
| 368 | // Change thread's scheduler | ||
| 369 | scheduler = &next_scheduler; | ||
| 370 | |||
| 371 | system.CpuCore(processor_id).PrepareReschedule(); | ||
| 372 | } | 337 | } |
| 373 | 338 | ||
| 374 | bool Thread::AllWaitObjectsReady() const { | 339 | bool Thread::AllWaitObjectsReady() const { |
| @@ -391,7 +356,7 @@ void Thread::SetActivity(ThreadActivity value) { | |||
| 391 | if (status == ThreadStatus::Ready) { | 356 | if (status == ThreadStatus::Ready) { |
| 392 | status = ThreadStatus::Paused; | 357 | status = ThreadStatus::Paused; |
| 393 | } else if (status == ThreadStatus::Running) { | 358 | } else if (status == ThreadStatus::Running) { |
| 394 | status = ThreadStatus::Paused; | 359 | SetStatus(ThreadStatus::Paused); |
| 395 | Core::System::GetInstance().CpuCore(processor_id).PrepareReschedule(); | 360 | Core::System::GetInstance().CpuCore(processor_id).PrepareReschedule(); |
| 396 | } | 361 | } |
| 397 | } else if (status == ThreadStatus::Paused) { | 362 | } else if (status == ThreadStatus::Paused) { |
| @@ -408,6 +373,165 @@ void Thread::Sleep(s64 nanoseconds) { | |||
| 408 | WakeAfterDelay(nanoseconds); | 373 | WakeAfterDelay(nanoseconds); |
| 409 | } | 374 | } |
| 410 | 375 | ||
| 376 | void Thread::YieldType0() { | ||
| 377 | auto& scheduler = kernel.GlobalScheduler(); | ||
| 378 | scheduler.YieldThread(this); | ||
| 379 | } | ||
| 380 | |||
| 381 | void Thread::YieldType1() { | ||
| 382 | auto& scheduler = kernel.GlobalScheduler(); | ||
| 383 | scheduler.YieldThreadAndBalanceLoad(this); | ||
| 384 | } | ||
| 385 | |||
| 386 | void Thread::YieldType2() { | ||
| 387 | auto& scheduler = kernel.GlobalScheduler(); | ||
| 388 | scheduler.YieldThreadAndWaitForLoadBalancing(this); | ||
| 389 | } | ||
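These three entry points expose the three yield flavours of the new global scheduler. On the Horizon kernel the same behaviours are reached through svcSleepThread with special timeout values (0, -1 and -2); the dispatcher below is a hypothetical illustration of that routing, with the constants and the YieldingThread type assumed for the sketch rather than taken from this commit.

```cpp
#include <cstdint>
#include <iostream>

// Hypothetical thread interface mirroring the three new member functions.
struct YieldingThread {
    void YieldType0() { std::cout << "yield, no load balancing\n"; }
    void YieldType1() { std::cout << "yield with load balancing\n"; }
    void YieldType2() { std::cout << "yield and wait for load balancing\n"; }
    void Sleep(int64_t ns) { std::cout << "sleep " << ns << " ns\n"; }
};

// Special svcSleepThread timeouts as commonly documented for the Switch kernel
// (assumed constants for this sketch).
constexpr int64_t YieldWithoutCoreMigration = 0;
constexpr int64_t YieldWithCoreMigration = -1;
constexpr int64_t YieldToAnyThread = -2;

void SvcSleepThread(YieldingThread& current, int64_t nanoseconds) {
    switch (nanoseconds) {
    case YieldWithoutCoreMigration:
        current.YieldType0();
        break;
    case YieldWithCoreMigration:
        current.YieldType1();
        break;
    case YieldToAnyThread:
        current.YieldType2();
        break;
    default:
        current.Sleep(nanoseconds); // a real timeout: block until it expires
        break;
    }
}

int main() {
    YieldingThread t;
    SvcSleepThread(t, -1);
    SvcSleepThread(t, 1'000'000);
}
```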
| 390 | |||
| 391 | void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) { | ||
| 392 | u32 old_flags = scheduling_state; | ||
| 393 | scheduling_state = | ||
| 394 | (scheduling_state & ThreadSchedMasks::HighMask) | static_cast<u32>(new_status); | ||
| 395 | AdjustSchedulingOnStatus(old_flags); | ||
| 396 | } | ||
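SetSchedulingStatus rewrites only the low status bits of scheduling_state, preserves whatever pause flags live in the high bits, and then lets AdjustSchedulingOnStatus bail out early when nothing actually changed. A minimal standalone sketch of that update, using the LowMask/HighMask values from the thread.h half of this diff and a counter standing in for the real re-queueing work:

```cpp
#include <cassert>
#include <cstdint>

// Mask values copied from the thread.h half of this diff.
constexpr uint32_t LowMask = 0x000f;   // scheduling status lives here
constexpr uint32_t HighMask = 0xfff0;  // pause/force-pause flags live here

enum class SchedStatus : uint32_t { None = 0, Paused = 1, Runnable = 2, Exited = 3 };

struct SchedState {
    uint32_t packed = 0;
    int transitions = 0;

    void SetStatus(SchedStatus new_status) {
        const uint32_t old_packed = packed;
        packed = (packed & HighMask) | static_cast<uint32_t>(new_status);
        AdjustOnStatus(old_packed);      // mirrors AdjustSchedulingOnStatus(old_flags)
    }

    void AdjustOnStatus(uint32_t old_packed) {
        if (old_packed == packed) {
            return;                      // nothing actually changed
        }
        ++transitions;                   // the real code re-queues the thread here
    }

    SchedStatus Status() const {
        return static_cast<SchedStatus>(packed & LowMask);
    }
};

int main() {
    SchedState s;
    s.packed = 1u << 5;                          // a pause flag parked in the high bits
    s.SetStatus(SchedStatus::Runnable);
    assert(s.Status() == SchedStatus::Runnable);
    assert((s.packed & HighMask) == (1u << 5));  // the high flags survive the update
    assert(s.transitions == 1);
    s.SetStatus(SchedStatus::Runnable);          // no change -> no re-queueing
    assert(s.transitions == 1);
    return 0;
}
```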
| 397 | |||
| 398 | void Thread::SetCurrentPriority(u32 new_priority) { | ||
| 399 | u32 old_priority = current_priority; | ||
| 400 | current_priority = new_priority; | ||
| 401 | AdjustSchedulingOnPriority(old_priority); | ||
| 402 | } | ||
| 403 | |||
| 404 | ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { | ||
| 405 | auto HighestSetCore = [](u64 mask, u32 max_cores) { | ||
| 406 | for (s32 core = max_cores - 1; core >= 0; core--) { | ||
| 407 | if (((mask >> core) & 1) != 0) | ||
| 408 | return core; | ||
| 409 | } | ||
| 410 | return -1; | ||
| 411 | }; | ||
| 412 | bool use_override = affinity_override_count != 0; | ||
| 413 | // The value -3 is "do not change the ideal core". | ||
| 414 | if (new_core == -3) { | ||
| 415 | new_core = use_override ? ideal_core_override : ideal_core; | ||
| 416 | if ((new_affinity_mask & (1 << new_core)) == 0) { | ||
| 417 | return ERR_INVALID_COMBINATION; | ||
| 418 | } | ||
| 419 | } | ||
| 420 | if (use_override) { | ||
| 421 | ideal_core_override = new_core; | ||
| 422 | affinity_mask_override = new_affinity_mask; | ||
| 423 | } else { | ||
| 424 | u64 old_affinity_mask = affinity_mask; | ||
| 425 | ideal_core = new_core; | ||
| 426 | affinity_mask = new_affinity_mask; | ||
| 427 | if (old_affinity_mask != new_affinity_mask) { | ||
| 428 | s32 old_core = processor_id; | ||
| 429 | if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) { | ||
| 430 | if (ideal_core < 0) { | ||
| 431 | processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES); | ||
| 432 | } else { | ||
| 433 | processor_id = ideal_core; | ||
| 434 | } | ||
| 435 | } | ||
| 436 | AdjustSchedulingOnAffinity(old_affinity_mask, old_core); | ||
| 437 | } | ||
| 438 | } | ||
| 439 | return RESULT_SUCCESS; | ||
| 440 | } | ||
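SetCoreAndAffinityMask treats -3 as "keep the current ideal core", rejects masks that exclude that core, and, when the core the thread was queued on drops out of the new mask, falls back to the ideal core or to the highest allowed core. The condensed sketch below mirrors that structure with plain types; the affinity-override path is omitted, NUM_CPU_CORES is fixed at 4, and a bool stands in for ResultCode. It is an illustration, not the real implementation.

```cpp
#include <cassert>
#include <cstdint>

constexpr int NumCores = 4; // the Switch has four application cores

struct CoreAffinity {
    int32_t ideal_core = 0;
    uint64_t affinity_mask = 0x1;
    int32_t processor_id = 0;
};

// Returns the highest core index whose bit is set, or -1 for an empty mask.
int32_t HighestSetCore(uint64_t mask) {
    for (int32_t core = NumCores - 1; core >= 0; core--) {
        if ((mask >> core) & 1) {
            return core;
        }
    }
    return -1;
}

// Returns false on the equivalent of ERR_INVALID_COMBINATION.
bool SetCoreAndAffinityMask(CoreAffinity& t, int32_t new_core, uint64_t new_mask) {
    if (new_core == -3) {                        // -3 means "keep the current ideal core"
        new_core = t.ideal_core;
        if (((new_mask >> new_core) & 1) == 0) {
            return false;                        // mask must still contain the ideal core
        }
    }
    const uint64_t old_mask = t.affinity_mask;
    t.ideal_core = new_core;
    t.affinity_mask = new_mask;
    if (old_mask != new_mask && t.processor_id >= 0 &&
        ((new_mask >> t.processor_id) & 1) == 0) {
        // The core we were queued on is no longer allowed: fall back to the
        // ideal core, or to the highest allowed core if there is no ideal one.
        t.processor_id = (t.ideal_core < 0) ? HighestSetCore(new_mask) : t.ideal_core;
    }
    return true;
}

int main() {
    CoreAffinity t{/*ideal*/ 0, /*mask*/ 0b0001, /*core*/ 0};
    assert(SetCoreAndAffinityMask(t, 3, 0b1100));
    assert(t.processor_id == 3);                     // pushed off core 0 onto the ideal core
    assert(!SetCoreAndAffinityMask(t, -3, 0b0010));  // mask excludes ideal core 3 -> error
    return 0;
}
```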
| 441 | |||
| 442 | void Thread::AdjustSchedulingOnStatus(u32 old_flags) { | ||
| 443 | if (old_flags == scheduling_state) | ||
| 444 | return; | ||
| 445 | |||
| 446 | auto& scheduler = kernel.GlobalScheduler(); | ||
| 447 | if (static_cast<ThreadSchedStatus>(old_flags & ThreadSchedMasks::LowMask) == | ||
| 448 | ThreadSchedStatus::Runnable) { | ||
| 449 | // In this case the thread was running, now it's pausing/exiting | ||
| 450 | if (processor_id >= 0) | ||
| 451 | scheduler.Unschedule(current_priority, processor_id, this); | ||
| 452 | |||
| 453 | for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { | ||
| 454 | if (core != processor_id && ((affinity_mask >> core) & 1) != 0) | ||
| 455 | scheduler.Unsuggest(current_priority, core, this); | ||
| 456 | } | ||
| 457 | } else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) { | ||
| 458 | // The thread is now set to running from being stopped | ||
| 459 | if (processor_id >= 0) | ||
| 460 | scheduler.Schedule(current_priority, processor_id, this); | ||
| 461 | |||
| 462 | for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { | ||
| 463 | if (core != processor_id && ((affinity_mask >> core) & 1) != 0) | ||
| 464 | scheduler.Suggest(current_priority, core, this); | ||
| 465 | } | ||
| 466 | } | ||
| 467 | |||
| 468 | scheduler.SetReselectionPending(); | ||
| 469 | } | ||
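The pattern here is the heart of the new design: a runnable thread is scheduled on the core it is queued on and merely suggested to every other core its affinity mask allows, so an idle core can steal it later. The toy model below shows only that split; ToyGlobalScheduler and its containers are invented for illustration.

```cpp
#include <array>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

constexpr int NumCores = 4;

struct ToyThread {
    std::string name;
    int32_t processor_id;  // core whose run queue owns the thread
    uint64_t affinity_mask;
};

struct ToyGlobalScheduler {
    // scheduled[c]: threads runnable on core c; suggested[c]: threads core c may
    // steal when it runs dry (mirrors Schedule() vs Suggest() above).
    std::array<std::vector<ToyThread*>, NumCores> scheduled;
    std::array<std::vector<ToyThread*>, NumCores> suggested;

    void MakeRunnable(ToyThread& t) {
        if (t.processor_id >= 0) {
            scheduled[t.processor_id].push_back(&t);
        }
        for (int core = 0; core < NumCores; core++) {
            if (core != t.processor_id && ((t.affinity_mask >> core) & 1) != 0) {
                suggested[core].push_back(&t);
            }
        }
    }
};

int main() {
    ToyGlobalScheduler sched;
    ToyThread worker{"worker", 1, 0b1110}; // allowed on cores 1-3, queued on core 1
    sched.MakeRunnable(worker);
    std::cout << "core 1 scheduled: " << sched.scheduled[1].size() << '\n'; // 1
    std::cout << "core 2 suggested: " << sched.suggested[2].size() << '\n'; // 1
    std::cout << "core 0 suggested: " << sched.suggested[0].size() << '\n'; // 0
}
```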
| 470 | |||
| 471 | void Thread::AdjustSchedulingOnPriority(u32 old_priority) { | ||
| 472 | if (GetSchedulingStatus() != ThreadSchedStatus::Runnable) { | ||
| 473 | return; | ||
| 474 | } | ||
| 475 | auto& scheduler = Core::System::GetInstance().GlobalScheduler(); | ||
| 476 | if (processor_id >= 0) { | ||
| 477 | scheduler.Unschedule(old_priority, processor_id, this); | ||
| 478 | } | ||
| 479 | |||
| 480 | for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { | ||
| 481 | if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { | ||
| 482 | scheduler.Unsuggest(old_priority, core, this); | ||
| 483 | } | ||
| 484 | } | ||
| 485 | |||
| 486 | // Add thread to the new priority queues. | ||
| 487 | Thread* current_thread = GetCurrentThread(); | ||
| 488 | |||
| 489 | if (processor_id >= 0) { | ||
| 490 | if (current_thread == this) { | ||
| 491 | scheduler.SchedulePrepend(current_priority, processor_id, this); | ||
| 492 | } else { | ||
| 493 | scheduler.Schedule(current_priority, processor_id, this); | ||
| 494 | } | ||
| 495 | } | ||
| 496 | |||
| 497 | for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { | ||
| 498 | if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { | ||
| 499 | scheduler.Suggest(current_priority, core, this); | ||
| 500 | } | ||
| 501 | } | ||
| 502 | |||
| 503 | scheduler.SetReselectionPending(); | ||
| 504 | } | ||
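A priority change removes the thread from the buckets indexed by the old priority and re-inserts it under the new one, with one subtlety: the thread currently running on its core is pushed to the front (SchedulePrepend) rather than the back, so it does not lose its turn. A minimal sketch of that re-queueing, using an invented per-priority bucket structure (requires C++20 for std::erase):

```cpp
#include <cstdint>
#include <deque>
#include <iostream>
#include <map>
#include <string>

struct PrioThread {
    std::string name;
    uint32_t priority;
};

// One core's run queue: a bucket of threads per priority value (lower = higher priority).
using RunQueue = std::map<uint32_t, std::deque<PrioThread*>>;

void AdjustOnPriority(RunQueue& queue, PrioThread& t, uint32_t old_priority,
                      bool is_current_thread) {
    auto& old_bucket = queue[old_priority];
    std::erase(old_bucket, &t);        // drop the stale entry (C++20)

    auto& new_bucket = queue[t.priority];
    if (is_current_thread) {
        new_bucket.push_front(&t);     // SchedulePrepend: running thread stays first in line
    } else {
        new_bucket.push_back(&t);      // Schedule: everyone else queues at the back
    }
}

int main() {
    RunQueue core0;
    PrioThread a{"a", 44};
    PrioThread b{"b", 44};
    core0[44] = {&a, &b};

    a.priority = 28;                       // a gets boosted
    AdjustOnPriority(core0, a, 44, true);  // it is the thread running on this core

    std::cout << core0[28].front()->name << '\n'; // "a"
    std::cout << core0[44].size() << '\n';        // 1 (only b left)
}
```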
| 505 | |||
| 506 | void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) { | ||
| 507 | auto& scheduler = Core::System::GetInstance().GlobalScheduler(); | ||
| 508 | if (GetSchedulingStatus() != ThreadSchedStatus::Runnable || | ||
| 509 | current_priority >= THREADPRIO_COUNT) | ||
| 510 | return; | ||
| 511 | |||
| 512 | for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { | ||
| 513 | if (((old_affinity_mask >> core) & 1) != 0) { | ||
| 514 | if (core == old_core) { | ||
| 515 | scheduler.Unschedule(current_priority, core, this); | ||
| 516 | } else { | ||
| 517 | scheduler.Unsuggest(current_priority, core, this); | ||
| 518 | } | ||
| 519 | } | ||
| 520 | } | ||
| 521 | |||
| 522 | for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { | ||
| 523 | if (((affinity_mask >> core) & 1) != 0) { | ||
| 524 | if (core == processor_id) { | ||
| 525 | scheduler.Schedule(current_priority, core, this); | ||
| 526 | } else { | ||
| 527 | scheduler.Suggest(current_priority, core, this); | ||
| 528 | } | ||
| 529 | } | ||
| 530 | } | ||
| 531 | |||
| 532 | scheduler.SetReselectionPending(); | ||
| 533 | } | ||
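AdjustSchedulingOnAffinity walks the old mask to pull the thread out of every core that previously held it (Unschedule on the owning core, Unsuggest elsewhere), then walks the new mask to add it back the same way. The small sketch below only prints which action each mask bit triggers, mirroring the two loops above:

```cpp
#include <cstdint>
#include <iostream>

constexpr int NumCores = 4;

// Prints the per-core queue action each mask bit triggers: the owning core gets
// Schedule/Unschedule, every other allowed core gets Suggest/Unsuggest.
void DescribeAffinityChange(uint64_t old_mask, uint64_t new_mask, int32_t old_core,
                            int32_t new_core) {
    for (int core = 0; core < NumCores; core++) {
        if ((old_mask >> core) & 1) {
            std::cout << "core " << core
                      << (core == old_core ? ": Unschedule\n" : ": Unsuggest\n");
        }
    }
    for (int core = 0; core < NumCores; core++) {
        if ((new_mask >> core) & 1) {
            std::cout << "core " << core
                      << (core == new_core ? ": Schedule\n" : ": Suggest\n");
        }
    }
}

int main() {
    // Thread moves from cores {0,1} (owned by core 0) to cores {2,3} (owned by core 3).
    DescribeAffinityChange(0b0011, 0b1100, 0, 3);
}
```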
| 534 | |||
| 411 | //////////////////////////////////////////////////////////////////////////////////////////////////// | 535 | //////////////////////////////////////////////////////////////////////////////////////////////////// |
| 412 | 536 | ||
| 413 | /** | 537 | /** |
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 07e989637..c426a7209 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
| @@ -75,6 +75,21 @@ enum class ThreadActivity : u32 { | |||
| 75 | Paused = 1, | 75 | Paused = 1, |
| 76 | }; | 76 | }; |
| 77 | 77 | ||
| 78 | enum class ThreadSchedStatus : u32 { None = 0, Paused = 1, Runnable = 2, Exited = 3 }; | ||
| 79 | |||
| 80 | enum ThreadSchedFlags : u32 { | ||
| 81 | ProcessPauseFlag = 1 << 4, | ||
| 82 | ThreadPauseFlag = 1 << 5, | ||
| 83 | ProcessDebugPauseFlag = 1 << 6, | ||
| 84 | KernelInitPauseFlag = 1 << 8, | ||
| 85 | }; | ||
| 86 | |||
| 87 | enum ThreadSchedMasks : u32 { | ||
| 88 | LowMask = 0x000f, | ||
| 89 | HighMask = 0xfff0, | ||
| 90 | ForcePauseMask = 0x0070, | ||
| 91 | }; | ||
| 92 | |||
| 78 | class Thread final : public WaitObject { | 93 | class Thread final : public WaitObject { |
| 79 | public: | 94 | public: |
| 80 | using MutexWaitingThreads = std::vector<SharedPtr<Thread>>; | 95 | using MutexWaitingThreads = std::vector<SharedPtr<Thread>>; |
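The new scheduling word packs the ThreadSchedStatus into the low nibble and the pause flags into the bits above it, with ForcePauseMask grouping the process, thread and debug pause reasons. The decomposition below uses the constants exactly as declared here; the CanRun helper (a thread only really runs when its status is Runnable and no force-pause bit is set) is an assumption about how the scheduler consumes these fields, added purely for illustration.

```cpp
#include <cstdint>
#include <iostream>

// Constants as declared in thread.h by this commit.
enum class ThreadSchedStatus : uint32_t { None = 0, Paused = 1, Runnable = 2, Exited = 3 };

enum ThreadSchedFlags : uint32_t {
    ProcessPauseFlag = 1 << 4,
    ThreadPauseFlag = 1 << 5,
    ProcessDebugPauseFlag = 1 << 6,
    KernelInitPauseFlag = 1 << 8,
};

enum ThreadSchedMasks : uint32_t {
    LowMask = 0x000f,
    HighMask = 0xfff0,
    ForcePauseMask = 0x0070,
};

ThreadSchedStatus StatusOf(uint32_t scheduling_state) {
    return static_cast<ThreadSchedStatus>(scheduling_state & LowMask);
}

// Assumption for illustration: eligible to run only when the low nibble says
// Runnable and none of the force-pause flags are set.
bool CanRun(uint32_t scheduling_state) {
    return StatusOf(scheduling_state) == ThreadSchedStatus::Runnable &&
           (scheduling_state & ForcePauseMask) == 0;
}

int main() {
    uint32_t state = static_cast<uint32_t>(ThreadSchedStatus::Runnable);
    std::cout << CanRun(state) << '\n';                           // 1: runnable, no pause bits

    state |= ThreadPauseFlag;                                     // SetActivity paused it
    std::cout << CanRun(state) << '\n';                           // 0: force-pause bit set

    state = (state & HighMask) | static_cast<uint32_t>(ThreadSchedStatus::Paused);
    std::cout << static_cast<uint32_t>(StatusOf(state)) << '\n';  // 1 (Paused)
}
```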
| @@ -278,6 +293,10 @@ public: | |||
| 278 | return processor_id; | 293 | return processor_id; |
| 279 | } | 294 | } |
| 280 | 295 | ||
| 296 | void SetProcessorID(s32 new_core) { | ||
| 297 | processor_id = new_core; | ||
| 298 | } | ||
| 299 | |||
| 281 | Process* GetOwnerProcess() { | 300 | Process* GetOwnerProcess() { |
| 282 | return owner_process; | 301 | return owner_process; |
| 283 | } | 302 | } |
| @@ -383,11 +402,38 @@ public: | |||
| 383 | /// Sleeps this thread for the given amount of nanoseconds. | 402 | /// Sleeps this thread for the given amount of nanoseconds. |
| 384 | void Sleep(s64 nanoseconds); | 403 | void Sleep(s64 nanoseconds); |
| 385 | 404 | ||
| 405 | /// Yields this thread without rebalancing loads. | ||
| 406 | void YieldType0(); | ||
| 407 | |||
| 408 | /// Yields this thread and does a load rebalancing. | ||
| 409 | void YieldType1(); | ||
| 410 | |||
| 411 | /// Yields this thread and if the core is left idle, loads are rebalanced | ||
| 412 | void YieldType2(); | ||
| 413 | |||
| 414 | ThreadSchedStatus GetSchedulingStatus() { | ||
| 415 | return static_cast<ThreadSchedStatus>(scheduling_state & ThreadSchedMasks::LowMask); | ||
| 416 | } | ||
| 417 | |||
| 418 | bool IsRunning() const { | ||
| 419 | return is_running; | ||
| 420 | } | ||
| 421 | |||
| 422 | void SetIsRunning(bool value) { | ||
| 423 | is_running = value; | ||
| 424 | } | ||
| 425 | |||
| 386 | private: | 426 | private: |
| 387 | explicit Thread(KernelCore& kernel); | 427 | explicit Thread(KernelCore& kernel); |
| 388 | ~Thread() override; | 428 | ~Thread() override; |
| 389 | 429 | ||
| 390 | void ChangeScheduler(); | 430 | void SetSchedulingStatus(ThreadSchedStatus new_status); |
| 431 | void SetCurrentPriority(u32 new_priority); | ||
| 432 | ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask); | ||
| 433 | |||
| 434 | void AdjustSchedulingOnStatus(u32 old_flags); | ||
| 435 | void AdjustSchedulingOnPriority(u32 old_priority); | ||
| 436 | void AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core); | ||
| 391 | 437 | ||
| 392 | Core::ARM_Interface::ThreadContext context{}; | 438 | Core::ARM_Interface::ThreadContext context{}; |
| 393 | 439 | ||
| @@ -453,6 +499,13 @@ private: | |||
| 453 | 499 | ||
| 454 | ThreadActivity activity = ThreadActivity::Normal; | 500 | ThreadActivity activity = ThreadActivity::Normal; |
| 455 | 501 | ||
| 502 | s32 ideal_core_override = -1; | ||
| 503 | u64 affinity_mask_override = 0x1; | ||
| 504 | u32 affinity_override_count = 0; | ||
| 505 | |||
| 506 | u32 scheduling_state = 0; | ||
| 507 | bool is_running = false; | ||
| 508 | |||
| 456 | std::string name; | 509 | std::string name; |
| 457 | }; | 510 | }; |
| 458 | 511 | ||