diff options
| author | 2019-06-19 09:11:18 -0400 | |
|---|---|---|
| committer | 2019-10-15 11:55:12 -0400 | |
| commit | 82218c925af8bcbaa05ae9f39af2d2393de7681f (patch) | |
| tree | e38d90c4838679ae59d58f51fff2904b16b1a155 /src/core/hle/kernel/thread.cpp | |
| parent | Correct PrepareReschedule (diff) | |
| download | yuzu-82218c925af8bcbaa05ae9f39af2d2393de7681f.tar.gz yuzu-82218c925af8bcbaa05ae9f39af2d2393de7681f.tar.xz yuzu-82218c925af8bcbaa05ae9f39af2d2393de7681f.zip | |
Kernel: Style and Corrections
Diffstat (limited to 'src/core/hle/kernel/thread.cpp')
| -rw-r--r-- | src/core/hle/kernel/thread.cpp | 54 |
1 file changed, 30 insertions, 24 deletions
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index d0fa7b370..8cf0a7ec7 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp | |||
| @@ -373,43 +373,44 @@ void Thread::Sleep(s64 nanoseconds) { | |||
| 373 | WakeAfterDelay(nanoseconds); | 373 | WakeAfterDelay(nanoseconds); |
| 374 | } | 374 | } |
| 375 | 375 | ||
| 376 | void Thread::YieldType0() { | 376 | void Thread::YieldSimple() { |
| 377 | auto& scheduler = kernel.GlobalScheduler(); | 377 | auto& scheduler = kernel.GlobalScheduler(); |
| 378 | scheduler.YieldThread(this); | 378 | scheduler.YieldThread(this); |
| 379 | } | 379 | } |
| 380 | 380 | ||
| 381 | void Thread::YieldType1() { | 381 | void Thread::YieldAndBalanceLoad() { |
| 382 | auto& scheduler = kernel.GlobalScheduler(); | 382 | auto& scheduler = kernel.GlobalScheduler(); |
| 383 | scheduler.YieldThreadAndBalanceLoad(this); | 383 | scheduler.YieldThreadAndBalanceLoad(this); |
| 384 | } | 384 | } |
| 385 | 385 | ||
| 386 | void Thread::YieldType2() { | 386 | void Thread::YieldAndWaitForLoadBalancing() { |
| 387 | auto& scheduler = kernel.GlobalScheduler(); | 387 | auto& scheduler = kernel.GlobalScheduler(); |
| 388 | scheduler.YieldThreadAndWaitForLoadBalancing(this); | 388 | scheduler.YieldThreadAndWaitForLoadBalancing(this); |
| 389 | } | 389 | } |
| 390 | 390 | ||
| 391 | void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) { | 391 | void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) { |
| 392 | u32 old_flags = scheduling_state; | 392 | const u32 old_flags = scheduling_state; |
| 393 | scheduling_state = | 393 | scheduling_state = |
| 394 | (scheduling_state & ThreadSchedMasks::HighMask) | static_cast<u32>(new_status); | 394 | (scheduling_state & ThreadSchedMasks::HighMask) | static_cast<u32>(new_status); |
| 395 | AdjustSchedulingOnStatus(old_flags); | 395 | AdjustSchedulingOnStatus(old_flags); |
| 396 | } | 396 | } |
| 397 | 397 | ||
| 398 | void Thread::SetCurrentPriority(u32 new_priority) { | 398 | void Thread::SetCurrentPriority(u32 new_priority) { |
| 399 | u32 old_priority = current_priority; | 399 | u32 old_priority = std::exchange(current_priority, new_priority); |
| 400 | current_priority = new_priority; | ||
| 401 | AdjustSchedulingOnPriority(old_priority); | 400 | AdjustSchedulingOnPriority(old_priority); |
| 402 | } | 401 | } |
| 403 | 402 | ||
| 404 | ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { | 403 | ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { |
| 405 | auto HighestSetCore = [](u64 mask, u32 max_cores) { | 404 | const auto HighestSetCore = [](u64 mask, u32 max_cores) { |
| 406 | for (s32 core = max_cores - 1; core >= 0; core--) { | 405 | for (s32 core = max_cores - 1; core >= 0; core--) { |
| 407 | if (((mask >> core) & 1) != 0) | 406 | if (((mask >> core) & 1) != 0) { |
| 408 | return core; | 407 | return core; |
| 408 | } | ||
| 409 | } | 409 | } |
| 410 | return -1; | 410 | return -1; |
| 411 | }; | 411 | }; |
| 412 | bool use_override = affinity_override_count != 0; | 412 | |
| 413 | const bool use_override = affinity_override_count != 0; | ||
| 413 | // The value -3 is "do not change the ideal core". | 414 | // The value -3 is "do not change the ideal core". |
| 414 | if (new_core == -3) { | 415 | if (new_core == -3) { |
| 415 | new_core = use_override ? ideal_core_override : ideal_core; | 416 | new_core = use_override ? ideal_core_override : ideal_core; |
| @@ -421,11 +422,10 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { | |||
| 421 | ideal_core_override = new_core; | 422 | ideal_core_override = new_core; |
| 422 | affinity_mask_override = new_affinity_mask; | 423 | affinity_mask_override = new_affinity_mask; |
| 423 | } else { | 424 | } else { |
| 424 | u64 old_affinity_mask = affinity_mask; | 425 | const u64 old_affinity_mask = std::exchange(affinity_mask, new_affinity_mask); |
| 425 | ideal_core = new_core; | 426 | ideal_core = new_core; |
| 426 | affinity_mask = new_affinity_mask; | ||
| 427 | if (old_affinity_mask != new_affinity_mask) { | 427 | if (old_affinity_mask != new_affinity_mask) { |
| 428 | s32 old_core = processor_id; | 428 | const s32 old_core = processor_id; |
| 429 | if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) { | 429 | if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) { |
| 430 | if (ideal_core < 0) { | 430 | if (ideal_core < 0) { |
| 431 | processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES); | 431 | processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES); |
| @@ -440,28 +440,33 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { | |||
| 440 | } | 440 | } |
| 441 | 441 | ||
| 442 | void Thread::AdjustSchedulingOnStatus(u32 old_flags) { | 442 | void Thread::AdjustSchedulingOnStatus(u32 old_flags) { |
| 443 | if (old_flags == scheduling_state) | 443 | if (old_flags == scheduling_state) { |
| 444 | return; | 444 | return; |
| 445 | } | ||
| 445 | 446 | ||
| 446 | auto& scheduler = kernel.GlobalScheduler(); | 447 | auto& scheduler = kernel.GlobalScheduler(); |
| 447 | if (static_cast<ThreadSchedStatus>(old_flags & ThreadSchedMasks::LowMask) == | 448 | if (static_cast<ThreadSchedStatus>(old_flags & ThreadSchedMasks::LowMask) == |
| 448 | ThreadSchedStatus::Runnable) { | 449 | ThreadSchedStatus::Runnable) { |
| 449 | // In this case the thread was running, now it's pausing/exitting | 450 | // In this case the thread was running, now it's pausing/exitting |
| 450 | if (processor_id >= 0) | 451 | if (processor_id >= 0) { |
| 451 | scheduler.Unschedule(current_priority, processor_id, this); | 452 | scheduler.Unschedule(current_priority, processor_id, this); |
| 453 | } | ||
| 452 | 454 | ||
| 453 | for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { | 455 | for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { |
| 454 | if (core != processor_id && ((affinity_mask >> core) & 1) != 0) | 456 | if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { |
| 455 | scheduler.Unsuggest(current_priority, core, this); | 457 | scheduler.Unsuggest(current_priority, core, this); |
| 458 | } | ||
| 456 | } | 459 | } |
| 457 | } else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) { | 460 | } else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) { |
| 458 | // The thread is now set to running from being stopped | 461 | // The thread is now set to running from being stopped |
| 459 | if (processor_id >= 0) | 462 | if (processor_id >= 0) { |
| 460 | scheduler.Schedule(current_priority, processor_id, this); | 463 | scheduler.Schedule(current_priority, processor_id, this); |
| 464 | } | ||
| 461 | 465 | ||
| 462 | for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { | 466 | for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { |
| 463 | if (core != processor_id && ((affinity_mask >> core) & 1) != 0) | 467 | if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { |
| 464 | scheduler.Suggest(current_priority, core, this); | 468 | scheduler.Suggest(current_priority, core, this); |
| 469 | } | ||
| 465 | } | 470 | } |
| 466 | } | 471 | } |
| 467 | 472 | ||
| @@ -477,7 +482,7 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) { | |||
| 477 | scheduler.Unschedule(old_priority, processor_id, this); | 482 | scheduler.Unschedule(old_priority, processor_id, this); |
| 478 | } | 483 | } |
| 479 | 484 | ||
| 480 | for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { | 485 | for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { |
| 481 | if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { | 486 | if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { |
| 482 | scheduler.Unsuggest(old_priority, core, this); | 487 | scheduler.Unsuggest(old_priority, core, this); |
| 483 | } | 488 | } |
| @@ -494,7 +499,7 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) { | |||
| 494 | } | 499 | } |
| 495 | } | 500 | } |
| 496 | 501 | ||
| 497 | for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { | 502 | for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { |
| 498 | if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { | 503 | if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { |
| 499 | scheduler.Suggest(current_priority, core, this); | 504 | scheduler.Suggest(current_priority, core, this); |
| 500 | } | 505 | } |
| @@ -506,10 +511,11 @@ void Thread::AdjustSchedulingOnPriority(u32 old_priority) { | |||
| 506 | void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) { | 511 | void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) { |
| 507 | auto& scheduler = Core::System::GetInstance().GlobalScheduler(); | 512 | auto& scheduler = Core::System::GetInstance().GlobalScheduler(); |
| 508 | if (GetSchedulingStatus() != ThreadSchedStatus::Runnable || | 513 | if (GetSchedulingStatus() != ThreadSchedStatus::Runnable || |
| 509 | current_priority >= THREADPRIO_COUNT) | 514 | current_priority >= THREADPRIO_COUNT) { |
| 510 | return; | 515 | return; |
| 516 | } | ||
| 511 | 517 | ||
| 512 | for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { | 518 | for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { |
| 513 | if (((old_affinity_mask >> core) & 1) != 0) { | 519 | if (((old_affinity_mask >> core) & 1) != 0) { |
| 514 | if (core == old_core) { | 520 | if (core == old_core) { |
| 515 | scheduler.Unschedule(current_priority, core, this); | 521 | scheduler.Unschedule(current_priority, core, this); |
| @@ -519,7 +525,7 @@ void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) { | |||
| 519 | } | 525 | } |
| 520 | } | 526 | } |
| 521 | 527 | ||
| 522 | for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { | 528 | for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { |
| 523 | if (((affinity_mask >> core) & 1) != 0) { | 529 | if (((affinity_mask >> core) & 1) != 0) { |
| 524 | if (core == processor_id) { | 530 | if (core == processor_id) { |
| 525 | scheduler.Schedule(current_priority, core, this); | 531 | scheduler.Schedule(current_priority, core, this); |