path: root/src/core/hle/kernel/scheduler.cpp
author     Fernando Sahmkow    2020-02-28 09:42:06 -0400
committer  Fernando Sahmkow    2020-06-27 11:35:21 -0400
commit     2a8837ff51a9cf5a0123489dba5f7ab48373c2d3 (patch)
tree       119ae561120f78d70efd6e12297248a48ea28901 /src/core/hle/kernel/scheduler.cpp
parent     General: Add better safety for JIT use. (diff)
General: Add Asserts
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp | 18
1 file changed, 18 insertions(+), 0 deletions(-)
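
The pattern applied throughout this commit: GlobalScheduler::Lock()/Unlock() maintain an is_locked flag, and every operation that mutates scheduling state asserts the flag, so a caller that touches the queues without holding the global scheduler lock trips the assert instead of silently corrupting state. A minimal stand-alone sketch of that idea (simplified, hypothetical names; the real GlobalScheduler additionally tracks the owning EmuThreadHandle and a recursion count):

#include <cassert>
#include <mutex>
#include <utility>
#include <vector>

// Illustrative only: a scheduler-like object whose mutating operations
// must run under its lock, enforced with a debug assertion.
class LockedScheduler {
public:
    void Lock() {
        inner_lock.lock();
        is_locked = true; // written only while inner_lock is held
    }

    void Unlock() {
        is_locked = false;
        inner_lock.unlock();
    }

    void Schedule(int priority, int thread_id) {
        assert(is_locked && "Schedule requires the scheduler lock");
        queue.emplace_back(priority, thread_id);
    }

private:
    std::mutex inner_lock;
    bool is_locked = false; // read by the assert above, always under the lock
    std::vector<std::pair<int, int>> queue;
};
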
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 727d2e6cc..d67d3c5cd 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -44,6 +44,7 @@ void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
 }
 
 u32 GlobalScheduler::SelectThreads() {
+    ASSERT(is_locked);
     const auto update_thread = [](Thread* thread, Scheduler& sched) {
         sched.guard.lock();
         if (thread != sched.selected_thread.get()) {
@@ -136,6 +137,7 @@ u32 GlobalScheduler::SelectThreads() {
 }
 
 bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
+    ASSERT(is_locked);
     // Note: caller should use critical section, etc.
     const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
     const u32 priority = yielding_thread->GetPriority();
@@ -149,6 +151,7 @@ bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
 }
 
 bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
+    ASSERT(is_locked);
     // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
     // etc.
     const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
@@ -197,6 +200,7 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
 }
 
 bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
+    ASSERT(is_locked);
     // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
     // etc.
     Thread* winner = nullptr;
@@ -237,6 +241,7 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
 }
 
 void GlobalScheduler::PreemptThreads() {
+    ASSERT(is_locked);
     for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
         const u32 priority = preemption_priorities[core_id];
 
@@ -339,33 +344,40 @@ void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
 }
 
 void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     suggested_queue[core].add(thread, priority);
 }
 
 void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     suggested_queue[core].remove(thread, priority);
 }
 
 void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
     scheduled_queue[core].add(thread, priority);
 }
 
 void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
     scheduled_queue[core].add(thread, priority, false);
 }
 
 void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     scheduled_queue[core].remove(thread, priority);
     scheduled_queue[core].add(thread, priority);
 }
 
 void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     scheduled_queue[core].remove(thread, priority);
 }
 
 void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
+    ASSERT(is_locked);
     const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
     const s32 source_core = thread->GetProcessorID();
     if (source_core == destination_core || !schedulable) {
@@ -399,6 +411,7 @@ void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
     if (old_flags == thread->scheduling_state) {
         return;
     }
+    ASSERT(is_locked);
 
     if (static_cast<ThreadSchedStatus>(old_flags & static_cast<u32>(ThreadSchedMasks::LowMask)) ==
         ThreadSchedStatus::Runnable) {
@@ -434,6 +447,7 @@ void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priorit
     if (thread->GetSchedulingStatus() != ThreadSchedStatus::Runnable) {
         return;
     }
+    ASSERT(is_locked);
     if (thread->processor_id >= 0) {
         Unschedule(old_priority, static_cast<u32>(thread->processor_id), thread);
     }
@@ -472,6 +486,7 @@ void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinit
         thread->current_priority >= THREADPRIO_COUNT) {
         return;
     }
+    ASSERT(is_locked);
 
     for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (((old_affinity_mask >> core) & 1) != 0) {
@@ -507,10 +522,12 @@ void GlobalScheduler::Shutdown() {
 
 void GlobalScheduler::Lock() {
     Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID();
+    ASSERT(!current_thread.IsInvalid());
     if (current_thread == current_owner) {
         ++scope_lock;
     } else {
         inner_lock.lock();
+        is_locked = true;
         current_owner = current_thread;
         ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle());
         scope_lock = 1;
@@ -526,6 +543,7 @@ void GlobalScheduler::Unlock() {
     Core::EmuThreadHandle leaving_thread = current_owner;
     current_owner = Core::EmuThreadHandle::InvalidHandle();
     scope_lock = 1;
+    is_locked = false;
     inner_lock.unlock();
     EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread);
 }
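
Taken together with the Lock()/Unlock() hunks above, a call into any of the asserted operations is only valid while the global scheduler lock is held. A hypothetical call site, assuming Lock()/Unlock() are reachable from the caller (in the real code base the lock is typically taken through a scoped helper rather than called directly):

// Hypothetical caller: only GlobalScheduler, Thread, Lock, Unlock and YieldThread
// appear in the diff above; the surrounding function and its name are illustrative.
void YieldOnSvc(Kernel::GlobalScheduler& scheduler, Kernel::Thread* current_thread) {
    scheduler.Lock();                      // sets is_locked (and records the owner)
    scheduler.YieldThread(current_thread); // ASSERT(is_locked) now passes
    scheduler.Unlock();                    // clears is_locked, reschedules pending cores
}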