author     2020-02-28 09:42:06 -0400
committer  2020-06-27 11:35:21 -0400
commit     2a8837ff51a9cf5a0123489dba5f7ab48373c2d3 (patch)
tree       119ae561120f78d70efd6e12297248a48ea28901 /src/core
parent     General: Add better safety for JIT use. (diff)
download   yuzu-2a8837ff51a9cf5a0123489dba5f7ab48373c2d3.tar.gz
           yuzu-2a8837ff51a9cf5a0123489dba5f7ab48373c2d3.tar.xz
           yuzu-2a8837ff51a9cf5a0123489dba5f7ab48373c2d3.zip
General: Add Asserts
Diffstat (limited to 'src/core')
-rw-r--r--   src/core/hardware_properties.h    |  4
-rw-r--r--   src/core/hle/kernel/scheduler.cpp | 18
-rw-r--r--   src/core/hle/kernel/scheduler.h   |  1
-rw-r--r--   src/core/hle/kernel/svc.cpp       |  1
4 files changed, 24 insertions, 0 deletions
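The pattern across the files below: GlobalScheduler gains a bool is_locked member that Lock() sets and Unlock() clears, and the scheduler operations that manipulate the per-core queues now ASSERT(is_locked) before touching them, so a caller that forgets to take the scheduler lock fails loudly instead of racing. The following is a minimal, self-contained sketch of that contract; ToyGlobalScheduler and the assert-based ASSERT macro are stand-ins for illustration, not yuzu code.

    #include <cassert>
    #include <mutex>
    #include <vector>

    // Stand-in for yuzu's ASSERT macro (the real one lives in common/assert.h).
    #define ASSERT(expr) assert(expr)

    // Toy scheduler demonstrating the locking contract this commit enforces:
    // queue-mutating operations must run between Lock() and Unlock().
    class ToyGlobalScheduler {
    public:
        void Lock() {
            inner_lock.lock();
            is_locked = true; // mirrors GlobalScheduler::Lock()
        }

        void Unlock() {
            is_locked = false; // mirrors GlobalScheduler::Unlock()
            inner_lock.unlock();
        }

        void Schedule(int thread_id) {
            ASSERT(is_locked); // the guard this commit adds to each scheduler operation
            queue.push_back(thread_id);
        }

    private:
        bool is_locked{}; // mirrors the new GlobalScheduler::is_locked member
        std::mutex inner_lock{};
        std::vector<int> queue;
    };

    int main() {
        ToyGlobalScheduler scheduler;
        scheduler.Lock();
        scheduler.Schedule(0); // fine: the lock is held
        scheduler.Unlock();
        // scheduler.Schedule(1); // would trip ASSERT(is_locked) in a debug build
    }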
diff --git a/src/core/hardware_properties.h b/src/core/hardware_properties.h
index b04e046ed..456b41e1b 100644
--- a/src/core/hardware_properties.h
+++ b/src/core/hardware_properties.h
@@ -42,6 +42,10 @@ struct EmuThreadHandle {
         constexpr u32 invalid_handle = 0xFFFFFFFF;
         return {invalid_handle, invalid_handle};
     }
+
+    bool IsInvalid() const {
+        return (*this) == InvalidHandle();
+    }
 };
 
 } // namespace Core
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 727d2e6cc..d67d3c5cd 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -44,6 +44,7 @@ void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
 }
 
 u32 GlobalScheduler::SelectThreads() {
+    ASSERT(is_locked);
     const auto update_thread = [](Thread* thread, Scheduler& sched) {
         sched.guard.lock();
         if (thread != sched.selected_thread.get()) {
@@ -136,6 +137,7 @@ u32 GlobalScheduler::SelectThreads() {
 }
 
 bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
+    ASSERT(is_locked);
     // Note: caller should use critical section, etc.
     const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
     const u32 priority = yielding_thread->GetPriority();
@@ -149,6 +151,7 @@ bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
 }
 
 bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
+    ASSERT(is_locked);
     // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
     // etc.
     const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
@@ -197,6 +200,7 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
 }
 
 bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
+    ASSERT(is_locked);
     // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
     // etc.
     Thread* winner = nullptr;
@@ -237,6 +241,7 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
 }
 
 void GlobalScheduler::PreemptThreads() {
+    ASSERT(is_locked);
     for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
         const u32 priority = preemption_priorities[core_id];
 
@@ -339,33 +344,40 @@ void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
 }
 
 void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     suggested_queue[core].add(thread, priority);
 }
 
 void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     suggested_queue[core].remove(thread, priority);
 }
 
 void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
     scheduled_queue[core].add(thread, priority);
 }
 
 void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
     scheduled_queue[core].add(thread, priority, false);
 }
 
 void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     scheduled_queue[core].remove(thread, priority);
     scheduled_queue[core].add(thread, priority);
 }
 
 void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     scheduled_queue[core].remove(thread, priority);
 }
 
 void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
+    ASSERT(is_locked);
     const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
     const s32 source_core = thread->GetProcessorID();
     if (source_core == destination_core || !schedulable) {
@@ -399,6 +411,7 @@ void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
     if (old_flags == thread->scheduling_state) {
         return;
     }
+    ASSERT(is_locked);
 
     if (static_cast<ThreadSchedStatus>(old_flags & static_cast<u32>(ThreadSchedMasks::LowMask)) ==
         ThreadSchedStatus::Runnable) {
@@ -434,6 +447,7 @@ void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priorit
     if (thread->GetSchedulingStatus() != ThreadSchedStatus::Runnable) {
         return;
     }
+    ASSERT(is_locked);
     if (thread->processor_id >= 0) {
         Unschedule(old_priority, static_cast<u32>(thread->processor_id), thread);
     }
@@ -472,6 +486,7 @@ void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinit
         thread->current_priority >= THREADPRIO_COUNT) {
         return;
     }
+    ASSERT(is_locked);
 
     for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (((old_affinity_mask >> core) & 1) != 0) {
@@ -507,10 +522,12 @@ void GlobalScheduler::Shutdown() {
 
 void GlobalScheduler::Lock() {
     Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID();
+    ASSERT(!current_thread.IsInvalid());
     if (current_thread == current_owner) {
         ++scope_lock;
     } else {
         inner_lock.lock();
+        is_locked = true;
         current_owner = current_thread;
         ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle());
         scope_lock = 1;
@@ -526,6 +543,7 @@ void GlobalScheduler::Unlock() {
     Core::EmuThreadHandle leaving_thread = current_owner;
     current_owner = Core::EmuThreadHandle::InvalidHandle();
     scope_lock = 1;
+    is_locked = false;
     inner_lock.unlock();
     EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread);
 }
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index f5f64338f..f26a554f5 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -182,6 +182,7 @@ private:
     std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
 
     /// Scheduler lock mechanisms.
+    bool is_locked{};
    std::mutex inner_lock{}; // TODO(Blinkhawk): Replace for a SpinLock
    std::atomic<s64> scope_lock{};
    Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()};
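A note on the new member, as it reads from the diffs above: is_locked is a plain bool rather than an atomic because it is only written while inner_lock is held (in Lock() and Unlock()), and it is only read inside ASSERT by functions whose contract already requires the scheduler lock to be held. The flag exists to document and enforce that locking contract through assertions, not to provide synchronization of its own.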
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index b535593c7..4c1040a3b 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1657,6 +1657,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
                 update_val = thread->GetWaitHandle();
             }
         } while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val));
+        monitor.ClearExclusive();
         if (mutex_val == 0) {
             // We were able to acquire the mutex, resume this thread.
             ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);