path: root/src/core/hle/kernel/svc.cpp
Diffstat (limited to 'src/core/hle/kernel/svc.cpp')
-rw-r--r--  src/core/hle/kernel/svc.cpp  397
1 file changed, 137 insertions(+), 260 deletions(-)
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index de3ed25da..cc8b661af 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -10,6 +10,7 @@
 
 #include "common/alignment.h"
 #include "common/assert.h"
+#include "common/common_funcs.h"
 #include "common/fiber.h"
 #include "common/logging/log.h"
 #include "common/microprofile.h"
@@ -19,26 +20,28 @@
 #include "core/core_timing.h"
 #include "core/core_timing_util.h"
 #include "core/cpu_manager.h"
-#include "core/hle/kernel/address_arbiter.h"
 #include "core/hle/kernel/client_port.h"
 #include "core/hle/kernel/client_session.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/k_address_arbiter.h"
+#include "core/hle/kernel/k_condition_variable.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/memory/memory_block.h"
+#include "core/hle/kernel/memory/memory_layout.h"
 #include "core/hle/kernel/memory/page_table.h"
-#include "core/hle/kernel/mutex.h"
 #include "core/hle/kernel/physical_core.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/readable_event.h"
 #include "core/hle/kernel/resource_limit.h"
 #include "core/hle/kernel/shared_memory.h"
 #include "core/hle/kernel/svc.h"
+#include "core/hle/kernel/svc_results.h"
 #include "core/hle/kernel/svc_types.h"
 #include "core/hle/kernel/svc_wrap.h"
-#include "core/hle/kernel/synchronization.h"
 #include "core/hle/kernel/thread.h"
 #include "core/hle/kernel/time_manager.h"
 #include "core/hle/kernel/transfer_memory.h"
@@ -343,27 +346,11 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
     auto thread = kernel.CurrentScheduler()->GetCurrentThread();
     {
         KScopedSchedulerLock lock(kernel);
-        thread->InvalidateHLECallback();
-        thread->SetStatus(ThreadStatus::WaitIPC);
+        thread->SetState(ThreadState::Waiting);
+        thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
         session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
     }
 
-    if (thread->HasHLECallback()) {
-        Handle event_handle = thread->GetHLETimeEvent();
-        if (event_handle != InvalidHandle) {
-            auto& time_manager = kernel.TimeManager();
-            time_manager.UnscheduleTimeEvent(event_handle);
-        }
-
-        {
-            KScopedSchedulerLock lock(kernel);
-            auto* sync_object = thread->GetHLESyncObject();
-            sync_object->RemoveWaitingThread(SharedFrom(thread));
-        }
-
-        thread->InvokeHLECallback(SharedFrom(thread));
-    }
-
     return thread->GetSignalingResult();
 }
 
@@ -436,7 +423,7 @@ static ResultCode GetProcessId32(Core::System& system, u32* process_id_low, u32*
 }
 
 /// Wait for the given handles to synchronize, timeout after the specified nanoseconds
-static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr handles_address,
+static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
                                       u64 handle_count, s64 nano_seconds) {
     LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, handle_count={}, nano_seconds={}",
               handles_address, handle_count, nano_seconds);
@@ -458,28 +445,26 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr
     }
 
     auto& kernel = system.Kernel();
-    Thread::ThreadSynchronizationObjects objects(handle_count);
+    std::vector<KSynchronizationObject*> objects(handle_count);
     const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
 
     for (u64 i = 0; i < handle_count; ++i) {
         const Handle handle = memory.Read32(handles_address + i * sizeof(Handle));
-        const auto object = handle_table.Get<SynchronizationObject>(handle);
+        const auto object = handle_table.Get<KSynchronizationObject>(handle);
 
         if (object == nullptr) {
             LOG_ERROR(Kernel_SVC, "Object is a nullptr");
             return ERR_INVALID_HANDLE;
         }
 
-        objects[i] = object;
+        objects[i] = object.get();
     }
-    auto& synchronization = kernel.Synchronization();
-    const auto [result, handle_result] = synchronization.WaitFor(objects, nano_seconds);
-    *index = handle_result;
-    return result;
+    return KSynchronizationObject::Wait(kernel, index, objects.data(),
+                                        static_cast<s32>(objects.size()), nano_seconds);
 }
 
 static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
-                                        s32 handle_count, u32 timeout_high, Handle* index) {
+                                        s32 handle_count, u32 timeout_high, s32* index) {
     const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
     return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds);
 }
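The rewritten WaitSynchronization keeps the owning references alive in the handle table and hands the waiter a contiguous array of raw pointers plus an explicit count, mirroring the objects[i] = object.get() line above. A minimal, self-contained sketch of that calling pattern (names here are illustrative, not yuzu's API):

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <vector>

    struct SyncObject {
        bool signaled = false;
    };

    // Stand-in for a batch wait: reports the index of the first signaled object.
    int WaitAny(std::int32_t* out_index, SyncObject** objects, std::int32_t num_objects) {
        for (std::int32_t i = 0; i < num_objects; ++i) {
            if (objects[i]->signaled) {
                *out_index = i;
                return 0; // success
            }
        }
        *out_index = -1;
        return 1; // nothing signaled in this sketch (a timeout in the real SVC)
    }

    int main() {
        // Owning references (shared_ptr here, handle-table entries in the real code).
        std::vector<std::shared_ptr<SyncObject>> owned(3);
        for (auto& object : owned) {
            object = std::make_shared<SyncObject>();
        }
        owned[1]->signaled = true;

        // Borrowed view handed to the wait call.
        std::vector<SyncObject*> objects(owned.size());
        for (std::size_t i = 0; i < owned.size(); ++i) {
            objects[i] = owned[i].get();
        }

        std::int32_t index = -1;
        return WaitAny(&index, objects.data(), static_cast<std::int32_t>(objects.size()));
    }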
@@ -504,56 +489,37 @@ static ResultCode CancelSynchronization32(Core::System& system, Handle thread_ha
     return CancelSynchronization(system, thread_handle);
 }
 
-/// Attempts to locks a mutex, creating it if it does not already exist
-static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_handle,
-                                VAddr mutex_addr, Handle requesting_thread_handle) {
-    LOG_TRACE(Kernel_SVC,
-              "called holding_thread_handle=0x{:08X}, mutex_addr=0x{:X}, "
-              "requesting_current_thread_handle=0x{:08X}",
-              holding_thread_handle, mutex_addr, requesting_thread_handle);
-
-    if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
-        LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
-                  mutex_addr);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
+/// Attempts to locks a mutex
+static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address,
+                                u32 tag) {
+    LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
+              thread_handle, address, tag);
 
-    if (!Common::IsWordAligned(mutex_addr)) {
-        LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr);
-        return ERR_INVALID_ADDRESS;
-    }
+    // Validate the input address.
+    R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
+    R_UNLESS(Common::IsAligned(address, sizeof(u32)), Svc::ResultInvalidAddress);
 
-    auto* const current_process = system.Kernel().CurrentProcess();
-    return current_process->GetMutex().TryAcquire(mutex_addr, holding_thread_handle,
-                                                  requesting_thread_handle);
+    return system.Kernel().CurrentProcess()->WaitForAddress(thread_handle, address, tag);
 }
 
-static ResultCode ArbitrateLock32(Core::System& system, Handle holding_thread_handle,
-                                  u32 mutex_addr, Handle requesting_thread_handle) {
-    return ArbitrateLock(system, holding_thread_handle, mutex_addr, requesting_thread_handle);
+static ResultCode ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address,
+                                  u32 tag) {
+    return ArbitrateLock(system, thread_handle, address, tag);
 }
 
 /// Unlock a mutex
-static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) {
-    LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr);
-
-    if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
-        LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
-                  mutex_addr);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
+static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) {
+    LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
 
-    if (!Common::IsWordAligned(mutex_addr)) {
-        LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr);
-        return ERR_INVALID_ADDRESS;
-    }
+    // Validate the input address.
+    R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
+    R_UNLESS(Common::IsAligned(address, sizeof(u32)), Svc::ResultInvalidAddress);
 
-    auto* const current_process = system.Kernel().CurrentProcess();
-    return current_process->GetMutex().Release(mutex_addr);
+    return system.Kernel().CurrentProcess()->SignalToAddress(address);
 }
 
-static ResultCode ArbitrateUnlock32(Core::System& system, u32 mutex_addr) {
-    return ArbitrateUnlock(system, mutex_addr);
+static ResultCode ArbitrateUnlock32(Core::System& system, u32 address) {
+    return ArbitrateUnlock(system, address);
 }
 
 enum class BreakType : u32 {
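The R_UNLESS macro used by the rewritten validation is defined elsewhere in the tree (it is not part of this diff); it reads as "return this result unless the condition holds". A self-contained stand-in with the same early-return shape, using illustrative result values and an illustrative address-space split:

    #include <cstddef>
    #include <cstdint>

    using ResultCode = std::uint32_t;
    constexpr ResultCode ResultSuccess = 0;
    constexpr ResultCode ResultInvalidCurrentMemory = 0xCC01; // illustrative value
    constexpr ResultCode ResultInvalidAddress = 0xCC02;       // illustrative value

    #define R_UNLESS_SKETCH(expr, result)                                                          \
        do {                                                                                       \
            if (!(expr)) {                                                                         \
                return (result);                                                                   \
            }                                                                                      \
        } while (false)

    constexpr bool IsAligned(std::uint64_t value, std::size_t alignment) {
        return (value & (alignment - 1)) == 0;
    }

    constexpr bool IsKernelAddress(std::uint64_t address) {
        return address >= 0xFFFF'0000'0000'0000ULL; // illustrative split, not the real layout
    }

    // Same two checks as the rewritten ArbitrateUnlock, minus the call into the process.
    ResultCode ArbitrateUnlockSketch(std::uint64_t address) {
        R_UNLESS_SKETCH(!IsKernelAddress(address), ResultInvalidCurrentMemory);
        R_UNLESS_SKETCH(IsAligned(address, sizeof(std::uint32_t)), ResultInvalidAddress);
        return ResultSuccess;
    }

    int main() {
        return ArbitrateUnlockSketch(0x1000) == ResultSuccess ? 0 : 1;
    }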
@@ -1180,7 +1146,7 @@ static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 pri
         return ERR_INVALID_HANDLE;
     }
 
-    thread->SetPriority(priority);
+    thread->SetBasePriority(priority);
 
     return RESULT_SUCCESS;
 }
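The rename from SetPriority to SetBasePriority reflects that the SVC now writes only the base priority; the priority the scheduler actually acts on can still be boosted by threads waiting on a mutex the target holds. An illustrative sketch of that base-versus-effective split (not yuzu's Thread class), assuming lower values mean higher priority as on the Switch:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    class ThreadSketch {
    public:
        void SetBasePriority(std::uint32_t priority) {
            base_priority = priority;
        }

        void AddMutexWaiterPriority(std::uint32_t priority) {
            waiter_priorities.push_back(priority);
        }

        // Effective priority is the best of the base and every waiter's priority.
        std::uint32_t GetEffectivePriority() const {
            std::uint32_t effective = base_priority;
            for (const auto waiter : waiter_priorities) {
                effective = std::min(effective, waiter);
            }
            return effective;
        }

    private:
        std::uint32_t base_priority = 44;
        std::vector<std::uint32_t> waiter_priorities;
    };

    int main() {
        ThreadSketch thread;
        thread.SetBasePriority(40);
        thread.AddMutexWaiterPriority(28); // a higher-priority waiter boosts this thread
        return thread.GetEffectivePriority() == 28 ? 0 : 1;
    }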
@@ -1559,7 +1525,7 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) {
         return ERR_INVALID_HANDLE;
     }
 
-    ASSERT(thread->GetStatus() == ThreadStatus::Dormant);
+    ASSERT(thread->GetState() == ThreadState::Initialized);
 
     return thread->Start();
 }
@@ -1620,224 +1586,135 @@ static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanosec
 }
 
 /// Wait process wide key atomic
-static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_addr,
-                                           VAddr condition_variable_addr, Handle thread_handle,
-                                           s64 nano_seconds) {
-    LOG_TRACE(
-        Kernel_SVC,
-        "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}",
-        mutex_addr, condition_variable_addr, thread_handle, nano_seconds);
-
-    if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
-        LOG_ERROR(
-            Kernel_SVC,
-            "Given mutex address must not be within the kernel address space. address=0x{:016X}",
-            mutex_addr);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    if (!Common::IsWordAligned(mutex_addr)) {
-        LOG_ERROR(Kernel_SVC, "Given mutex address must be word-aligned. address=0x{:016X}",
-                  mutex_addr);
-        return ERR_INVALID_ADDRESS;
-    }
-
-    ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
-    auto& kernel = system.Kernel();
-    Handle event_handle;
-    Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
-    auto* const current_process = kernel.CurrentProcess();
-    {
-        KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
-        const auto& handle_table = current_process->GetHandleTable();
-        std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
-        ASSERT(thread);
-
-        current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
-
-        if (thread->IsPendingTermination()) {
-            lock.CancelSleep();
-            return ERR_THREAD_TERMINATING;
-        }
-
-        const auto release_result = current_process->GetMutex().Release(mutex_addr);
-        if (release_result.IsError()) {
-            lock.CancelSleep();
-            return release_result;
-        }
-
-        if (nano_seconds == 0) {
-            lock.CancelSleep();
-            return RESULT_TIMEOUT;
-        }
-
-        current_thread->SetCondVarWaitAddress(condition_variable_addr);
-        current_thread->SetMutexWaitAddress(mutex_addr);
-        current_thread->SetWaitHandle(thread_handle);
-        current_thread->SetStatus(ThreadStatus::WaitCondVar);
-        current_process->InsertConditionVariableThread(SharedFrom(current_thread));
-    }
-
-    if (event_handle != InvalidHandle) {
-        auto& time_manager = kernel.TimeManager();
-        time_manager.UnscheduleTimeEvent(event_handle);
-    }
-
-    {
-        KScopedSchedulerLock lock(kernel);
-
-        auto* owner = current_thread->GetLockOwner();
-        if (owner != nullptr) {
-            owner->RemoveMutexWaiter(SharedFrom(current_thread));
-        }
-
-        current_process->RemoveConditionVariableThread(SharedFrom(current_thread));
-    }
-    // Note: Deliberately don't attempt to inherit the lock owner's priority.
-
-    return current_thread->GetSignalingResult();
+static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key,
+                                           u32 tag, s64 timeout_ns) {
+    LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
+              cv_key, tag, timeout_ns);
+
+    // Validate input.
+    R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
+    R_UNLESS(Common::IsAligned(address, sizeof(int32_t)), Svc::ResultInvalidAddress);
+
+    // Convert timeout from nanoseconds to ticks.
+    s64 timeout{};
+    if (timeout_ns > 0) {
+        const s64 offset_tick(timeout_ns);
+        if (offset_tick > 0) {
+            timeout = offset_tick + 2;
+            if (timeout <= 0) {
+                timeout = std::numeric_limits<s64>::max();
+            }
+        } else {
+            timeout = std::numeric_limits<s64>::max();
+        }
+    } else {
+        timeout = timeout_ns;
+    }
+
+    // Wait on the condition variable.
+    return system.Kernel().CurrentProcess()->WaitConditionVariable(
+        address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout);
 }
 
-static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 mutex_addr,
-                                             u32 condition_variable_addr, Handle thread_handle,
-                                             u32 nanoseconds_low, u32 nanoseconds_high) {
-    const auto nanoseconds = static_cast<s64>(nanoseconds_low | (u64{nanoseconds_high} << 32));
-    return WaitProcessWideKeyAtomic(system, mutex_addr, condition_variable_addr, thread_handle,
-                                    nanoseconds);
+static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
+                                             u32 timeout_ns_low, u32 timeout_ns_high) {
+    const auto timeout_ns = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
+    return WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns);
 }
 
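The timeout handling added above (and repeated in WaitForAddress below) treats positive nanosecond values as a bounded wait with a small fixed offset, clamps signed wraparound to an effectively infinite wait, and passes zero and negative values through unchanged, so 0 still means "poll" and -1 still means "wait forever". Restated as a self-contained function with a few representative checks (names illustrative):

    #include <cstdint>
    #include <limits>

    std::int64_t ConvertTimeout(std::int64_t timeout_ns) {
        std::int64_t timeout{};
        if (timeout_ns > 0) {
            const std::int64_t offset_tick(timeout_ns);
            if (offset_tick > 0) {
                timeout = offset_tick + 2;
                if (timeout <= 0) {
                    // The addition wrapped around; treat it as an unbounded wait.
                    timeout = std::numeric_limits<std::int64_t>::max();
                }
            } else {
                timeout = std::numeric_limits<std::int64_t>::max();
            }
        } else {
            // 0 stays a poll, negative values stay "wait forever".
            timeout = timeout_ns;
        }
        return timeout;
    }

    int main() {
        const bool ok =
            ConvertTimeout(1000) == 1002 && ConvertTimeout(0) == 0 && ConvertTimeout(-1) == -1;
        return ok ? 0 : 1;
    }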
 /// Signal process wide key
-static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_addr, s32 target) {
-    LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}",
-              condition_variable_addr, target);
-
-    ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
-
-    // Retrieve a list of all threads that are waiting for this condition variable.
-    auto& kernel = system.Kernel();
-    KScopedSchedulerLock lock(kernel);
-    auto* const current_process = kernel.CurrentProcess();
-    std::vector<std::shared_ptr<Thread>> waiting_threads =
-        current_process->GetConditionVariableThreads(condition_variable_addr);
-
-    // Only process up to 'target' threads, unless 'target' is less equal 0, in which case process
-    // them all.
-    std::size_t last = waiting_threads.size();
-    if (target > 0) {
-        last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
-    }
-    for (std::size_t index = 0; index < last; ++index) {
-        auto& thread = waiting_threads[index];
-
-        ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);
-
-        // liberate Cond Var Thread.
-        current_process->RemoveConditionVariableThread(thread);
-
-        const std::size_t current_core = system.CurrentCoreIndex();
-        auto& monitor = system.Monitor();
-
-        // Atomically read the value of the mutex.
-        u32 mutex_val = 0;
-        u32 update_val = 0;
-        const VAddr mutex_address = thread->GetMutexWaitAddress();
-        do {
-            // If the mutex is not yet acquired, acquire it.
-            mutex_val = monitor.ExclusiveRead32(current_core, mutex_address);
-
-            if (mutex_val != 0) {
-                update_val = mutex_val | Mutex::MutexHasWaitersFlag;
-            } else {
-                update_val = thread->GetWaitHandle();
-            }
-        } while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val));
-        monitor.ClearExclusive();
-        if (mutex_val == 0) {
-            // We were able to acquire the mutex, resume this thread.
-            auto* const lock_owner = thread->GetLockOwner();
-            if (lock_owner != nullptr) {
-                lock_owner->RemoveMutexWaiter(thread);
-            }
-
-            thread->SetLockOwner(nullptr);
-            thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
-            thread->ResumeFromWait();
-        } else {
-            // The mutex is already owned by some other thread, make this thread wait on it.
-            const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
-            const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
-            auto owner = handle_table.Get<Thread>(owner_handle);
-            ASSERT(owner);
-            if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
-                thread->SetStatus(ThreadStatus::WaitMutex);
-            }
-
-            owner->AddMutexWaiter(thread);
-        }
-    }
-}
-
-static void SignalProcessWideKey32(Core::System& system, u32 condition_variable_addr, s32 target) {
-    SignalProcessWideKey(system, condition_variable_addr, target);
-}
+static void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count) {
+    LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count);
+
+    // Signal the condition variable.
+    return system.Kernel().CurrentProcess()->SignalConditionVariable(
+        Common::AlignDown(cv_key, sizeof(u32)), count);
+}
+
+static void SignalProcessWideKey32(Core::System& system, u32 cv_key, s32 count) {
+    SignalProcessWideKey(system, cv_key, count);
+}
+
+namespace {
+
+constexpr bool IsValidSignalType(Svc::SignalType type) {
+    switch (type) {
+    case Svc::SignalType::Signal:
+    case Svc::SignalType::SignalAndIncrementIfEqual:
+    case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
+        return true;
+    default:
+        return false;
+    }
+}
+
+constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
+    switch (type) {
+    case Svc::ArbitrationType::WaitIfLessThan:
+    case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
+    case Svc::ArbitrationType::WaitIfEqual:
+        return true;
+    default:
+        return false;
+    }
+}
 
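IsValidSignalType and IsValidArbitrationType guard against guest code passing an out-of-range integer where an enum is expected: the raw register value is cast to the enum type and rejected before any other work happens. A self-contained sketch of the same pattern, with illustrative enumerator values:

    #include <cstdint>

    enum class ArbitrationType : std::uint32_t {
        WaitIfLessThan = 0,
        DecrementAndWaitIfLessThan = 1,
        WaitIfEqual = 2,
    };

    constexpr bool IsValidArbitrationType(ArbitrationType type) {
        switch (type) {
        case ArbitrationType::WaitIfLessThan:
        case ArbitrationType::DecrementAndWaitIfLessThan:
        case ArbitrationType::WaitIfEqual:
            return true;
        default:
            return false;
        }
    }

    int main() {
        const auto good = static_cast<ArbitrationType>(2);
        const auto bad = static_cast<ArbitrationType>(7); // out of range, caught up front
        return (IsValidArbitrationType(good) && !IsValidArbitrationType(bad)) ? 0 : 1;
    }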
+} // namespace
+
 // Wait for an address (via Address Arbiter)
-static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type, s32 value,
-                                 s64 timeout) {
-    LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, timeout={}", address,
-              type, value, timeout);
-
-    // If the passed address is a kernel virtual address, return invalid memory state.
-    if (Core::Memory::IsKernelVirtualAddress(address)) {
-        LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    // If the address is not properly aligned to 4 bytes, return invalid address.
-    if (!Common::IsWordAligned(address)) {
-        LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address);
-        return ERR_INVALID_ADDRESS;
-    }
-
-    const auto arbitration_type = static_cast<AddressArbiter::ArbitrationType>(type);
-    auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
-    const ResultCode result =
-        address_arbiter.WaitForAddress(address, arbitration_type, value, timeout);
-    return result;
+static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type,
+                                 s32 value, s64 timeout_ns) {
+    LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
+              address, arb_type, value, timeout_ns);
+
+    // Validate input.
+    R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
+    R_UNLESS(Common::IsAligned(address, sizeof(int32_t)), Svc::ResultInvalidAddress);
+    R_UNLESS(IsValidArbitrationType(arb_type), Svc::ResultInvalidEnumValue);
+
+    // Convert timeout from nanoseconds to ticks.
+    s64 timeout{};
+    if (timeout_ns > 0) {
+        const s64 offset_tick(timeout_ns);
+        if (offset_tick > 0) {
+            timeout = offset_tick + 2;
+            if (timeout <= 0) {
+                timeout = std::numeric_limits<s64>::max();
+            }
+        } else {
+            timeout = std::numeric_limits<s64>::max();
+        }
+    } else {
+        timeout = timeout_ns;
+    }
+
+    return system.Kernel().CurrentProcess()->WaitAddressArbiter(address, arb_type, value, timeout);
 }
 
-static ResultCode WaitForAddress32(Core::System& system, u32 address, u32 type, s32 value,
-                                   u32 timeout_low, u32 timeout_high) {
-    const auto timeout = static_cast<s64>(timeout_low | (u64{timeout_high} << 32));
-    return WaitForAddress(system, address, type, value, timeout);
+static ResultCode WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type,
+                                   s32 value, u32 timeout_ns_low, u32 timeout_ns_high) {
+    const auto timeout = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
+    return WaitForAddress(system, address, arb_type, value, timeout);
 }
 
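The 32-bit wrappers above rebuild the signed 64-bit timeout from two registers by zero-extending the low word, shifting the high word into the upper bits, and reinterpreting the result as signed, so an all-ones pair still means "wait forever". A self-contained check of that arithmetic (CombineTimeout is an illustrative helper name, not part of the diff):

    #include <cstdint>

    std::int64_t CombineTimeout(std::uint32_t timeout_ns_low, std::uint32_t timeout_ns_high) {
        return static_cast<std::int64_t>(timeout_ns_low | (std::uint64_t{timeout_ns_high} << 32));
    }

    int main() {
        const bool ok = CombineTimeout(1000, 0) == 1000 &&
                        CombineTimeout(0xFFFF'FFFF, 0xFFFF'FFFF) == -1; // all-ones = wait forever
        return ok ? 0 : 1;
    }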
 // Signals to an address (via Address Arbiter)
-static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value,
-                                  s32 num_to_wake) {
-    LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, num_to_wake=0x{:X}",
-              address, type, value, num_to_wake);
-
-    // If the passed address is a kernel virtual address, return invalid memory state.
-    if (Core::Memory::IsKernelVirtualAddress(address)) {
-        LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address);
-        return ERR_INVALID_ADDRESS_STATE;
-    }
-
-    // If the address is not properly aligned to 4 bytes, return invalid address.
-    if (!Common::IsWordAligned(address)) {
-        LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address);
-        return ERR_INVALID_ADDRESS;
-    }
-
-    const auto signal_type = static_cast<AddressArbiter::SignalType>(type);
-    auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
-    return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake);
+static ResultCode SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type,
+                                  s32 value, s32 count) {
+    LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
+              address, signal_type, value, count);
+
+    // Validate input.
+    R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
+    R_UNLESS(Common::IsAligned(address, sizeof(s32)), Svc::ResultInvalidAddress);
+    R_UNLESS(IsValidSignalType(signal_type), Svc::ResultInvalidEnumValue);
+
+    return system.Kernel().CurrentProcess()->SignalAddressArbiter(address, signal_type, value,
+                                                                  count);
 }
 
-static ResultCode SignalToAddress32(Core::System& system, u32 address, u32 type, s32 value,
-                                    s32 num_to_wake) {
-    return SignalToAddress(system, address, type, value, num_to_wake);
+static ResultCode SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
+                                    s32 value, s32 count) {
+    return SignalToAddress(system, address, signal_type, value, count);
 }
 
 static void KernelDebug([[maybe_unused]] Core::System& system,