author     bunnei 2022-10-29 16:08:33 -0700
committer  bunnei 2022-11-03 21:17:07 -0700
commit     661fe06d9daee555a39c16a558c0722ea6bc84be
tree       0a3ae5f5ea6f506d17ef9222397ae3d5f15a683f /src
parent     core: hle: kernel: k_memory_manager: Refresh.
core: hle: kernel: k_page_table: Implement IPC memory methods.
Diffstat (limited to 'src')
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp   812
-rw-r--r--  src/core/hle/kernel/k_page_table.h      100
-rw-r--r--  src/core/hle/kernel/svc_results.h         1
3 files changed, 910 insertions(+), 3 deletions(-)
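Note: the first hunk below introduces KScopedLightLockPair, which UnmapProcessMemory and SetupForIpc use to hold the source and destination page-table locks at the same time; ordering the two locks by address keeps any pair of threads from acquiring them in opposite orders. A minimal, self-contained sketch of the same idiom, using std::mutex as a stand-in for Kernel::KLightLock (the class and names here are illustrative only, not part of the commit):

    #include <memory>
    #include <mutex>

    // Illustrative sketch: acquire two mutexes in address order so that any two
    // threads locking the same pair can never deadlock against each other.
    class ScopedLockPair {
    public:
        ScopedLockPair(std::mutex& lhs, std::mutex& rhs) {
            if (std::addressof(lhs) <= std::addressof(rhs)) {
                m_lower = std::addressof(lhs);
                m_upper = std::addressof(rhs);
            } else {
                m_lower = std::addressof(rhs);
                m_upper = std::addressof(lhs);
            }
            m_lower->lock();
            if (m_upper != m_lower) {
                m_upper->lock(); // Skip the second acquire when lhs and rhs alias.
            }
        }

        ~ScopedLockPair() {
            if (m_upper != m_lower) {
                m_upper->unlock();
            }
            m_lower->unlock();
        }

        ScopedLockPair(const ScopedLockPair&) = delete;
        ScopedLockPair& operator=(const ScopedLockPair&) = delete;

    private:
        std::mutex* m_lower{};
        std::mutex* m_upper{};
    };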
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 0f1bab067..2635d8148 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -24,6 +24,65 @@ namespace Kernel {
24 24
25 25  namespace {
26 26
27class KScopedLightLockPair {
28 YUZU_NON_COPYABLE(KScopedLightLockPair);
29 YUZU_NON_MOVEABLE(KScopedLightLockPair);
30
31private:
32 KLightLock* m_lower;
33 KLightLock* m_upper;
34
35public:
36 KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) {
37 // Ensure our locks are in a consistent order.
38 if (std::addressof(lhs) <= std::addressof(rhs)) {
39 m_lower = std::addressof(lhs);
40 m_upper = std::addressof(rhs);
41 } else {
42 m_lower = std::addressof(rhs);
43 m_upper = std::addressof(lhs);
44 }
45
46 // Acquire both locks.
47 m_lower->Lock();
48 if (m_lower != m_upper) {
49 m_upper->Lock();
50 }
51 }
52
53 ~KScopedLightLockPair() {
54 // Unlock the upper lock.
55 if (m_upper != nullptr && m_upper != m_lower) {
56 m_upper->Unlock();
57 }
58
59 // Unlock the lower lock.
60 if (m_lower != nullptr) {
61 m_lower->Unlock();
62 }
63 }
64
65public:
66 // Utility.
67 void TryUnlockHalf(KLightLock& lock) {
68 // Only allow unlocking if the lock is half the pair.
69 if (m_lower != m_upper) {
70 // We want to be sure the lock is one we own.
71 if (m_lower == std::addressof(lock)) {
72 lock.Unlock();
73 m_lower = nullptr;
74 } else if (m_upper == std::addressof(lock)) {
75 lock.Unlock();
76 m_upper = nullptr;
77 }
78 }
79 }
80};
81
82} // namespace
83
84namespace {
85
27 86  using namespace Common::Literals;
28 87
29 88  constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
@@ -676,7 +735,8 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
676 735
677 736  Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
678 737  VAddr src_addr) {
679 KScopedLightLock lk(m_general_lock);  |  738 // Acquire the table locks.
739 KScopedLightLockPair lk(src_page_table.m_general_lock, m_general_lock);
680 740
681 741  const size_t num_pages{size / PageSize};
682 742
@@ -712,6 +772,723 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& s
712 772  R_SUCCEED();
713 773  }
714 774
775Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
776 VAddr address, size_t size, KMemoryPermission test_perm,
777 KMemoryState dst_state) {
778 // Validate pre-conditions.
779 ASSERT(this->IsLockedByCurrentThread());
780 ASSERT(test_perm == KMemoryPermission::UserReadWrite ||
781 test_perm == KMemoryPermission::UserRead);
782
783 // Check that the address is in range.
784 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
785
786 // Get the source permission.
787 const auto src_perm = static_cast<KMemoryPermission>(
788 (test_perm == KMemoryPermission::UserReadWrite)
789 ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
790 : KMemoryPermission::UserRead);
791
792 // Get aligned extents.
793 const VAddr aligned_src_start = Common::AlignDown((address), PageSize);
794 const VAddr aligned_src_end = Common::AlignUp((address) + size, PageSize);
795 const VAddr mapping_src_start = Common::AlignUp((address), PageSize);
796 const VAddr mapping_src_end = Common::AlignDown((address) + size, PageSize);
797
798 const auto aligned_src_last = (aligned_src_end)-1;
799 const auto mapping_src_last = (mapping_src_end)-1;
800
801 // Get the test state and attribute mask.
802 KMemoryState test_state;
803 KMemoryAttribute test_attr_mask;
804 switch (dst_state) {
805 case KMemoryState::Ipc:
806 test_state = KMemoryState::FlagCanUseIpc;
807 test_attr_mask =
808 KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
809 break;
810 case KMemoryState::NonSecureIpc:
811 test_state = KMemoryState::FlagCanUseNonSecureIpc;
812 test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
813 break;
814 case KMemoryState::NonDeviceIpc:
815 test_state = KMemoryState::FlagCanUseNonDeviceIpc;
816 test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
817 break;
818 default:
819 R_THROW(ResultInvalidCombination);
820 }
821
822 // Ensure that on failure, we roll back appropriately.
823 size_t mapped_size = 0;
824 ON_RESULT_FAILURE {
825 if (mapped_size > 0) {
826 this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size,
827 src_perm);
828 }
829 };
830
831 size_t blocks_needed = 0;
832
833 // Iterate, mapping as needed.
834 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start);
835 while (true) {
836 const KMemoryInfo info = it->GetMemoryInfo();
837
838 // Validate the current block.
839 R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm,
840 test_attr_mask, KMemoryAttribute::None));
841
842 if (mapping_src_start < mapping_src_end && (mapping_src_start) < info.GetEndAddress() &&
843 info.GetAddress() < (mapping_src_end)) {
844 const auto cur_start =
845 info.GetAddress() >= (mapping_src_start) ? info.GetAddress() : (mapping_src_start);
846 const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress()
847 : (mapping_src_end);
848 const size_t cur_size = cur_end - cur_start;
849
850 if (info.GetAddress() < (mapping_src_start)) {
851 ++blocks_needed;
852 }
853 if (mapping_src_last < info.GetLastAddress()) {
854 ++blocks_needed;
855 }
856
857 // Set the permissions on the block, if we need to.
858 if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) {
859 R_TRY(Operate(cur_start, cur_size / PageSize, src_perm,
860 OperationType::ChangePermissions));
861 }
862
863 // Note that we mapped this part.
864 mapped_size += cur_size;
865 }
866
867 // If the block is at the end, we're done.
868 if (aligned_src_last <= info.GetLastAddress()) {
869 break;
870 }
871
872 // Advance.
873 ++it;
874 ASSERT(it != m_memory_block_manager.end());
875 }
876
877 if (out_blocks_needed != nullptr) {
878 ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
879 *out_blocks_needed = blocks_needed;
880 }
881
882 R_SUCCEED();
883}
884
885Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr,
886 KMemoryPermission test_perm, KMemoryState dst_state,
887 KPageTable& src_page_table, bool send) {
888 ASSERT(this->IsLockedByCurrentThread());
889 ASSERT(src_page_table.IsLockedByCurrentThread());
890
891 // Check that we can theoretically map.
892 const VAddr region_start = m_alias_region_start;
893 const size_t region_size = m_alias_region_end - m_alias_region_start;
894 R_UNLESS(size < region_size, ResultOutOfAddressSpace);
895
896 // Get aligned source extents.
897 const VAddr src_start = src_addr;
898 const VAddr src_end = src_addr + size;
899 const VAddr aligned_src_start = Common::AlignDown((src_start), PageSize);
900 const VAddr aligned_src_end = Common::AlignUp((src_start) + size, PageSize);
901 const VAddr mapping_src_start = Common::AlignUp((src_start), PageSize);
902 const VAddr mapping_src_end = Common::AlignDown((src_start) + size, PageSize);
903 const size_t aligned_src_size = aligned_src_end - aligned_src_start;
904 const size_t mapping_src_size =
905 (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0;
906
907 // Select a random address to map at.
908 VAddr dst_addr =
909 this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize,
910 PageSize, 0, this->GetNumGuardPages());
911
912 R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace);
913
914 // Check that we can perform the operation we're about to perform.
915 ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state));
916
917 // Create an update allocator.
918 Result allocator_result;
919 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
920 m_memory_block_slab_manager);
921 R_TRY(allocator_result);
922
923 // We're going to perform an update, so create a helper.
924 KScopedPageTableUpdater updater(this);
925
926 // Reserve space for any partial pages we allocate.
927 const size_t unmapped_size = aligned_src_size - mapping_src_size;
928 KScopedResourceReservation memory_reservation(m_resource_limit,
929 LimitableResource::PhysicalMemory, unmapped_size);
930 R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
931
932 // Ensure that we manage page references correctly.
933 PAddr start_partial_page = 0;
934 PAddr end_partial_page = 0;
935 VAddr cur_mapped_addr = dst_addr;
936
937 // If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll
938 // free on scope exit.
939 SCOPE_EXIT({
940 if (start_partial_page != 0) {
941 m_system.Kernel().MemoryManager().Close(start_partial_page, 1);
942 }
943 if (end_partial_page != 0) {
944 m_system.Kernel().MemoryManager().Close(end_partial_page, 1);
945 }
946 });
947
948 ON_RESULT_FAILURE {
949 if (cur_mapped_addr != dst_addr) {
950 ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize,
951 KMemoryPermission::None, OperationType::Unmap)
952 .IsSuccess());
953 }
954 };
955
956 // Allocate the start page as needed.
957 if (aligned_src_start < mapping_src_start) {
958 start_partial_page =
959 m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
960 R_UNLESS(start_partial_page != 0, ResultOutOfMemory);
961 }
962
963 // Allocate the end page as needed.
964 if (mapping_src_end < aligned_src_end &&
965 (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
966 end_partial_page =
967 m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
968 R_UNLESS(end_partial_page != 0, ResultOutOfMemory);
969 }
970
971 // Get the implementation.
972 auto& src_impl = src_page_table.PageTableImpl();
973
974 // Get the fill value for partial pages.
975 const auto fill_val = m_ipc_fill_value;
976
977 // Begin traversal.
978 Common::PageTable::TraversalContext context;
979 Common::PageTable::TraversalEntry next_entry;
980 bool traverse_valid = src_impl.BeginTraversal(next_entry, context, aligned_src_start);
981 ASSERT(traverse_valid);
982
983 // Prepare tracking variables.
984 PAddr cur_block_addr = next_entry.phys_addr;
985 size_t cur_block_size =
986 next_entry.block_size - ((cur_block_addr) & (next_entry.block_size - 1));
987 size_t tot_block_size = cur_block_size;
988
989 // Map the start page, if we have one.
990 if (start_partial_page != 0) {
991 // Ensure the page holds correct data.
992 const VAddr start_partial_virt =
993 GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), start_partial_page);
994 if (send) {
995 const size_t partial_offset = src_start - aligned_src_start;
996 size_t copy_size, clear_size;
997 if (src_end < mapping_src_start) {
998 copy_size = size;
999 clear_size = mapping_src_start - src_end;
1000 } else {
1001 copy_size = mapping_src_start - src_start;
1002 clear_size = 0;
1003 }
1004
1005 std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val,
1006 partial_offset);
1007 std::memcpy(
1008 m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset),
1009 m_system.Memory().GetPointer<void>(
1010 GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), cur_block_addr) +
1011 partial_offset),
1012 copy_size);
1013 if (clear_size > 0) {
1014 std::memset(m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset +
1015 copy_size),
1016 fill_val, clear_size);
1017 }
1018 } else {
1019 std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val, PageSize);
1020 }
1021
1022 // Map the page.
1023 R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page));
1024
1025 // Update tracking extents.
1026 cur_mapped_addr += PageSize;
1027 cur_block_addr += PageSize;
1028 cur_block_size -= PageSize;
1029
1030 // If the block's size was one page, we may need to continue traversal.
1031 if (cur_block_size == 0 && aligned_src_size > PageSize) {
1032 traverse_valid = src_impl.ContinueTraversal(next_entry, context);
1033 ASSERT(traverse_valid);
1034
1035 cur_block_addr = next_entry.phys_addr;
1036 cur_block_size = next_entry.block_size;
1037 tot_block_size += next_entry.block_size;
1038 }
1039 }
1040
1041 // Map the remaining pages.
1042 while (aligned_src_start + tot_block_size < mapping_src_end) {
1043 // Continue the traversal.
1044 traverse_valid = src_impl.ContinueTraversal(next_entry, context);
1045 ASSERT(traverse_valid);
1046
1047 // Process the block.
1048 if (next_entry.phys_addr != cur_block_addr + cur_block_size) {
1049 // Map the block we've been processing so far.
1050 R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map,
1051 cur_block_addr));
1052
1053 // Update tracking extents.
1054 cur_mapped_addr += cur_block_size;
1055 cur_block_addr = next_entry.phys_addr;
1056 cur_block_size = next_entry.block_size;
1057 } else {
1058 cur_block_size += next_entry.block_size;
1059 }
1060 tot_block_size += next_entry.block_size;
1061 }
1062
1063 // Handle the last direct-mapped page.
1064 if (const VAddr mapped_block_end = aligned_src_start + tot_block_size - cur_block_size;
1065 mapped_block_end < mapping_src_end) {
1066 const size_t last_block_size = mapping_src_end - mapped_block_end;
1067
1068 // Map the last block.
1069 R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map,
1070 cur_block_addr));
1071
1072 // Update tracking extents.
1073 cur_mapped_addr += last_block_size;
1074 cur_block_addr += last_block_size;
1075 if (mapped_block_end + cur_block_size < aligned_src_end &&
1076 cur_block_size == last_block_size) {
1077 traverse_valid = src_impl.ContinueTraversal(next_entry, context);
1078 ASSERT(traverse_valid);
1079
1080 cur_block_addr = next_entry.phys_addr;
1081 }
1082 }
1083
1084 // Map the end page, if we have one.
1085 if (end_partial_page != 0) {
1086 // Ensure the page holds correct data.
1087 const VAddr end_partial_virt =
1088 GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), end_partial_page);
1089 if (send) {
1090 const size_t copy_size = src_end - mapping_src_end;
1091 std::memcpy(m_system.Memory().GetPointer<void>(end_partial_virt),
1092 m_system.Memory().GetPointer<void>(GetHeapVirtualAddress(
1093 m_system.Kernel().MemoryLayout(), cur_block_addr)),
1094 copy_size);
1095 std::memset(m_system.Memory().GetPointer<void>(end_partial_virt + copy_size), fill_val,
1096 PageSize - copy_size);
1097 } else {
1098 std::memset(m_system.Memory().GetPointer<void>(end_partial_virt), fill_val, PageSize);
1099 }
1100
1101 // Map the page.
1102 R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page));
1103 }
1104
1105 // Update memory blocks to reflect our changes
1106 m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize,
1107 dst_state, test_perm, KMemoryAttribute::None,
1108 KMemoryBlockDisableMergeAttribute::Normal,
1109 KMemoryBlockDisableMergeAttribute::None);
1110
1111 // Set the output address.
1112 *out_addr = dst_addr + (src_start - aligned_src_start);
1113
1114 // We succeeded.
1115 memory_reservation.Commit();
1116 R_SUCCEED();
1117}
1118
1119Result KPageTable::SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr,
1120 KPageTable& src_page_table, KMemoryPermission test_perm,
1121 KMemoryState dst_state, bool send) {
1122 // For convenience, alias this.
1123 KPageTable& dst_page_table = *this;
1124
1125 // Acquire the table locks.
1126 KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
1127
1128 // We're going to perform an update, so create a helper.
1129 KScopedPageTableUpdater updater(std::addressof(src_page_table));
1130
1131 // Perform client setup.
1132 size_t num_allocator_blocks;
1133 R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(),
1134 std::addressof(num_allocator_blocks), src_addr, size,
1135 test_perm, dst_state));
1136
1137 // Create an update allocator.
1138 Result allocator_result;
1139 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1140 src_page_table.m_memory_block_slab_manager,
1141 num_allocator_blocks);
1142 R_TRY(allocator_result);
1143
1144 // Get the mapped extents.
1145 const VAddr src_map_start = Common::AlignUp((src_addr), PageSize);
1146 const VAddr src_map_end = Common::AlignDown((src_addr) + size, PageSize);
1147 const size_t src_map_size = src_map_end - src_map_start;
1148
1149 // Ensure that we clean up appropriately if we fail after this.
1150 const auto src_perm = static_cast<KMemoryPermission>(
1151 (test_perm == KMemoryPermission::UserReadWrite)
1152 ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
1153 : KMemoryPermission::UserRead);
1154 ON_RESULT_FAILURE {
1155 if (src_map_end > src_map_start) {
1156 src_page_table.CleanupForIpcClientOnServerSetupFailure(
1157 updater.GetPageList(), src_map_start, src_map_size, src_perm);
1158 }
1159 };
1160
1161 // Perform server setup.
1162 R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state,
1163 src_page_table, send));
1164
1165 // If anything was mapped, ipc-lock the pages.
1166 if (src_map_start < src_map_end) {
1167 // Get the source permission.
1168 src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start,
1169 (src_map_end - src_map_start) / PageSize,
1170 &KMemoryBlock::LockForIpc, src_perm);
1171 }
1172
1173 R_SUCCEED();
1174}
1175
1176Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state) {
1177 // Validate the address.
1178 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
1179
1180 // Lock the table.
1181 KScopedLightLock lk(m_general_lock);
1182
1183 // Validate the memory state.
1184 size_t num_allocator_blocks;
1185 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
1186 KMemoryState::All, dst_state, KMemoryPermission::UserRead,
1187 KMemoryPermission::UserRead, KMemoryAttribute::All,
1188 KMemoryAttribute::None));
1189
1190 // Create an update allocator.
1191 Result allocator_result;
1192 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1193 m_memory_block_slab_manager, num_allocator_blocks);
1194 R_TRY(allocator_result);
1195
1196 // We're going to perform an update, so create a helper.
1197 KScopedPageTableUpdater updater(this);
1198
1199 // Get aligned extents.
1200 const VAddr aligned_start = Common::AlignDown((address), PageSize);
1201 const VAddr aligned_end = Common::AlignUp((address) + size, PageSize);
1202 const size_t aligned_size = aligned_end - aligned_start;
1203 const size_t aligned_num_pages = aligned_size / PageSize;
1204
1205 // Unmap the pages.
1206 R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap));
1207
1208 // Update memory blocks.
1209 m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages,
1210 KMemoryState::None, KMemoryPermission::None,
1211 KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
1212 KMemoryBlockDisableMergeAttribute::Normal);
1213
1214 // Release from the resource limit as relevant.
1215 const VAddr mapping_start = Common::AlignUp((address), PageSize);
1216 const VAddr mapping_end = Common::AlignDown((address) + size, PageSize);
1217 const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
1218 m_resource_limit->Release(LimitableResource::PhysicalMemory, aligned_size - mapping_size);
1219
1220 R_SUCCEED();
1221}
1222
1223Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state) {
1224 // Validate the address.
1225 R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
1226
1227 // Get aligned source extents.
1228 const VAddr mapping_start = Common::AlignUp((address), PageSize);
1229 const VAddr mapping_end = Common::AlignDown((address) + size, PageSize);
1230 const VAddr mapping_last = mapping_end - 1;
1231 const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
1232
1233 // If nothing was mapped, we're actually done immediately.
1234 R_SUCCEED_IF(mapping_size == 0);
1235
1236 // Get the test state and attribute mask.
1237 KMemoryState test_state;
1238 KMemoryAttribute test_attr_mask;
1239 switch (dst_state) {
1240 case KMemoryState::Ipc:
1241 test_state = KMemoryState::FlagCanUseIpc;
1242 test_attr_mask =
1243 KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
1244 break;
1245 case KMemoryState::NonSecureIpc:
1246 test_state = KMemoryState::FlagCanUseNonSecureIpc;
1247 test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
1248 break;
1249 case KMemoryState::NonDeviceIpc:
1250 test_state = KMemoryState::FlagCanUseNonDeviceIpc;
1251 test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
1252 break;
1253 default:
1254 R_THROW(ResultInvalidCombination);
1255 }
1256
1257 // Lock the table.
1258 // NOTE: Nintendo does this *after* creating the updater below, but this does not follow
1259 // convention elsewhere in KPageTable.
1260 KScopedLightLock lk(m_general_lock);
1261
1262 // We're going to perform an update, so create a helper.
1263 KScopedPageTableUpdater updater(this);
1264
1265 // Ensure that on failure, we roll back appropriately.
1266 size_t mapped_size = 0;
1267 ON_RESULT_FAILURE {
1268 if (mapped_size > 0) {
1269 // Determine where the mapping ends.
1270 const auto mapped_end = (mapping_start) + mapped_size;
1271 const auto mapped_last = mapped_end - 1;
1272
1273 // Get current and next iterators.
1274 KMemoryBlockManager::const_iterator start_it =
1275 m_memory_block_manager.FindIterator(mapping_start);
1276 KMemoryBlockManager::const_iterator next_it = start_it;
1277 ++next_it;
1278
1279 // Get the current block info.
1280 KMemoryInfo cur_info = start_it->GetMemoryInfo();
1281
1282 // Create tracking variables.
1283 VAddr cur_address = cur_info.GetAddress();
1284 size_t cur_size = cur_info.GetSize();
1285 bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
1286 bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
1287 bool first =
1288 cur_info.GetIpcDisableMergeCount() == 1 &&
1289 (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) ==
1290 KMemoryBlockDisableMergeAttribute::None;
1291
1292 while (((cur_address) + cur_size - 1) < mapped_last) {
1293 // Check that we have a next block.
1294 ASSERT(next_it != m_memory_block_manager.end());
1295
1296 // Get the next info.
1297 const KMemoryInfo next_info = next_it->GetMemoryInfo();
1298
1299 // Check if we can consolidate the next block's permission set with the current one.
1300
1301 const bool next_perm_eq =
1302 next_info.GetPermission() == next_info.GetOriginalPermission();
1303 const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
1304 if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
1305 cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
1306 // We can consolidate the reprotection for the current and next block into a
1307 // single call.
1308 cur_size += next_info.GetSize();
1309 } else {
1310 // We have to operate on the current block.
1311 if ((cur_needs_set_perm || first) && !cur_perm_eq) {
1312 ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(),
1313 OperationType::ChangePermissions)
1314 .IsSuccess());
1315 }
1316
1317 // Advance.
1318 cur_address = next_info.GetAddress();
1319 cur_size = next_info.GetSize();
1320 first = false;
1321 }
1322
1323 // Advance.
1324 cur_info = next_info;
1325 cur_perm_eq = next_perm_eq;
1326 cur_needs_set_perm = next_needs_set_perm;
1327 ++next_it;
1328 }
1329
1330 // Process the last block.
1331 if ((first || cur_needs_set_perm) && !cur_perm_eq) {
1332 ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(),
1333 OperationType::ChangePermissions)
1334 .IsSuccess());
1335 }
1336 }
1337 };
1338
1339 // Iterate, reprotecting as needed.
1340 {
1341 // Get current and next iterators.
1342 KMemoryBlockManager::const_iterator start_it =
1343 m_memory_block_manager.FindIterator(mapping_start);
1344 KMemoryBlockManager::const_iterator next_it = start_it;
1345 ++next_it;
1346
1347 // Validate the current block.
1348 KMemoryInfo cur_info = start_it->GetMemoryInfo();
1349 ASSERT(this->CheckMemoryState(cur_info, test_state, test_state, KMemoryPermission::None,
1350 KMemoryPermission::None,
1351 test_attr_mask | KMemoryAttribute::IpcLocked,
1352 KMemoryAttribute::IpcLocked)
1353 .IsSuccess());
1354
1355 // Create tracking variables.
1356 VAddr cur_address = cur_info.GetAddress();
1357 size_t cur_size = cur_info.GetSize();
1358 bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
1359 bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
1360 bool first =
1361 cur_info.GetIpcDisableMergeCount() == 1 &&
1362 (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) ==
1363 KMemoryBlockDisableMergeAttribute::None;
1364
1365 while ((cur_address + cur_size - 1) < mapping_last) {
1366 // Check that we have a next block.
1367 ASSERT(next_it != m_memory_block_manager.end());
1368
1369 // Get the next info.
1370 const KMemoryInfo next_info = next_it->GetMemoryInfo();
1371
1372 // Validate the next block.
1373 ASSERT(this->CheckMemoryState(next_info, test_state, test_state,
1374 KMemoryPermission::None, KMemoryPermission::None,
1375 test_attr_mask | KMemoryAttribute::IpcLocked,
1376 KMemoryAttribute::IpcLocked)
1377 .IsSuccess());
1378
1379 // Check if we can consolidate the next block's permission set with the current one.
1380 const bool next_perm_eq =
1381 next_info.GetPermission() == next_info.GetOriginalPermission();
1382 const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
1383 if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
1384 cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
1385 // We can consolidate the reprotection for the current and next block into a single
1386 // call.
1387 cur_size += next_info.GetSize();
1388 } else {
1389 // We have to operate on the current block.
1390 if ((cur_needs_set_perm || first) && !cur_perm_eq) {
1391 R_TRY(Operate(cur_address, cur_size / PageSize,
1392 cur_needs_set_perm ? cur_info.GetOriginalPermission()
1393 : cur_info.GetPermission(),
1394 OperationType::ChangePermissions));
1395 }
1396
1397 // Mark that we mapped the block.
1398 mapped_size += cur_size;
1399
1400 // Advance.
1401 cur_address = next_info.GetAddress();
1402 cur_size = next_info.GetSize();
1403 first = false;
1404 }
1405
1406 // Advance.
1407 cur_info = next_info;
1408 cur_perm_eq = next_perm_eq;
1409 cur_needs_set_perm = next_needs_set_perm;
1410 ++next_it;
1411 }
1412
1413 // Process the last block.
1414 const auto lock_count =
1415 cur_info.GetIpcLockCount() +
1416 (next_it != m_memory_block_manager.end()
1417 ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount())
1418 : 0);
1419 if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) {
1420 R_TRY(Operate(cur_address, cur_size / PageSize,
1421 cur_needs_set_perm ? cur_info.GetOriginalPermission()
1422 : cur_info.GetPermission(),
1423 OperationType::ChangePermissions));
1424 }
1425 }
1426
1427 // Create an update allocator.
1428 // NOTE: Guaranteed zero blocks needed here.
1429 Result allocator_result;
1430 KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
1431 m_memory_block_slab_manager, 0);
1432 R_TRY(allocator_result);
1433
1434 // Unlock the pages.
1435 m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start,
1436 mapping_size / PageSize, &KMemoryBlock::UnlockForIpc,
1437 KMemoryPermission::None);
1438
1439 R_SUCCEED();
1440}
1441
1442void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLinkedList* page_list,
1443 VAddr address, size_t size,
1444 KMemoryPermission prot_perm) {
1445 ASSERT(this->IsLockedByCurrentThread());
1446 ASSERT(Common::IsAligned(address, PageSize));
1447 ASSERT(Common::IsAligned(size, PageSize));
1448
1449 // Get the mapped extents.
1450 const VAddr src_map_start = address;
1451 const VAddr src_map_end = address + size;
1452 const VAddr src_map_last = src_map_end - 1;
1453
1454 // This function is only invoked when there's something to do.
1455 ASSERT(src_map_end > src_map_start);
1456
1457 // Iterate over blocks, fixing permissions.
1458 KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
1459 while (true) {
1460 const KMemoryInfo info = it->GetMemoryInfo();
1461
1462 const auto cur_start =
1463 info.GetAddress() >= src_map_start ? info.GetAddress() : src_map_start;
1464 const auto cur_end =
1465 src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
1466
1467 // If we can, fix the protections on the block.
1468 if ((info.GetIpcLockCount() == 0 &&
1469 (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) ||
1470 (info.GetIpcLockCount() != 0 &&
1471 (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) {
1472 // Check if we actually need to fix the protections on the block.
1473 if (cur_end == src_map_end || info.GetAddress() <= src_map_start ||
1474 (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) {
1475 ASSERT(Operate(cur_start, (cur_end - cur_start) / PageSize, info.GetPermission(),
1476 OperationType::ChangePermissions)
1477 .IsSuccess());
1478 }
1479 }
1480
1481 // If we're past the end of the region, we're done.
1482 if (src_map_last <= info.GetLastAddress()) {
1483 break;
1484 }
1485
1486 // Advance.
1487 ++it;
1488 ASSERT(it != m_memory_block_manager.end());
1489 }
1490}
1491
715 1492  void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) {
716 1493  m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages);
717 1494  }
@@ -858,7 +1635,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
858 1635  R_TRY(allocator_result);
859 1636
860 1637  // We're going to perform an update, so create a helper.
861 // KScopedPageTableUpdater updater(this);  |  1638 KScopedPageTableUpdater updater(this);
862 1639
863 1640  // Prepare to iterate over the memory.
864 1641  auto pg_it = pg.Nodes().begin();
@@ -1074,7 +1851,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
1074 1851  R_TRY(allocator_result);
1075 1852
1076 1853  // We're going to perform an update, so create a helper.
1077 // KScopedPageTableUpdater updater(this);  |  1854 KScopedPageTableUpdater updater(this);
1078 1855
1079 1856  // Separate the mapping.
1080 1857  R_TRY(Operate(map_start_address, (map_last_address + 1 - map_start_address) / PageSize,
@@ -1935,6 +2712,24 @@ Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) {
1935 2712  R_SUCCEED();
1936 2713  }
1937 2714
2715Result KPageTable::LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size) {
2716 R_RETURN(this->LockMemoryAndOpen(
2717 nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer,
2718 KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All,
2719 KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None,
2720 static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
2721 KMemoryPermission::KernelReadWrite),
2722 KMemoryAttribute::Locked));
2723}
2724
2725Result KPageTable::UnlockForIpcUserBuffer(VAddr address, size_t size) {
2726 R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer,
2727 KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None,
2728 KMemoryPermission::None, KMemoryAttribute::All,
2729 KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
2730 KMemoryAttribute::Locked, nullptr));
2731}
2732
1938 2733  Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) {
1939 2734  R_RETURN(this->LockMemoryAndOpen(
1940 2735  out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
@@ -2038,6 +2833,17 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
2038 2833  R_SUCCEED();
2039 2834  }
2040 2835
2836void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
2837 while (page_list->Peek()) {
2838 [[maybe_unused]] auto page = page_list->Pop();
2839
2840 // TODO(bunnei): Free pages once they are allocated in guest memory
2841 // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
2842 // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
2843 // this->GetPageTableManager().Free(page);
2844 }
2845}
2846
2041 2847  VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
2042 2848  switch (state) {
2043 2849  case KMemoryState::Free:
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 753e07c94..950850291 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -16,6 +16,7 @@
16#include "core/hle/kernel/k_memory_layout.h" 16#include "core/hle/kernel/k_memory_layout.h"
17#include "core/hle/kernel/k_memory_manager.h" 17#include "core/hle/kernel/k_memory_manager.h"
18#include "core/hle/result.h" 18#include "core/hle/result.h"
19#include "core/memory.h"
19 20
20 21  namespace Core {
21 22  class System;
@@ -83,6 +84,14 @@ public:
83 84
84 85  Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);
85 86
87 Result LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size);
88 Result UnlockForIpcUserBuffer(VAddr address, size_t size);
89
90 Result SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, KPageTable& src_page_table,
91 KMemoryPermission test_perm, KMemoryState dst_state, bool send);
92 Result CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state);
93 Result CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state);
94
86 95  Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
87 96  Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
88 97  Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
@@ -100,6 +109,45 @@ public:
100 109
101 110  bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
102 111
112protected:
113 struct PageLinkedList {
114 private:
115 struct Node {
116 Node* m_next;
117 std::array<u8, PageSize - sizeof(Node*)> m_buffer;
118 };
119
120 public:
121 constexpr PageLinkedList() = default;
122
123 void Push(Node* n) {
124 ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
125 n->m_next = m_root;
126 m_root = n;
127 }
128
129 void Push(Core::Memory::Memory& memory, VAddr addr) {
130 this->Push(memory.GetPointer<Node>(addr));
131 }
132
133 Node* Peek() const {
134 return m_root;
135 }
136
137 Node* Pop() {
138 Node* const r = m_root;
139
140 m_root = r->m_next;
141 r->m_next = nullptr;
142
143 return r;
144 }
145
146 private:
147 Node* m_root{};
148 };
149 static_assert(std::is_trivially_destructible<PageLinkedList>::value);
150
103 151  private:
104 152  enum class OperationType : u32 {
105 153  Map = 0,
@@ -128,6 +176,7 @@ private:
128 176  OperationType operation);
129 177  Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
130 178  PAddr map_addr = 0);
179 void FinalizeUpdate(PageLinkedList* page_list);
131 180  VAddr GetRegionAddress(KMemoryState state) const;
132 181  size_t GetRegionSize(KMemoryState state) const;
133 182
@@ -204,6 +253,14 @@ private:
204 253  return *out != 0;
205 254  }
206 255
256 Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, VAddr address,
257 size_t size, KMemoryPermission test_perm, KMemoryState dst_state);
258 Result SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr,
259 KMemoryPermission test_perm, KMemoryState dst_state,
260 KPageTable& src_page_table, bool send);
261 void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
262 size_t size, KMemoryPermission prot_perm);
263
207 264  // HACK: These will be removed once we automatically manage page reference counts.
208 265  void HACK_OpenPages(PAddr phys_addr, size_t num_pages);
209 266  void HACK_ClosePages(VAddr virt_addr, size_t num_pages);
@@ -325,6 +382,31 @@ public:
325 382  addr + size - 1 <= m_address_space_end - 1;
326 383  }
327 384
385public:
386 static VAddr GetLinearMappedVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
387 return layout.GetLinearVirtualAddress(addr);
388 }
389
390 static PAddr GetLinearMappedPhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
391 return layout.GetLinearPhysicalAddress(addr);
392 }
393
394 static VAddr GetHeapVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
395 return GetLinearMappedVirtualAddress(layout, addr);
396 }
397
398 static PAddr GetHeapPhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
399 return GetLinearMappedPhysicalAddress(layout, addr);
400 }
401
402 static VAddr GetPageTableVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
403 return GetLinearMappedVirtualAddress(layout, addr);
404 }
405
406 static PAddr GetPageTablePhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
407 return GetLinearMappedPhysicalAddress(layout, addr);
408 }
409
328 410  private:
329 411  constexpr bool IsKernel() const {
330 412  return m_is_kernel;
@@ -340,6 +422,24 @@ private:
340 422  }
341 423
342 424  private:
425 class KScopedPageTableUpdater {
426 private:
427 KPageTable* m_pt{};
428 PageLinkedList m_ll;
429
430 public:
431 explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {}
432 explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {}
433 ~KScopedPageTableUpdater() {
434 m_pt->FinalizeUpdate(this->GetPageList());
435 }
436
437 PageLinkedList* GetPageList() {
438 return &m_ll;
439 }
440 };
441
442private:
343 443  VAddr m_address_space_start{};
344 444  VAddr m_address_space_end{};
345 445  VAddr m_heap_region_start{};
diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h
index f27cade33..b7ca53085 100644
--- a/src/core/hle/kernel/svc_results.h
+++ b/src/core/hle/kernel/svc_results.h
@@ -37,6 +37,7 @@ constexpr Result ResultInvalidState{ErrorModule::Kernel, 125};
37 37  constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126};
38 38  constexpr Result ResultPortClosed{ErrorModule::Kernel, 131};
39 39  constexpr Result ResultLimitReached{ErrorModule::Kernel, 132};
40constexpr Result ResultOutOfAddressSpace{ErrorModule::Kernel, 259};
40 41  constexpr Result ResultInvalidId{ErrorModule::Kernel, 519};
41 42
42 43  } // namespace Kernel
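Appendix (not part of the commit): the head/tail handling in SetupForIpcClient and SetupForIpcServer above is driven by two pairs of extents. The aligned extents (AlignDown of the start, AlignUp of the end) cover every page the descriptor touches; the mapping extents (AlignUp of the start, AlignDown of the end) cover only the pages it fully owns, which can be mapped into the server directly. The difference is what SetupForIpcServer backs with freshly allocated partial pages and reserves from the resource limit as unmapped_size. A small self-contained sketch of that arithmetic; the buffer address and size below are made-up values:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr std::size_t PageSize = 0x1000;

    constexpr std::uint64_t AlignDown(std::uint64_t value, std::size_t align) {
        return value & ~static_cast<std::uint64_t>(align - 1);
    }

    constexpr std::uint64_t AlignUp(std::uint64_t value, std::size_t align) {
        return AlignDown(value + align - 1, align);
    }

    int main() {
        // Hypothetical unaligned IPC buffer.
        const std::uint64_t address = 0x123456780123;
        const std::size_t size = 0x5000;

        // Every page the buffer touches.
        const std::uint64_t aligned_start = AlignDown(address, PageSize);
        const std::uint64_t aligned_end = AlignUp(address + size, PageSize);

        // Only the pages the buffer fully owns.
        const std::uint64_t mapping_start = AlignUp(address, PageSize);
        const std::uint64_t mapping_end = AlignDown(address + size, PageSize);

        const std::uint64_t mapping_size =
            (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
        const std::uint64_t unmapped_size = (aligned_end - aligned_start) - mapping_size;

        std::printf("directly mapped: 0x%llx bytes, partial pages to allocate and fill: 0x%llx bytes\n",
                    static_cast<unsigned long long>(mapping_size),
                    static_cast<unsigned long long>(unmapped_size));
    }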