summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/core/CMakeLists.txt2
-rw-r--r--src/core/hardware_properties.h20
-rw-r--r--src/core/hle/kernel/k_capabilities.cpp358
-rw-r--r--src/core/hle/kernel/k_capabilities.h295
-rw-r--r--src/core/hle/kernel/svc_types.h5
-rw-r--r--src/core/hle/kernel/svc_version.h58
6 files changed, 738 insertions, 0 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 5afdeb5ff..3eee1cfbe 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -182,6 +182,8 @@ add_library(core STATIC
182 hle/kernel/k_auto_object_container.cpp 182 hle/kernel/k_auto_object_container.cpp
183 hle/kernel/k_auto_object_container.h 183 hle/kernel/k_auto_object_container.h
184 hle/kernel/k_affinity_mask.h 184 hle/kernel/k_affinity_mask.h
185 hle/kernel/k_capabilities.cpp
186 hle/kernel/k_capabilities.h
185 hle/kernel/k_class_token.cpp 187 hle/kernel/k_class_token.cpp
186 hle/kernel/k_class_token.h 188 hle/kernel/k_class_token.h
187 hle/kernel/k_client_port.cpp 189 hle/kernel/k_client_port.cpp
diff --git a/src/core/hardware_properties.h b/src/core/hardware_properties.h
index 13cbdb734..45567b840 100644
--- a/src/core/hardware_properties.h
+++ b/src/core/hardware_properties.h
@@ -25,6 +25,26 @@ constexpr std::array<s32, Common::BitSize<u64>()> VirtualToPhysicalCoreMap{
25 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 25 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
26}; 26};
27 27
// Number of virtual cores addressable by the scheduler: one per bit of a u64 affinity mask.
static constexpr inline size_t NumVirtualCores = Common::BitSize<u64>();

// Mask with one bit set for every virtual core (i.e. all NumVirtualCores low bits set).
static constexpr inline u64 VirtualCoreMask = [] {
    u64 mask = 0;
    for (size_t i = 0; i < NumVirtualCores; ++i) {
        mask |= (UINT64_C(1) << i);
    }
    return mask;
}();
37
// Translates a virtual-core affinity mask into a physical-core mask by mapping each set
// bit through VirtualToPhysicalCoreMap. Multiple virtual cores may map to the same
// physical core, so set bits can coalesce in the result.
// NOTE(review): std::countr_zero requires <bit> — confirm it is included by this header.
static constexpr inline u64 ConvertVirtualCoreMaskToPhysical(u64 v_core_mask) {
    u64 p_core_mask = 0;
    while (v_core_mask != 0) {
        // Index of the lowest set virtual-core bit; clear it, then set the mapped physical bit.
        const u64 next = std::countr_zero(v_core_mask);
        v_core_mask &= ~(static_cast<u64>(1) << next);
        p_core_mask |= (static_cast<u64>(1) << VirtualToPhysicalCoreMap[next]);
    }
    return p_core_mask;
}
47
28// Cortex-A57 supports 4 memory watchpoints 48// Cortex-A57 supports 4 memory watchpoints
29constexpr u64 NUM_WATCHPOINTS = 4; 49constexpr u64 NUM_WATCHPOINTS = 4;
30 50
diff --git a/src/core/hle/kernel/k_capabilities.cpp b/src/core/hle/kernel/k_capabilities.cpp
new file mode 100644
index 000000000..64f1d7371
--- /dev/null
+++ b/src/core/hle/kernel/k_capabilities.cpp
@@ -0,0 +1,358 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/hardware_properties.h"
5#include "core/hle/kernel/k_capabilities.h"
6#include "core/hle/kernel/k_memory_layout.h"
7#include "core/hle/kernel/k_page_table.h"
8#include "core/hle/kernel/kernel.h"
9#include "core/hle/kernel/svc_results.h"
10#include "core/hle/kernel/svc_version.h"
11
12namespace Kernel {
13
14Result KCapabilities::InitializeForKIP(std::span<const u32> kern_caps, KPageTable* page_table) {
15 // We're initializing an initial process.
16 m_svc_access_flags.reset();
17 m_irq_access_flags.reset();
18 m_debug_capabilities = 0;
19 m_handle_table_size = 0;
20 m_intended_kernel_version = 0;
21 m_program_type = 0;
22
23 // Initial processes may run on all cores.
24 constexpr u64 VirtMask = Core::Hardware::VirtualCoreMask;
25 constexpr u64 PhysMask = Core::Hardware::ConvertVirtualCoreMaskToPhysical(VirtMask);
26
27 m_core_mask = VirtMask;
28 m_phys_core_mask = PhysMask;
29
30 // Initial processes may use any user priority they like.
31 m_priority_mask = ~0xFULL;
32
33 // Here, Nintendo sets the kernel version to the current kernel version.
34 // We will follow suit and set the version to the highest supported kernel version.
35 KernelVersion intended_kernel_version{};
36 intended_kernel_version.major_version.Assign(Svc::SupportedKernelMajorVersion);
37 intended_kernel_version.minor_version.Assign(Svc::SupportedKernelMinorVersion);
38 m_intended_kernel_version = intended_kernel_version.raw;
39
40 // Parse the capabilities array.
41 R_RETURN(this->SetCapabilities(kern_caps, page_table));
42}
43
44Result KCapabilities::InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table) {
45 // We're initializing a user process.
46 m_svc_access_flags.reset();
47 m_irq_access_flags.reset();
48 m_debug_capabilities = 0;
49 m_handle_table_size = 0;
50 m_intended_kernel_version = 0;
51 m_program_type = 0;
52
53 // User processes must specify what cores/priorities they can use.
54 m_core_mask = 0;
55 m_priority_mask = 0;
56
57 // Parse the user capabilities array.
58 R_RETURN(this->SetCapabilities(user_caps, page_table));
59}
60
Result KCapabilities::SetCorePriorityCapability(const u32 cap) {
    // We can't set core/priority if we've already set them.
    R_UNLESS(m_core_mask == 0, ResultInvalidArgument);
    R_UNLESS(m_priority_mask == 0, ResultInvalidArgument);

    // Validate the core/priority.
    // Note: a numerically larger thread priority is a *lower* scheduling priority, so the
    // "lowest_thread_priority" field carries the largest permitted numeric value.
    CorePriority pack{cap};
    const u32 min_core = pack.minimum_core_id;
    const u32 max_core = pack.maximum_core_id;
    const u32 max_prio = pack.lowest_thread_priority;
    const u32 min_prio = pack.highest_thread_priority;

    R_UNLESS(min_core <= max_core, ResultInvalidCombination);
    R_UNLESS(min_prio <= max_prio, ResultInvalidCombination);
    R_UNLESS(max_core < Core::Hardware::NumVirtualCores, ResultInvalidCoreId);

    // Priorities index bits of the 64-bit priority mask below, so they must fit in it.
    ASSERT(max_prio < Common::BitSize<u64>());

    // Set core mask.
    for (auto core_id = min_core; core_id <= max_core; core_id++) {
        m_core_mask |= (1ULL << core_id);
    }
    ASSERT((m_core_mask & Core::Hardware::VirtualCoreMask) == m_core_mask);

    // Set physical core mask.
    m_phys_core_mask = Core::Hardware::ConvertVirtualCoreMaskToPhysical(m_core_mask);

    // Set priority mask.
    for (auto prio = min_prio; prio <= max_prio; prio++) {
        m_priority_mask |= (1ULL << prio);
    }

    // We must have some core/priority we can use.
    R_UNLESS(m_core_mask != 0, ResultInvalidArgument);
    R_UNLESS(m_priority_mask != 0, ResultInvalidArgument);

    // Processes must not have access to kernel thread priorities (bits 0-3).
    R_UNLESS((m_priority_mask & 0xF) == 0, ResultInvalidArgument);

    R_SUCCEED();
}
102
Result KCapabilities::SetSyscallMaskCapability(const u32 cap, u32& set_svc) {
    // Validate the index.
    SyscallMask pack{cap};
    const u32 mask = pack.mask;
    const u32 index = pack.index;

    // Each mask chunk may be provided at most once; set_svc tracks which chunk
    // indices have already been seen across the whole capability array.
    const u32 index_flag = (1U << index);
    R_UNLESS((set_svc & index_flag) == 0, ResultInvalidCombination);
    set_svc |= index_flag;

    // Set SVCs. SVC id = (chunk index * chunk width in bits) + bit position in the chunk.
    for (size_t i = 0; i < decltype(SyscallMask::mask)::bits; i++) {
        const u32 svc_id = static_cast<u32>(decltype(SyscallMask::mask)::bits * index + i);
        if (mask & (1U << i)) {
            // Fails when the computed id exceeds the supported SVC count.
            R_UNLESS(this->SetSvcAllowed(svc_id), ResultOutOfRange);
        }
    }

    R_SUCCEED();
}
123
124Result KCapabilities::MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table) {
125 const auto range_pack = MapRange{cap};
126 const auto size_pack = MapRangeSize{size_cap};
127
128 // Get/validate address/size
129 const u64 phys_addr = range_pack.address.Value() * PageSize;
130
131 // Validate reserved bits are unused.
132 R_UNLESS(size_pack.reserved.Value() == 0, ResultOutOfRange);
133
134 const size_t num_pages = size_pack.pages;
135 const size_t size = num_pages * PageSize;
136 R_UNLESS(num_pages != 0, ResultInvalidSize);
137 R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress);
138 R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, ResultInvalidAddress);
139
140 // Do the mapping.
141 [[maybe_unused]] const KMemoryPermission perm = range_pack.read_only.Value()
142 ? KMemoryPermission::UserRead
143 : KMemoryPermission::UserReadWrite;
144 if (MapRangeSize{size_cap}.normal) {
145 // R_RETURN(page_table->MapStatic(phys_addr, size, perm));
146 } else {
147 // R_RETURN(page_table->MapIo(phys_addr, size, perm));
148 }
149
150 UNIMPLEMENTED();
151 R_SUCCEED();
152}
153
Result KCapabilities::MapIoPage_(const u32 cap, KPageTable* page_table) {
    // Get/validate address/size. A MapIoPage capability always maps exactly one page.
    const u64 phys_addr = MapIoPage{cap}.address.Value() * PageSize;
    const size_t num_pages = 1;
    const size_t size = num_pages * PageSize;
    // num_pages is a constant 1 here; this check mirrors MapRange_ for symmetry.
    R_UNLESS(num_pages != 0, ResultInvalidSize);
    // Reject arithmetic wrap-around and addresses beyond the mappable physical space.
    R_UNLESS(phys_addr < phys_addr + size, ResultInvalidAddress);
    R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, ResultInvalidAddress);

    // Do the mapping.
    // R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission_UserReadWrite));

    UNIMPLEMENTED();
    R_SUCCEED();
}
169
// Decodes a MapRegion capability and invokes f(region_type, permission) for each of the
// up-to-three regions it requests. f must return a Result; the first failure aborts.
template <typename F>
Result KCapabilities::ProcessMapRegionCapability(const u32 cap, F f) {
    // Define the allowed memory regions, indexed directly by RegionType value.
    constexpr std::array<KMemoryRegionType, 4> MemoryRegions{
        KMemoryRegionType_None,
        KMemoryRegionType_KernelTraceBuffer,
        KMemoryRegionType_OnMemoryBootImage,
        KMemoryRegionType_DTB,
    };

    // Extract regions/read only.
    const MapRegion pack{cap};
    const std::array<RegionType, 3> types{pack.region0, pack.region1, pack.region2};
    const std::array<u32, 3> ro{pack.read_only0, pack.read_only1, pack.read_only2};

    for (size_t i = 0; i < types.size(); i++) {
        const auto type = types[i];
        const auto perm = ro[i] ? KMemoryPermission::UserRead : KMemoryPermission::UserReadWrite;
        switch (type) {
        case RegionType::NoMapping:
            break;
        case RegionType::KernelTraceBuffer:
        case RegionType::OnMemoryBootImage:
        case RegionType::DTB:
            // RegionType values map 1:1 onto MemoryRegions indices (see table above).
            R_TRY(f(MemoryRegions[static_cast<u32>(type)], perm));
            break;
        default:
            R_THROW(ResultNotFound);
        }
    }

    R_SUCCEED();
}
203
Result KCapabilities::MapRegion_(const u32 cap, KPageTable* page_table) {
    // Map each region into the process's page table.
    // NOTE(review): page_table is currently unused; once MapRegion is implemented the
    // lambda below will need to capture it.
    R_RETURN(ProcessMapRegionCapability(
        cap, [](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
            // R_RETURN(page_table->MapRegion(region_type, perm));
            UNIMPLEMENTED();
            R_SUCCEED();
        }));
}
213
Result KCapabilities::CheckMapRegion(KernelCore& kernel, const u32 cap) {
    // Check that each region requested by the capability has a physical backing store.
    // The permission parameter is irrelevant to this existence check.
    R_RETURN(ProcessMapRegionCapability(
        cap, [&](KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
            R_UNLESS(kernel.MemoryLayout().GetPhysicalMemoryRegionTree().FindFirstDerived(
                         region_type) != nullptr,
                     ResultOutOfRange);
            R_SUCCEED();
        }));
}
224
Result KCapabilities::SetInterruptPairCapability(const u32 cap) {
    // Extract interrupts. A pair always carries two ids; PaddingInterruptId marks an
    // unused slot and is skipped.
    const InterruptPair pack{cap};
    const std::array<u32, 2> ids{pack.interrupt_id0, pack.interrupt_id1};

    for (size_t i = 0; i < ids.size(); i++) {
        if (ids[i] != PaddingInterruptId) {
            // TODO: validate the id against the interrupt manager and grant permission.
            UNIMPLEMENTED();
            // R_UNLESS(Kernel::GetInterruptManager().IsInterruptDefined(ids[i]), ResultOutOfRange);
            // R_UNLESS(this->SetInterruptPermitted(ids[i]), ResultOutOfRange);
        }
    }

    R_SUCCEED();
}
240
241Result KCapabilities::SetProgramTypeCapability(const u32 cap) {
242 // Validate.
243 const ProgramType pack{cap};
244 R_UNLESS(pack.reserved == 0, ResultReservedUsed);
245
246 m_program_type = pack.type;
247 R_SUCCEED();
248}
249
Result KCapabilities::SetKernelVersionCapability(const u32 cap) {
    // Ensure we haven't set our version before (a zero major version means "unset").
    R_UNLESS(KernelVersion{m_intended_kernel_version}.major_version == 0, ResultInvalidArgument);

    // Set, ensure that we set a valid version (the major version field must be non-zero).
    m_intended_kernel_version = cap;
    R_UNLESS(KernelVersion{m_intended_kernel_version}.major_version != 0, ResultInvalidArgument);

    R_SUCCEED();
}
260
261Result KCapabilities::SetHandleTableCapability(const u32 cap) {
262 // Validate.
263 const HandleTable pack{cap};
264 R_UNLESS(pack.reserved == 0, ResultReservedUsed);
265
266 m_handle_table_size = pack.size;
267 R_SUCCEED();
268}
269
Result KCapabilities::SetDebugFlagsCapability(const u32 cap) {
    // Validate.
    const DebugFlags pack{cap};
    R_UNLESS(pack.reserved == 0, ResultReservedUsed);

    // Copy only the allow/force debug bits into the stored capabilities, leaving the
    // other bits of m_debug_capabilities untouched.
    DebugFlags debug_capabilities{m_debug_capabilities};
    debug_capabilities.allow_debug.Assign(pack.allow_debug);
    debug_capabilities.force_debug.Assign(pack.force_debug);
    m_debug_capabilities = debug_capabilities.raw;

    R_SUCCEED();
}
282
Result KCapabilities::SetCapability(const u32 cap, const u32 set_flags, u32& set_svc,
                                    KPageTable* page_table) {
    // Validate this is a capability we can act on.
    const auto type = GetCapabilityType(cap);
    R_UNLESS(type != CapabilityType::Invalid, ResultInvalidArgument);

    // If the type is padding, we have no work to do.
    R_SUCCEED_IF(type == CapabilityType::Padding);

    // Check that we haven't already processed this capability.
    // Only types listed in InitializeOnceFlags are restricted to a single occurrence.
    const auto flag = GetCapabilityFlag(type);
    R_UNLESS(((set_flags & InitializeOnceFlags) & flag) == 0, ResultInvalidCombination);
    set_flags |= flag;

    // Process the capability.
    switch (type) {
    case CapabilityType::CorePriority:
        R_RETURN(this->SetCorePriorityCapability(cap));
    case CapabilityType::SyscallMask:
        R_RETURN(this->SetSyscallMaskCapability(cap, set_svc));
    case CapabilityType::MapIoPage:
        R_RETURN(this->MapIoPage_(cap, page_table));
    case CapabilityType::MapRegion:
        R_RETURN(this->MapRegion_(cap, page_table));
    case CapabilityType::InterruptPair:
        R_RETURN(this->SetInterruptPairCapability(cap));
    case CapabilityType::ProgramType:
        R_RETURN(this->SetProgramTypeCapability(cap));
    case CapabilityType::KernelVersion:
        R_RETURN(this->SetKernelVersionCapability(cap));
    case CapabilityType::HandleTable:
        R_RETURN(this->SetHandleTableCapability(cap));
    case CapabilityType::DebugFlags:
        R_RETURN(this->SetDebugFlagsCapability(cap));
    default:
        // MapRange never reaches here: the caller handles it (it consumes two descriptors).
        R_THROW(ResultInvalidArgument);
    }
}
321
Result KCapabilities::SetCapabilities(std::span<const u32> caps, KPageTable* page_table) {
    // set_flags tracks which one-shot capability types we've seen; set_svc tracks which
    // SVC mask chunk indices have been provided.
    u32 set_flags = 0, set_svc = 0;

    for (size_t i = 0; i < caps.size(); i++) {
        const u32 cap{caps[i]};

        if (GetCapabilityType(cap) == CapabilityType::MapRange) {
            // MapRange capabilities come in pairs: an address descriptor followed by a
            // size descriptor, so consume the next entry here as well.
            // Check that the pair cap exists.
            R_UNLESS((++i) < caps.size(), ResultInvalidCombination);

            // Check the pair cap is a map range cap.
            const u32 size_cap{caps[i]};
            R_UNLESS(GetCapabilityType(size_cap) == CapabilityType::MapRange,
                     ResultInvalidCombination);

            // Map the range.
            R_TRY(this->MapRange_(cap, size_cap, page_table));
        } else {
            R_TRY(this->SetCapability(cap, set_flags, set_svc, page_table));
        }
    }

    R_SUCCEED();
}
346
347Result KCapabilities::CheckCapabilities(KernelCore& kernel, std::span<const u32> caps) {
348 for (auto cap : caps) {
349 // Check the capability refers to a valid region.
350 if (GetCapabilityType(cap) == CapabilityType::MapRegion) {
351 R_TRY(CheckMapRegion(kernel, cap));
352 }
353 }
354
355 R_SUCCEED();
356}
357
358} // namespace Kernel
diff --git a/src/core/hle/kernel/k_capabilities.h b/src/core/hle/kernel/k_capabilities.h
new file mode 100644
index 000000000..cd96f8d23
--- /dev/null
+++ b/src/core/hle/kernel/k_capabilities.h
@@ -0,0 +1,295 @@
1
2// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
3// SPDX-License-Identifier: GPL-2.0-or-later
4
5#pragma once
6
7#include <bitset>
8#include <span>
9
10#include "common/bit_field.h"
11#include "common/common_types.h"
12
13#include "core/hle/kernel/svc_types.h"
14#include "core/hle/result.h"
15
16namespace Kernel {
17
18class KPageTable;
19class KernelCore;
20
21class KCapabilities {
22public:
23 constexpr explicit KCapabilities() = default;
24
25 Result InitializeForKIP(std::span<const u32> kern_caps, KPageTable* page_table);
26 Result InitializeForUser(std::span<const u32> user_caps, KPageTable* page_table);
27
28 static Result CheckCapabilities(KernelCore& kernel, std::span<const u32> user_caps);
29
30 constexpr u64 GetCoreMask() const {
31 return m_core_mask;
32 }
33
34 constexpr u64 GetPhysicalCoreMask() const {
35 return m_phys_core_mask;
36 }
37
38 constexpr u64 GetPriorityMask() const {
39 return m_priority_mask;
40 }
41
42 constexpr s32 GetHandleTableSize() const {
43 return m_handle_table_size;
44 }
45
46 constexpr const Svc::SvcAccessFlagSet& GetSvcPermissions() const {
47 return m_svc_access_flags;
48 }
49
50 constexpr bool IsPermittedSvc(u32 id) const {
51 return (id < m_svc_access_flags.size()) && m_svc_access_flags[id];
52 }
53
54 constexpr bool IsPermittedInterrupt(u32 id) const {
55 return (id < m_irq_access_flags.size()) && m_irq_access_flags[id];
56 }
57
58 constexpr bool IsPermittedDebug() const {
59 return DebugFlags{m_debug_capabilities}.allow_debug.Value() != 0;
60 }
61
62 constexpr bool CanForceDebug() const {
63 return DebugFlags{m_debug_capabilities}.force_debug.Value() != 0;
64 }
65
66 constexpr u32 GetIntendedKernelMajorVersion() const {
67 return KernelVersion{m_intended_kernel_version}.major_version;
68 }
69
70 constexpr u32 GetIntendedKernelMinorVersion() const {
71 return KernelVersion{m_intended_kernel_version}.minor_version;
72 }
73
74private:
75 static constexpr size_t InterruptIdCount = 0x400;
76 using InterruptFlagSet = std::bitset<InterruptIdCount>;
77
78 enum class CapabilityType : u32 {
79 CorePriority = (1U << 3) - 1,
80 SyscallMask = (1U << 4) - 1,
81 MapRange = (1U << 6) - 1,
82 MapIoPage = (1U << 7) - 1,
83 MapRegion = (1U << 10) - 1,
84 InterruptPair = (1U << 11) - 1,
85 ProgramType = (1U << 13) - 1,
86 KernelVersion = (1U << 14) - 1,
87 HandleTable = (1U << 15) - 1,
88 DebugFlags = (1U << 16) - 1,
89
90 Invalid = 0U,
91 Padding = ~0U,
92 };
93
94 using RawCapabilityValue = u32;
95
96 static constexpr CapabilityType GetCapabilityType(const RawCapabilityValue value) {
97 return static_cast<CapabilityType>((~value & (value + 1)) - 1);
98 }
99
100 static constexpr u32 GetCapabilityFlag(CapabilityType type) {
101 return static_cast<u32>(type) + 1;
102 }
103
104 template <CapabilityType Type>
105 static constexpr inline u32 CapabilityFlag = static_cast<u32>(Type) + 1;
106
107 template <CapabilityType Type>
108 static constexpr inline u32 CapabilityId = std::countr_zero(CapabilityFlag<Type>);
109
110 union CorePriority {
111 static_assert(CapabilityId<CapabilityType::CorePriority> + 1 == 4);
112
113 RawCapabilityValue raw;
114 BitField<0, 4, CapabilityType> id;
115 BitField<4, 6, u32> lowest_thread_priority;
116 BitField<10, 6, u32> highest_thread_priority;
117 BitField<16, 8, u32> minimum_core_id;
118 BitField<24, 8, u32> maximum_core_id;
119 };
120
121 union SyscallMask {
122 static_assert(CapabilityId<CapabilityType::SyscallMask> + 1 == 5);
123
124 RawCapabilityValue raw;
125 BitField<0, 5, CapabilityType> id;
126 BitField<5, 24, u32> mask;
127 BitField<29, 3, u32> index;
128 };
129
130 // #undef MESOSPHERE_ENABLE_LARGE_PHYSICAL_ADDRESS_CAPABILITIES
131 static constexpr u64 PhysicalMapAllowedMask = (1ULL << 36) - 1;
132
133 union MapRange {
134 static_assert(CapabilityId<CapabilityType::MapRange> + 1 == 7);
135
136 RawCapabilityValue raw;
137 BitField<0, 7, CapabilityType> id;
138 BitField<7, 24, u32> address;
139 BitField<31, 1, u32> read_only;
140 };
141
142 union MapRangeSize {
143 static_assert(CapabilityId<CapabilityType::MapRange> + 1 == 7);
144
145 RawCapabilityValue raw;
146 BitField<0, 7, CapabilityType> id;
147 BitField<7, 20, u32> pages;
148 BitField<27, 4, u32> reserved;
149 BitField<31, 1, u32> normal;
150 };
151
152 union MapIoPage {
153 static_assert(CapabilityId<CapabilityType::MapIoPage> + 1 == 8);
154
155 RawCapabilityValue raw;
156 BitField<0, 8, CapabilityType> id;
157 BitField<8, 24, u32> address;
158 };
159
160 enum class RegionType : u32 {
161 NoMapping = 0,
162 KernelTraceBuffer = 1,
163 OnMemoryBootImage = 2,
164 DTB = 3,
165 };
166
167 union MapRegion {
168 static_assert(CapabilityId<CapabilityType::MapRegion> + 1 == 11);
169
170 RawCapabilityValue raw;
171 BitField<0, 11, CapabilityType> id;
172 BitField<11, 6, RegionType> region0;
173 BitField<17, 1, u32> read_only0;
174 BitField<18, 6, RegionType> region1;
175 BitField<24, 1, u32> read_only1;
176 BitField<25, 6, RegionType> region2;
177 BitField<31, 1, u32> read_only2;
178 };
179
180 union InterruptPair {
181 static_assert(CapabilityId<CapabilityType::InterruptPair> + 1 == 12);
182
183 RawCapabilityValue raw;
184 BitField<0, 12, CapabilityType> id;
185 BitField<12, 10, u32> interrupt_id0;
186 BitField<22, 10, u32> interrupt_id1;
187 };
188
189 union ProgramType {
190 static_assert(CapabilityId<CapabilityType::ProgramType> + 1 == 14);
191
192 RawCapabilityValue raw;
193 BitField<0, 14, CapabilityType> id;
194 BitField<14, 3, u32> type;
195 BitField<17, 15, u32> reserved;
196 };
197
198 union KernelVersion {
199 static_assert(CapabilityId<CapabilityType::KernelVersion> + 1 == 15);
200
201 RawCapabilityValue raw;
202 BitField<0, 15, CapabilityType> id;
203 BitField<15, 4, u32> major_version;
204 BitField<19, 13, u32> minor_version;
205 };
206
207 union HandleTable {
208 static_assert(CapabilityId<CapabilityType::HandleTable> + 1 == 16);
209
210 RawCapabilityValue raw;
211 BitField<0, 16, CapabilityType> id;
212 BitField<16, 10, u32> size;
213 BitField<26, 6, u32> reserved;
214 };
215
216 union DebugFlags {
217 static_assert(CapabilityId<CapabilityType::DebugFlags> + 1 == 17);
218
219 RawCapabilityValue raw;
220 BitField<0, 17, CapabilityType> id;
221 BitField<17, 1, u32> allow_debug;
222 BitField<18, 1, u32> force_debug;
223 BitField<19, 13, u32> reserved;
224 };
225
226 static_assert(sizeof(CorePriority) == 4);
227 static_assert(sizeof(SyscallMask) == 4);
228 static_assert(sizeof(MapRange) == 4);
229 static_assert(sizeof(MapRangeSize) == 4);
230 static_assert(sizeof(MapIoPage) == 4);
231 static_assert(sizeof(MapRegion) == 4);
232 static_assert(sizeof(InterruptPair) == 4);
233 static_assert(sizeof(ProgramType) == 4);
234 static_assert(sizeof(KernelVersion) == 4);
235 static_assert(sizeof(HandleTable) == 4);
236 static_assert(sizeof(DebugFlags) == 4);
237
238 static constexpr u32 InitializeOnceFlags =
239 CapabilityFlag<CapabilityType::CorePriority> | CapabilityFlag<CapabilityType::ProgramType> |
240 CapabilityFlag<CapabilityType::KernelVersion> |
241 CapabilityFlag<CapabilityType::HandleTable> | CapabilityFlag<CapabilityType::DebugFlags>;
242
243 static const u32 PaddingInterruptId = 0x3FF;
244 static_assert(PaddingInterruptId < InterruptIdCount);
245
246private:
247 constexpr bool SetSvcAllowed(u32 id) {
248 if (id < m_svc_access_flags.size()) [[likely]] {
249 m_svc_access_flags[id] = true;
250 return true;
251 } else {
252 return false;
253 }
254 }
255
256 constexpr bool SetInterruptPermitted(u32 id) {
257 if (id < m_irq_access_flags.size()) [[likely]] {
258 m_irq_access_flags[id] = true;
259 return true;
260 } else {
261 return false;
262 }
263 }
264
265 Result SetCorePriorityCapability(const u32 cap);
266 Result SetSyscallMaskCapability(const u32 cap, u32& set_svc);
267 Result MapRange_(const u32 cap, const u32 size_cap, KPageTable* page_table);
268 Result MapIoPage_(const u32 cap, KPageTable* page_table);
269 Result MapRegion_(const u32 cap, KPageTable* page_table);
270 Result SetInterruptPairCapability(const u32 cap);
271 Result SetProgramTypeCapability(const u32 cap);
272 Result SetKernelVersionCapability(const u32 cap);
273 Result SetHandleTableCapability(const u32 cap);
274 Result SetDebugFlagsCapability(const u32 cap);
275
276 template <typename F>
277 static Result ProcessMapRegionCapability(const u32 cap, F f);
278 static Result CheckMapRegion(KernelCore& kernel, const u32 cap);
279
280 Result SetCapability(const u32 cap, u32& set_flags, u32& set_svc, KPageTable* page_table);
281 Result SetCapabilities(std::span<const u32> caps, KPageTable* page_table);
282
283private:
284 Svc::SvcAccessFlagSet m_svc_access_flags{};
285 InterruptFlagSet m_irq_access_flags{};
286 u64 m_core_mask{};
287 u64 m_phys_core_mask{};
288 u64 m_priority_mask{};
289 u32 m_debug_capabilities{};
290 s32 m_handle_table_size{};
291 u32 m_intended_kernel_version{};
292 u32 m_program_type{};
293};
294
295} // namespace Kernel
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 33eebcef6..9c2f9998a 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -3,6 +3,8 @@
3 3
4#pragma once 4#pragma once
5 5
6#include <bitset>
7
6#include "common/common_funcs.h" 8#include "common/common_funcs.h"
7#include "common/common_types.h" 9#include "common/common_types.h"
8 10
@@ -592,4 +594,7 @@ struct CreateProcessParameter {
592}; 594};
593static_assert(sizeof(CreateProcessParameter) == 0x30); 595static_assert(sizeof(CreateProcessParameter) == 0x30);
594 596
// Total number of supervisor calls (SVC ids 0x00 through 0xBF).
constexpr size_t NumSupervisorCalls = 0xC0;
// One bit per SVC id, indicating whether a process is permitted to invoke it.
using SvcAccessFlagSet = std::bitset<NumSupervisorCalls>;
599
595} // namespace Kernel::Svc 600} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc_version.h b/src/core/hle/kernel/svc_version.h
new file mode 100644
index 000000000..e4f47b34b
--- /dev/null
+++ b/src/core/hle/kernel/svc_version.h
@@ -0,0 +1,58 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "common/bit_field.h"
7#include "common/common_types.h"
8#include "common/literals.h"
9
10namespace Kernel::Svc {
11
// SVC <-> SDK version numbering conversions. Official kernel versions have
// SVC major = SDK major + 4; minor versions are numerically identical in both schemes.
constexpr inline u32 ConvertToSvcMajorVersion(u32 sdk) {
    return sdk + 4;
}
constexpr inline u32 ConvertToSdkMajorVersion(u32 svc) {
    return svc - 4;
}

constexpr inline u32 ConvertToSvcMinorVersion(u32 sdk) {
    return sdk;
}
constexpr inline u32 ConvertToSdkMinorVersion(u32 svc) {
    return svc;
}
25
// Packed kernel version: low 4 bits are the minor version, next 13 bits the major version.
union KernelVersion {
    u32 value;
    BitField<0, 4, u32> minor_version;
    BitField<4, 13, u32> major_version;
};

// Builds the packed representation from separate major/minor numbers.
constexpr inline u32 EncodeKernelVersion(u32 major, u32 minor) {
    return decltype(KernelVersion::minor_version)::FormatValue(minor) |
           decltype(KernelVersion::major_version)::FormatValue(major);
}

// Extracts the major version field from a packed kernel version.
// NOTE(review): std::bit_cast requires <bit> — confirm it is included transitively.
constexpr inline u32 GetKernelMajorVersion(u32 encoded) {
    return std::bit_cast<decltype(KernelVersion::major_version)>(encoded).Value();
}

// Extracts the minor version field from a packed kernel version.
constexpr inline u32 GetKernelMinorVersion(u32 encoded) {
    return std::bit_cast<decltype(KernelVersion::minor_version)>(encoded).Value();
}
44
// Nintendo doesn't support programs targeting SVC versions < 3.0.
constexpr inline u32 RequiredKernelMajorVersion = 3;
constexpr inline u32 RequiredKernelMinorVersion = 0;
constexpr inline u32 RequiredKernelVersion =
    EncodeKernelVersion(RequiredKernelMajorVersion, RequiredKernelMinorVersion);

// This is the highest SVC version supported, to be updated on new kernel releases.
// NOTE: Official kernel versions have SVC major = SDK major + 4, SVC minor = SDK minor.
// SDK 15.3 therefore corresponds to SVC version 19.3.
constexpr inline u32 SupportedKernelMajorVersion = ConvertToSvcMajorVersion(15);
constexpr inline u32 SupportedKernelMinorVersion = ConvertToSvcMinorVersion(3);
constexpr inline u32 SupportedKernelVersion =
    EncodeKernelVersion(SupportedKernelMajorVersion, SupportedKernelMinorVersion);
57
58} // namespace Kernel::Svc