summaryrefslogtreecommitdiff
path: root/src/core/hle/kernel/board
diff options
context:
space:
mode:
authorGravatar Liam2023-03-06 21:04:12 -0500
committerGravatar Liam2023-03-12 22:06:53 -0400
commit0483dfae1a7db26dfe310b4119120a9b411d2244 (patch)
treebff15f2a7deff5bde641c8ce197473dd2d2917ed /src/core/hle/kernel/board
parentkernel: remove KLinkedList (diff)
downloadyuzu-0483dfae1a7db26dfe310b4119120a9b411d2244.tar.gz
yuzu-0483dfae1a7db26dfe310b4119120a9b411d2244.tar.xz
yuzu-0483dfae1a7db26dfe310b4119120a9b411d2244.zip
kernel: move KMemoryLayout for NX board
Diffstat (limited to 'src/core/hle/kernel/board')
-rw-r--r--src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp201
1 files changed, 201 insertions, 0 deletions
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp
new file mode 100644
index 000000000..098ba6eac
--- /dev/null
+++ b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.cpp
@@ -0,0 +1,201 @@
1// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/alignment.h"
5#include "common/literals.h"
6#include "core/hle/kernel/k_memory_layout.h"
7#include "core/hle/kernel/k_memory_manager.h"
8#include "core/hle/kernel/k_system_control.h"
9#include "core/hle/kernel/k_trace.h"
10
11namespace Kernel {
12
13namespace {
14
15using namespace Common::Literals;
16
17constexpr size_t CarveoutAlignment = 0x20000;
18constexpr size_t CarveoutSizeMax = (512_MiB) - CarveoutAlignment;
19
20bool SetupPowerManagementControllerMemoryRegion(KMemoryLayout& memory_layout) {
21 // Above firmware 2.0.0, the PMC is not mappable.
22 return memory_layout.GetPhysicalMemoryRegionTree().Insert(
23 0x7000E000, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap) &&
24 memory_layout.GetPhysicalMemoryRegionTree().Insert(
25 0x7000E400, 0xC00,
26 KMemoryRegionType_PowerManagementController | KMemoryRegionAttr_NoUserMap);
27}
28
29void InsertPoolPartitionRegionIntoBothTrees(KMemoryLayout& memory_layout, size_t start, size_t size,
30 KMemoryRegionType phys_type,
31 KMemoryRegionType virt_type, u32& cur_attr) {
32 const u32 attr = cur_attr++;
33 ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(start, size,
34 static_cast<u32>(phys_type), attr));
35 const KMemoryRegion* phys = memory_layout.GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(
36 static_cast<u32>(phys_type), attr);
37 ASSERT(phys != nullptr);
38 ASSERT(phys->GetEndAddress() != 0);
39 ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(phys->GetPairAddress(), size,
40 static_cast<u32>(virt_type), attr));
41}
42
43} // namespace
44
45namespace Init {
46
47void SetupDevicePhysicalMemoryRegions(KMemoryLayout& memory_layout) {
48 ASSERT(SetupPowerManagementControllerMemoryRegion(memory_layout));
49 ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
50 0x70019000, 0x1000, KMemoryRegionType_MemoryController | KMemoryRegionAttr_NoUserMap));
51 ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
52 0x7001C000, 0x1000, KMemoryRegionType_MemoryController0 | KMemoryRegionAttr_NoUserMap));
53 ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
54 0x7001D000, 0x1000, KMemoryRegionType_MemoryController1 | KMemoryRegionAttr_NoUserMap));
55 ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
56 0x50040000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
57 ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
58 0x50041000, 0x1000,
59 KMemoryRegionType_InterruptDistributor | KMemoryRegionAttr_ShouldKernelMap));
60 ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
61 0x50042000, 0x1000,
62 KMemoryRegionType_InterruptCpuInterface | KMemoryRegionAttr_ShouldKernelMap));
63 ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
64 0x50043000, 0x1D000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
65
66 // Map IRAM unconditionally, to support debug-logging-to-iram build config.
67 ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
68 0x40000000, 0x40000, KMemoryRegionType_LegacyLpsIram | KMemoryRegionAttr_ShouldKernelMap));
69
70 // Above firmware 2.0.0, prevent mapping the bpmp exception vectors or the ipatch region.
71 ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
72 0x6000F000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
73 ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
74 0x6001DC00, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
75}
76
77void SetupDramPhysicalMemoryRegions(KMemoryLayout& memory_layout) {
78 const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
79 const PAddr physical_memory_base_address =
80 KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress);
81
82 // Insert blocks into the tree.
83 ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
84 physical_memory_base_address, intended_memory_size, KMemoryRegionType_Dram));
85 ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
86 physical_memory_base_address, ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly));
87
88 // Insert the KTrace block at the end of Dram, if KTrace is enabled.
89 static_assert(!IsKTraceEnabled || KTraceBufferSize > 0);
90 if constexpr (IsKTraceEnabled) {
91 const PAddr ktrace_buffer_phys_addr =
92 physical_memory_base_address + intended_memory_size - KTraceBufferSize;
93 ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
94 ktrace_buffer_phys_addr, KTraceBufferSize, KMemoryRegionType_KernelTraceBuffer));
95 }
96}
97
// Carves the DRAM pool-partition region into the modern (5.0.0+) 4-pool layout:
// application, applet, non-secure system, pool-management, and secure system pools,
// inserting each into both the physical and virtual region trees. Pools are laid out
// from the end of DRAM backwards; the secure system pool receives whatever remains
// at the front of the partition region.
void SetupPoolPartitionMemoryRegions(KMemoryLayout& memory_layout) {
    // Start by identifying the extents of the DRAM memory region.
    const auto dram_extents = memory_layout.GetMainMemoryPhysicalExtents();
    ASSERT(dram_extents.GetEndAddress() != 0);

    // Determine the end of the pool region.
    // NOTE(review): assumes KTraceBufferSize is 0 when tracing is disabled, so no
    // space is lost in that configuration — confirm against k_trace.h.
    const u64 pool_end = dram_extents.GetEndAddress() - KTraceBufferSize;

    // Find the start of the kernel DRAM region.
    const KMemoryRegion* kernel_dram_region =
        memory_layout.GetPhysicalMemoryRegionTree().FindFirstDerived(
            KMemoryRegionType_DramKernelBase);
    ASSERT(kernel_dram_region != nullptr);

    // The kernel base must be carveout-aligned so the non-secure pool cap below is valid.
    const u64 kernel_dram_start = kernel_dram_region->GetAddress();
    ASSERT(Common::IsAligned(kernel_dram_start, CarveoutAlignment));

    // Find the start of the pool partitions region.
    const KMemoryRegion* pool_partitions_region =
        memory_layout.GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(
            KMemoryRegionType_DramPoolPartition, 0);
    ASSERT(pool_partitions_region != nullptr);
    const u64 pool_partitions_start = pool_partitions_region->GetAddress();

    // Setup the pool partition layouts.
    // On 5.0.0+, setup modern 4-pool-partition layout.

    // Get Application and Applet pool sizes.
    const size_t application_pool_size = KSystemControl::Init::GetApplicationPoolSize();
    const size_t applet_pool_size = KSystemControl::Init::GetAppletPoolSize();
    const size_t unsafe_system_pool_min_size =
        KSystemControl::Init::GetMinimumNonSecureSystemPoolSize();

    // Decide on starting addresses for our pools.
    // Application sits at the very end of the pool region, applet directly below it;
    // the non-secure system pool starts below the applet pool but is capped so it
    // never extends past CarveoutSizeMax bytes above the kernel base.
    const u64 application_pool_start = pool_end - application_pool_size;
    const u64 applet_pool_start = application_pool_start - applet_pool_size;
    const u64 unsafe_system_pool_start = std::min(
        kernel_dram_start + CarveoutSizeMax,
        Common::AlignDown(applet_pool_start - unsafe_system_pool_min_size, CarveoutAlignment));
    const size_t unsafe_system_pool_size = applet_pool_start - unsafe_system_pool_start;

    // We want to arrange application pool depending on where the middle of dram is.
    // If the midpoint falls inside the application pool, split the pool into two
    // regions at the midpoint; otherwise insert it as a single region.
    const u64 dram_midpoint = (dram_extents.GetAddress() + dram_extents.GetEndAddress()) / 2;
    u32 cur_pool_attr = 0;
    size_t total_overhead_size = 0;
    if (dram_extents.GetEndAddress() <= dram_midpoint || dram_midpoint <= application_pool_start) {
        InsertPoolPartitionRegionIntoBothTrees(
            memory_layout, application_pool_start, application_pool_size,
            KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool,
            cur_pool_attr);
        total_overhead_size +=
            KMemoryManager::CalculateManagementOverheadSize(application_pool_size);
    } else {
        const size_t first_application_pool_size = dram_midpoint - application_pool_start;
        const size_t second_application_pool_size =
            application_pool_start + application_pool_size - dram_midpoint;
        InsertPoolPartitionRegionIntoBothTrees(
            memory_layout, application_pool_start, first_application_pool_size,
            KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool,
            cur_pool_attr);
        InsertPoolPartitionRegionIntoBothTrees(
            memory_layout, dram_midpoint, second_application_pool_size,
            KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool,
            cur_pool_attr);
        total_overhead_size +=
            KMemoryManager::CalculateManagementOverheadSize(first_application_pool_size);
        total_overhead_size +=
            KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size);
    }

    // Insert the applet pool.
    InsertPoolPartitionRegionIntoBothTrees(memory_layout, applet_pool_start, applet_pool_size,
                                           KMemoryRegionType_DramAppletPool,
                                           KMemoryRegionType_VirtualDramAppletPool, cur_pool_attr);
    total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(applet_pool_size);

    // Insert the nonsecure system pool.
    InsertPoolPartitionRegionIntoBothTrees(
        memory_layout, unsafe_system_pool_start, unsafe_system_pool_size,
        KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool,
        cur_pool_attr);
    total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(unsafe_system_pool_size);

    // Insert the pool management region.
    // The final overhead term accounts for managing the secure system pool, whose
    // size is everything left between the partition start and the management region.
    total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(
        (unsafe_system_pool_start - pool_partitions_start) - total_overhead_size);
    const u64 pool_management_start = unsafe_system_pool_start - total_overhead_size;
    const size_t pool_management_size = total_overhead_size;
    // The management region deliberately uses its own attribute counter (starting at 0)
    // rather than continuing cur_pool_attr.
    u32 pool_management_attr = 0;
    InsertPoolPartitionRegionIntoBothTrees(
        memory_layout, pool_management_start, pool_management_size,
        KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement,
        pool_management_attr);

    // Insert the system pool.
    const u64 system_pool_size = pool_management_start - pool_partitions_start;
    InsertPoolPartitionRegionIntoBothTrees(memory_layout, pool_partitions_start, system_pool_size,
                                           KMemoryRegionType_DramSystemPool,
                                           KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
}
198
199} // namespace Init
200
201} // namespace Kernel