| author | 2021-03-20 00:53:00 -0700 |
|---|---|
| committer | 2021-03-21 14:45:13 -0700 |
| commit | 343eaecd388dc688cea18d754ad2e10f741d0a2e |
| tree | d46c1989ed2b8032ab5f1bbaa88c1f75786b02cd /src |
| parent | common: common_sizes: Move Invalid to Size_* prefix and add missing values. |
hle: kernel: k_memory_layout: Derive memory regions based on board layout.
Diffstat (limited to 'src'):

| File | Lines changed |
|---|---|
| src/core/CMakeLists.txt | 2 |
| src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp | 199 |
| src/core/hle/kernel/k_memory_layout.cpp | 183 |
| src/core/hle/kernel/k_memory_layout.h | 384 |
| src/core/hle/kernel/kernel.cpp | 319 |
| src/core/hle/kernel/kernel.h | 2 |

6 files changed, 1033 insertions, 56 deletions
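Taken together, the commit replaces the hardcoded `KMemoryLayout::GetDefaultLayout()` with virtual and physical region trees that are populated step by step, with the board-specific pieces isolated in `k_memory_layout.board.nintendo_nx.cpp`. A condensed, comments-only sketch of the initialization order, paraphrased from the kernel.cpp hunk at the end of this page (identifiers such as `virt_base`/`phys_base` are placeholders, not names from the patch):

```cpp
// Paraphrased call order; see the kernel.cpp diff below for the real code.
//
//   KMemoryLayout memory_layout;
//   // Seed each tree with a single root region spanning the whole address space.
//   memory_layout.GetVirtualMemoryRegionTree().InsertDirectly(virt_base, virt_last);
//   memory_layout.GetPhysicalMemoryRegionTree().InsertDirectly(phys_base, phys_last);
//   Init::SetupDevicePhysicalMemoryRegions(memory_layout); // board-specific MMIO
//   Init::SetupDramPhysicalMemoryRegions(memory_layout);   // board-specific DRAM
//   Init::SetupPoolPartitionMemoryRegions(memory_layout);  // generic pool carving
//   memory_layout.InitializeLinearMemoryRegionTrees(phys_start, virt_start);
```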
```diff
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index b09938de9..0c1f5b0c8 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -172,6 +172,8 @@ add_library(core STATIC
     hle/kernel/k_memory_block.h
     hle/kernel/k_memory_block_manager.cpp
     hle/kernel/k_memory_block_manager.h
+    hle/kernel/k_memory_layout.cpp
+    hle/kernel/k_memory_layout.board.nintendo_nx.cpp
     hle/kernel/k_memory_layout.h
     hle/kernel/k_memory_manager.cpp
     hle/kernel/k_memory_manager.h
```
diff --git a/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp b/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp
new file mode 100644
index 000000000..a78551291
--- /dev/null
+++ b/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp
@@ -0,0 +1,199 @@

```cpp
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/alignment.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/kernel/k_system_control.h"
#include "core/hle/kernel/k_trace.h"

namespace Kernel {

namespace {

constexpr size_t CarveoutAlignment = 0x20000;
constexpr size_t CarveoutSizeMax = (512ULL * 1024 * 1024) - CarveoutAlignment;

bool SetupPowerManagementControllerMemoryRegion(KMemoryLayout& memory_layout) {
    // Above firmware 2.0.0, the PMC is not mappable.
    return memory_layout.GetPhysicalMemoryRegionTree().Insert(
               0x7000E000, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap) &&
           memory_layout.GetPhysicalMemoryRegionTree().Insert(
               0x7000E400, 0xC00,
               KMemoryRegionType_PowerManagementController | KMemoryRegionAttr_NoUserMap);
}

void InsertPoolPartitionRegionIntoBothTrees(KMemoryLayout& memory_layout, size_t start, size_t size,
                                            KMemoryRegionType phys_type,
                                            KMemoryRegionType virt_type, u32& cur_attr) {
    const u32 attr = cur_attr++;
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(start, size,
                                                              static_cast<u32>(phys_type), attr));
    const KMemoryRegion* phys = memory_layout.GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(
        static_cast<u32>(phys_type), attr);
    ASSERT(phys != nullptr);
    ASSERT(phys->GetEndAddress() != 0);
    ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(phys->GetPairAddress(), size,
                                                             static_cast<u32>(virt_type), attr));
}

} // namespace

namespace Init {

void SetupDevicePhysicalMemoryRegions(KMemoryLayout& memory_layout) {
    ASSERT(SetupPowerManagementControllerMemoryRegion(memory_layout));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x70019000, 0x1000, KMemoryRegionType_MemoryController | KMemoryRegionAttr_NoUserMap));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x7001C000, 0x1000, KMemoryRegionType_MemoryController0 | KMemoryRegionAttr_NoUserMap));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x7001D000, 0x1000, KMemoryRegionType_MemoryController1 | KMemoryRegionAttr_NoUserMap));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x50040000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x50041000, 0x1000,
        KMemoryRegionType_InterruptDistributor | KMemoryRegionAttr_ShouldKernelMap));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x50042000, 0x1000,
        KMemoryRegionType_InterruptCpuInterface | KMemoryRegionAttr_ShouldKernelMap));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x50043000, 0x1D000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));

    // Map IRAM unconditionally, to support debug-logging-to-iram build config.
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x40000000, 0x40000, KMemoryRegionType_LegacyLpsIram | KMemoryRegionAttr_ShouldKernelMap));

    // Above firmware 2.0.0, prevent mapping the bpmp exception vectors or the ipatch region.
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x6000F000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        0x6001DC00, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap));
}

void SetupDramPhysicalMemoryRegions(KMemoryLayout& memory_layout) {
    const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
    const PAddr physical_memory_base_address =
        KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress);

    // Insert blocks into the tree.
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        physical_memory_base_address, intended_memory_size, KMemoryRegionType_Dram));
    ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
        physical_memory_base_address, ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly));

    // Insert the KTrace block at the end of Dram, if KTrace is enabled.
    static_assert(!IsKTraceEnabled || KTraceBufferSize > 0);
    if constexpr (IsKTraceEnabled) {
        const PAddr ktrace_buffer_phys_addr =
            physical_memory_base_address + intended_memory_size - KTraceBufferSize;
        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
            ktrace_buffer_phys_addr, KTraceBufferSize, KMemoryRegionType_KernelTraceBuffer));
    }
}

void SetupPoolPartitionMemoryRegions(KMemoryLayout& memory_layout) {
    // Start by identifying the extents of the DRAM memory region.
    const auto dram_extents = memory_layout.GetMainMemoryPhysicalExtents();
    ASSERT(dram_extents.GetEndAddress() != 0);

    // Determine the end of the pool region.
    const u64 pool_end = dram_extents.GetEndAddress() - KTraceBufferSize;

    // Find the start of the kernel DRAM region.
    const KMemoryRegion* kernel_dram_region =
        memory_layout.GetPhysicalMemoryRegionTree().FindFirstDerived(
            KMemoryRegionType_DramKernelBase);
    ASSERT(kernel_dram_region != nullptr);

    const u64 kernel_dram_start = kernel_dram_region->GetAddress();
    ASSERT(Common::IsAligned(kernel_dram_start, CarveoutAlignment));

    // Find the start of the pool partitions region.
    const KMemoryRegion* pool_partitions_region =
        memory_layout.GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(
            KMemoryRegionType_DramPoolPartition, 0);
    ASSERT(pool_partitions_region != nullptr);
    const u64 pool_partitions_start = pool_partitions_region->GetAddress();

    // Setup the pool partition layouts.
    // On 5.0.0+, setup modern 4-pool-partition layout.

    // Get Application and Applet pool sizes.
    const size_t application_pool_size = KSystemControl::Init::GetApplicationPoolSize();
    const size_t applet_pool_size = KSystemControl::Init::GetAppletPoolSize();
    const size_t unsafe_system_pool_min_size =
        KSystemControl::Init::GetMinimumNonSecureSystemPoolSize();

    // Decide on starting addresses for our pools.
    const u64 application_pool_start = pool_end - application_pool_size;
    const u64 applet_pool_start = application_pool_start - applet_pool_size;
    const u64 unsafe_system_pool_start = std::min(
        kernel_dram_start + CarveoutSizeMax,
        Common::AlignDown(applet_pool_start - unsafe_system_pool_min_size, CarveoutAlignment));
    const size_t unsafe_system_pool_size = applet_pool_start - unsafe_system_pool_start;

    // We want to arrange application pool depending on where the middle of dram is.
    const u64 dram_midpoint = (dram_extents.GetAddress() + dram_extents.GetEndAddress()) / 2;
    u32 cur_pool_attr = 0;
    size_t total_overhead_size = 0;
    if (dram_extents.GetEndAddress() <= dram_midpoint || dram_midpoint <= application_pool_start) {
        InsertPoolPartitionRegionIntoBothTrees(
            memory_layout, application_pool_start, application_pool_size,
            KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool,
            cur_pool_attr);
        total_overhead_size +=
            KMemoryManager::CalculateManagementOverheadSize(application_pool_size);
    } else {
        const size_t first_application_pool_size = dram_midpoint - application_pool_start;
        const size_t second_application_pool_size =
            application_pool_start + application_pool_size - dram_midpoint;
        InsertPoolPartitionRegionIntoBothTrees(
            memory_layout, application_pool_start, first_application_pool_size,
            KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool,
            cur_pool_attr);
        InsertPoolPartitionRegionIntoBothTrees(
            memory_layout, dram_midpoint, second_application_pool_size,
            KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool,
            cur_pool_attr);
        total_overhead_size +=
            KMemoryManager::CalculateManagementOverheadSize(first_application_pool_size);
        total_overhead_size +=
            KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size);
    }

    // Insert the applet pool.
    InsertPoolPartitionRegionIntoBothTrees(memory_layout, applet_pool_start, applet_pool_size,
                                           KMemoryRegionType_DramAppletPool,
                                           KMemoryRegionType_VirtualDramAppletPool, cur_pool_attr);
    total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(applet_pool_size);

    // Insert the nonsecure system pool.
    InsertPoolPartitionRegionIntoBothTrees(
        memory_layout, unsafe_system_pool_start, unsafe_system_pool_size,
        KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool,
        cur_pool_attr);
    total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(unsafe_system_pool_size);

    // Insert the pool management region.
    total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(
        (unsafe_system_pool_start - pool_partitions_start) - total_overhead_size);
    const u64 pool_management_start = unsafe_system_pool_start - total_overhead_size;
    const size_t pool_management_size = total_overhead_size;
    u32 pool_management_attr = 0;
    InsertPoolPartitionRegionIntoBothTrees(
        memory_layout, pool_management_start, pool_management_size,
        KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement,
        pool_management_attr);

    // Insert the system pool.
    const u64 system_pool_size = pool_management_start - pool_partitions_start;
    InsertPoolPartitionRegionIntoBothTrees(memory_layout, pool_partitions_start, system_pool_size,
                                           KMemoryRegionType_DramSystemPool,
                                           KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
}

} // namespace Init

} // namespace Kernel
```
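The pool carving above works backwards from the end of DRAM: the application pool sits at the top, the applet pool directly below it, and the nonsecure system pool below that, clamped so it never crosses the protected-carveout limit. A standalone sketch of that arithmetic under assumed inputs (the pool sizes are borrowed from the old `GetDefaultLayout()` further down this page; the real values come from `KSystemControl::Init` at boot):

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>

int main() {
    // Assumed inputs; KSystemControl reports the real ones at boot.
    constexpr uint64_t dram_end = 0x180000000;         // 0x80000000 + 4 GiB
    constexpr uint64_t kernel_dram_start = 0x80060000; // DramPhysicalAddress + ReservedEarlyDramSize
    constexpr uint64_t application_pool_size = 0xCD500000;
    constexpr uint64_t applet_pool_size = 0x1FB00000;
    constexpr uint64_t unsafe_pool_min_size = 0x4000000; // hypothetical minimum

    constexpr uint64_t carveout_align = 0x20000;                            // CarveoutAlignment
    constexpr uint64_t carveout_size_max = (512ULL << 20) - carveout_align; // CarveoutSizeMax

    // Pools are carved from the top of DRAM downwards.
    const uint64_t application_pool_start = dram_end - application_pool_size;
    const uint64_t applet_pool_start = application_pool_start - applet_pool_size;

    // The nonsecure system pool starts at the lower of "just below the applet
    // pool" (aligned down) and the end of the protected carveout window.
    const uint64_t unsafe_pool_start =
        std::min(kernel_dram_start + carveout_size_max,
                 (applet_pool_start - unsafe_pool_min_size) & ~(carveout_align - 1));

    assert(unsafe_pool_start <= applet_pool_start);
    assert(applet_pool_start <= application_pool_start);
    return 0;
}
```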
diff --git a/src/core/hle/kernel/k_memory_layout.cpp b/src/core/hle/kernel/k_memory_layout.cpp
new file mode 100644
index 000000000..58fe4a133
--- /dev/null
+++ b/src/core/hle/kernel/k_memory_layout.cpp
@@ -0,0 +1,183 @@

```cpp
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/alignment.h"
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_system_control.h"

namespace Kernel {

namespace {

class KMemoryRegionAllocator final : NonCopyable {
public:
    static constexpr size_t MaxMemoryRegions = 200;

private:
    KMemoryRegion region_heap[MaxMemoryRegions]{};
    size_t num_regions{};

public:
    constexpr KMemoryRegionAllocator() = default;

public:
    template <typename... Args>
    KMemoryRegion* Allocate(Args&&... args) {
        // Ensure we stay within the bounds of our heap.
        ASSERT(this->num_regions < MaxMemoryRegions);

        // Create the new region.
        KMemoryRegion* region = std::addressof(this->region_heap[this->num_regions++]);
        new (region) KMemoryRegion(std::forward<Args>(args)...);

        return region;
    }
};

KMemoryRegionAllocator g_memory_region_allocator;

template <typename... Args>
KMemoryRegion* AllocateRegion(Args&&... args) {
    return g_memory_region_allocator.Allocate(std::forward<Args>(args)...);
}

} // namespace

void KMemoryRegionTree::InsertDirectly(u64 address, u64 last_address, u32 attr, u32 type_id) {
    this->insert(*AllocateRegion(address, last_address, attr, type_id));
}

bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) {
    // Locate the memory region that contains the address.
    KMemoryRegion* found = this->FindModifiable(address);

    // We require that the old attr is correct.
    if (found->GetAttributes() != old_attr) {
        return false;
    }

    // We further require that the region can be split from the old region.
    const u64 inserted_region_end = address + size;
    const u64 inserted_region_last = inserted_region_end - 1;
    if (found->GetLastAddress() < inserted_region_last) {
        return false;
    }

    // Further, we require that the type id is a valid transformation.
    if (!found->CanDerive(type_id)) {
        return false;
    }

    // Cache information from the region before we remove it.
    const u64 old_address = found->GetAddress();
    const u64 old_last = found->GetLastAddress();
    const u64 old_pair = found->GetPairAddress();
    const u32 old_type = found->GetType();

    // Erase the existing region from the tree.
    this->erase(this->iterator_to(*found));

    // Insert the new region into the tree.
    if (old_address == address) {
        // Reuse the old object for the new region, if we can.
        found->Reset(address, inserted_region_last, old_pair, new_attr, type_id);
        this->insert(*found);
    } else {
        // If we can't re-use, adjust the old region.
        found->Reset(old_address, address - 1, old_pair, old_attr, old_type);
        this->insert(*found);

        // Insert a new region for the split.
        const u64 new_pair = (old_pair != std::numeric_limits<u64>::max())
                                 ? old_pair + (address - old_address)
                                 : old_pair;
        this->insert(*AllocateRegion(address, inserted_region_last, new_pair, new_attr, type_id));
    }

    // If we need to insert a region after the region, do so.
    if (old_last != inserted_region_last) {
        const u64 after_pair = (old_pair != std::numeric_limits<u64>::max())
                                   ? old_pair + (inserted_region_end - old_address)
                                   : old_pair;
        this->insert(
            *AllocateRegion(inserted_region_end, old_last, after_pair, old_attr, old_type));
    }

    return true;
}

VAddr KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id) {
    // We want to find the total extents of the type id.
    const auto extents = this->GetDerivedRegionExtents(static_cast<KMemoryRegionType>(type_id));

    // Ensure that our alignment is correct.
    ASSERT(Common::IsAligned(extents.GetAddress(), alignment));

    const u64 first_address = extents.GetAddress();
    const u64 last_address = extents.GetLastAddress();

    const u64 first_index = first_address / alignment;
    const u64 last_index = last_address / alignment;

    while (true) {
        const u64 candidate =
            KSystemControl::GenerateRandomRange(first_index, last_index) * alignment;

        // Ensure that the candidate doesn't overflow with the size.
        if (!(candidate < candidate + size)) {
            continue;
        }

        const u64 candidate_last = candidate + size - 1;

        // Ensure that the candidate fits within the region.
        if (candidate_last > last_address) {
            continue;
        }

        // Locate the candidate region, and ensure it fits and has the correct type id.
        if (const auto& candidate_region = *this->Find(candidate);
            !(candidate_last <= candidate_region.GetLastAddress() &&
              candidate_region.GetType() == type_id)) {
            continue;
        }

        return candidate;
    }
}

void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
                                                      VAddr linear_virtual_start) {
    // Set static differences.
    linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
    linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;

    // Initialize linear trees.
    for (auto& region : GetPhysicalMemoryRegionTree()) {
        if (region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
            GetPhysicalLinearMemoryRegionTree().InsertDirectly(
                region.GetAddress(), region.GetLastAddress(), region.GetAttributes(),
                region.GetType());
        }
    }

    for (auto& region : GetVirtualMemoryRegionTree()) {
        if (region.IsDerivedFrom(KMemoryRegionType_Dram)) {
            GetVirtualLinearMemoryRegionTree().InsertDirectly(
                region.GetAddress(), region.GetLastAddress(), region.GetAttributes(),
                region.GetType());
        }
    }
}

size_t KMemoryLayout::GetResourceRegionSizeForInit() {
    // Calculate resource region size based on whether we allow extra threads.
    const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
    size_t resource_region_size =
        KernelResourceSize + (use_extra_resources ? KernelSlabHeapAdditionalSize : 0);

    return resource_region_size;
}

} // namespace Kernel
```
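The key mechanism in `KMemoryRegionTree::Insert` above is a three-way split: deriving a sub-region from a containing region yields at most a shrunken head (which reuses the existing node), the newly typed middle region, and a re-inserted tail. A minimal standalone model of the address arithmetic, with hypothetical bounds (the real tree also tracks pair addresses, types, and attributes):

```cpp
#include <cassert>
#include <cstdint>

struct Region {
    uint64_t first;
    uint64_t last; // inclusive, matching GetLastAddress() semantics
};

int main() {
    const Region parent{0x0000, 0xFFFF};            // existing region of some type
    const uint64_t address = 0x4000, size = 0x1000; // requested derived region

    const Region head{parent.first, address - 1};   // old node, shrunk in place
    const Region mid{address, address + size - 1};  // the newly inserted region
    const Region tail{address + size, parent.last}; // remainder, re-inserted

    // The three pieces tile the parent exactly, with inclusive last addresses.
    assert(head.last + 1 == mid.first);
    assert(mid.last + 1 == tail.first);
    assert(tail.last == parent.last);
    return 0;
}
```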
```diff
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index a76ffa02e..f2b46c932 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -1,24 +1,67 @@
-// Copyright 2020 yuzu Emulator Project
+// Copyright 2021 yuzu Emulator Project
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
 #pragma once
 
+#include "common/alignment.h"
+#include "common/common_sizes.h"
 #include "common/common_types.h"
 #include "core/device_memory.h"
 #include "core/hle/kernel/k_memory_region.h"
+#include "core/hle/kernel/k_memory_region_type.h"
+#include "core/hle/kernel/memory_types.h"
 
 namespace Kernel {
 
-constexpr std::size_t KernelAslrAlignment = 2 * 1024 * 1024;
+constexpr std::size_t L1BlockSize = Size_1_GB;
+constexpr std::size_t L2BlockSize = Size_2_MB;
+
+constexpr std::size_t GetMaximumOverheadSize(std::size_t size) {
+    return (Common::DivideUp(size, L1BlockSize) + Common::DivideUp(size, L2BlockSize)) * PageSize;
+}
+
+constexpr std::size_t MainMemorySize = Size_4_GB;
+constexpr std::size_t MainMemorySizeMax = Size_8_GB;
+
+constexpr std::size_t ReservedEarlyDramSize = 0x60000;
+constexpr std::size_t DramPhysicalAddress = 0x80000000;
+
+constexpr std::size_t KernelAslrAlignment = Size_2_MB;
 constexpr std::size_t KernelVirtualAddressSpaceWidth = 1ULL << 39;
 constexpr std::size_t KernelPhysicalAddressSpaceWidth = 1ULL << 48;
+
 constexpr std::size_t KernelVirtualAddressSpaceBase = 0ULL - KernelVirtualAddressSpaceWidth;
 constexpr std::size_t KernelVirtualAddressSpaceEnd =
     KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment);
-constexpr std::size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1;
+constexpr std::size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1ULL;
 constexpr std::size_t KernelVirtualAddressSpaceSize =
     KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase;
+constexpr std::size_t KernelVirtualAddressCodeBase = KernelVirtualAddressSpaceBase;
+constexpr std::size_t KernelVirtualAddressCodeSize = 0x62000;
+constexpr std::size_t KernelVirtualAddressCodeEnd =
+    KernelVirtualAddressCodeBase + KernelVirtualAddressCodeSize;
+
+constexpr std::size_t KernelPhysicalAddressSpaceBase = 0ULL;
+constexpr std::size_t KernelPhysicalAddressSpaceEnd =
+    KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceWidth;
+constexpr std::size_t KernelPhysicalAddressSpaceLast = KernelPhysicalAddressSpaceEnd - 1ULL;
+constexpr std::size_t KernelPhysicalAddressSpaceSize =
+    KernelPhysicalAddressSpaceEnd - KernelPhysicalAddressSpaceBase;
+constexpr std::size_t KernelPhysicalAddressCodeBase = DramPhysicalAddress + ReservedEarlyDramSize;
+
+constexpr std::size_t KernelPageTableHeapSize = GetMaximumOverheadSize(MainMemorySizeMax);
+constexpr std::size_t KernelInitialPageHeapSize = Size_128_KB;
+
+constexpr std::size_t KernelSlabHeapDataSize = Size_5_MB;
+constexpr std::size_t KernelSlabHeapGapsSize = Size_2_MB - Size_64_KB;
+constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSize;
+
+// NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860.
+constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000ULL;
+
+constexpr std::size_t KernelResourceSize =
+    KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize;
 
 constexpr bool IsKernelAddressKey(VAddr key) {
     return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
@@ -30,41 +73,324 @@ constexpr bool IsKernelAddress(VAddr address) {
 
 class KMemoryLayout final {
 public:
-    constexpr const KMemoryRegion& Application() const {
-        return application;
+    KMemoryLayout() = default;
+
+    KMemoryRegionTree& GetVirtualMemoryRegionTree() {
+        return virtual_tree;
+    }
+    const KMemoryRegionTree& GetVirtualMemoryRegionTree() const {
+        return virtual_tree;
+    }
+    KMemoryRegionTree& GetPhysicalMemoryRegionTree() {
+        return physical_tree;
+    }
+    const KMemoryRegionTree& GetPhysicalMemoryRegionTree() const {
+        return physical_tree;
+    }
+    KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() {
+        return virtual_linear_tree;
+    }
+    const KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() const {
+        return virtual_linear_tree;
+    }
+    KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() {
+        return physical_linear_tree;
+    }
+    const KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() const {
+        return physical_linear_tree;
     }
 
-    constexpr const KMemoryRegion& Applet() const {
-        return applet;
+    VAddr GetLinearVirtualAddress(PAddr address) const {
+        return address + linear_phys_to_virt_diff;
+    }
+    PAddr GetLinearPhysicalAddress(VAddr address) const {
+        return address + linear_virt_to_phys_diff;
     }
 
-    constexpr const KMemoryRegion& System() const {
-        return system;
+    const KMemoryRegion* FindVirtual(VAddr address) const {
+        return Find(address, GetVirtualMemoryRegionTree());
+    }
+    const KMemoryRegion* FindPhysical(PAddr address) const {
+        return Find(address, GetPhysicalMemoryRegionTree());
     }
 
-    static constexpr KMemoryLayout GetDefaultLayout() {
-        constexpr std::size_t application_size{0xcd500000};
-        constexpr std::size_t applet_size{0x1fb00000};
-        constexpr PAddr application_start_address{Core::DramMemoryMap::End - application_size};
-        constexpr PAddr application_end_address{Core::DramMemoryMap::End};
-        constexpr PAddr applet_start_address{application_start_address - applet_size};
-        constexpr PAddr applet_end_address{applet_start_address + applet_size};
-        constexpr PAddr system_start_address{Core::DramMemoryMap::SlabHeapEnd};
-        constexpr PAddr system_end_address{applet_start_address};
-        return {application_start_address, application_end_address, applet_start_address,
-                applet_end_address, system_start_address, system_end_address};
+    const KMemoryRegion* FindVirtualLinear(VAddr address) const {
+        return Find(address, GetVirtualLinearMemoryRegionTree());
+    }
+    const KMemoryRegion* FindPhysicalLinear(PAddr address) const {
+        return Find(address, GetPhysicalLinearMemoryRegionTree());
+    }
+
+    VAddr GetMainStackTopAddress(s32 core_id) const {
+        return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscMainStack);
+    }
+    VAddr GetIdleStackTopAddress(s32 core_id) const {
+        return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscIdleStack);
+    }
+    VAddr GetExceptionStackTopAddress(s32 core_id) const {
+        return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack);
+    }
+
+    VAddr GetSlabRegionAddress() const {
+        return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab))
+            .GetAddress();
+    }
+
+    const KMemoryRegion& GetDeviceRegion(KMemoryRegionType type) const {
+        return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type));
+    }
+    PAddr GetDevicePhysicalAddress(KMemoryRegionType type) const {
+        return GetDeviceRegion(type).GetAddress();
+    }
+    VAddr GetDeviceVirtualAddress(KMemoryRegionType type) const {
+        return GetDeviceRegion(type).GetPairAddress();
+    }
+
+    const KMemoryRegion& GetPoolManagementRegion() const {
+        return Dereference(
+            GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramPoolManagement));
+    }
+    const KMemoryRegion& GetPageTableHeapRegion() const {
+        return Dereference(
+            GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelPtHeap));
+    }
+    const KMemoryRegion& GetKernelStackRegion() const {
+        return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelStack));
+    }
+    const KMemoryRegion& GetTempRegion() const {
+        return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelTemp));
+    }
+
+    const KMemoryRegion& GetKernelTraceBufferRegion() const {
+        return Dereference(GetVirtualLinearMemoryRegionTree().FindByType(
+            KMemoryRegionType_VirtualDramKernelTraceBuffer));
+    }
+
+    const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const {
+        return Dereference(FindVirtualLinear(address));
+    }
+
+    const KMemoryRegion* GetPhysicalKernelTraceBufferRegion() const {
+        return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_KernelTraceBuffer);
+    }
+    const KMemoryRegion* GetPhysicalOnMemoryBootImageRegion() const {
+        return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_OnMemoryBootImage);
+    }
+    const KMemoryRegion* GetPhysicalDTBRegion() const {
+        return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DTB);
+    }
+
+    bool IsHeapPhysicalAddress(const KMemoryRegion*& region, PAddr address) const {
+        return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(),
+                              KMemoryRegionType_DramUserPool);
+    }
+    bool IsHeapVirtualAddress(const KMemoryRegion*& region, VAddr address) const {
+        return IsTypedAddress(region, address, GetVirtualLinearMemoryRegionTree(),
+                              KMemoryRegionType_VirtualDramUserPool);
+    }
+
+    bool IsHeapPhysicalAddress(const KMemoryRegion*& region, PAddr address, size_t size) const {
+        return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(),
+                              KMemoryRegionType_DramUserPool);
+    }
+    bool IsHeapVirtualAddress(const KMemoryRegion*& region, VAddr address, size_t size) const {
+        return IsTypedAddress(region, address, size, GetVirtualLinearMemoryRegionTree(),
+                              KMemoryRegionType_VirtualDramUserPool);
+    }
+
+    bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, PAddr address) const {
+        return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(),
+                              static_cast<KMemoryRegionType>(KMemoryRegionAttr_LinearMapped));
+    }
+    bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, PAddr address,
+                                       size_t size) const {
+        return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(),
+                              static_cast<KMemoryRegionType>(KMemoryRegionAttr_LinearMapped));
+    }
+
+    std::tuple<size_t, size_t> GetTotalAndKernelMemorySizes() const {
+        size_t total_size = 0, kernel_size = 0;
+        for (const auto& region : GetPhysicalMemoryRegionTree()) {
+            if (region.IsDerivedFrom(KMemoryRegionType_Dram)) {
+                total_size += region.GetSize();
+                if (!region.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
+                    kernel_size += region.GetSize();
+                }
+            }
+        }
+        return std::make_tuple(total_size, kernel_size);
+    }
+
+    void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
+                                           VAddr linear_virtual_start);
+    static size_t GetResourceRegionSizeForInit();
+
+    auto GetKernelRegionExtents() const {
+        return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel);
+    }
+    auto GetKernelCodeRegionExtents() const {
+        return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelCode);
+    }
+    auto GetKernelStackRegionExtents() const {
+        return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelStack);
+    }
+    auto GetKernelMiscRegionExtents() const {
+        return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelMisc);
+    }
+    auto GetKernelSlabRegionExtents() const {
+        return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelSlab);
+    }
+
+    auto GetLinearRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionAttr_LinearMapped);
+    }
+
+    auto GetLinearRegionVirtualExtents() const {
+        const auto physical = GetLinearRegionPhysicalExtents();
+        return KMemoryRegion(GetLinearVirtualAddress(physical.GetAddress()),
+                             GetLinearVirtualAddress(physical.GetLastAddress()), 0,
+                             KMemoryRegionType_None);
+    }
+
+    auto GetMainMemoryPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Dram);
+    }
+    auto GetCarveoutRegionExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionAttr_CarveoutProtected);
+    }
+
+    auto GetKernelRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramKernelBase);
+    }
+    auto GetKernelCodeRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramKernelCode);
+    }
+    auto GetKernelSlabRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramKernelSlab);
+    }
+    auto GetKernelPageTableHeapRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramKernelPtHeap);
+    }
+    auto GetKernelInitPageTableRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramKernelInitPt);
+    }
+
+    auto GetKernelPoolManagementRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramPoolManagement);
+    }
+    auto GetKernelPoolPartitionRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramPoolPartition);
+    }
+    auto GetKernelSystemPoolRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramSystemPool);
+    }
+    auto GetKernelSystemNonSecurePoolRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramSystemNonSecurePool);
+    }
+    auto GetKernelAppletPoolRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramAppletPool);
+    }
+    auto GetKernelApplicationPoolRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_DramApplicationPool);
+    }
+
+    auto GetKernelTraceBufferRegionPhysicalExtents() const {
+        return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            KMemoryRegionType_KernelTraceBuffer);
+    }
+
+private:
+    template <typename AddressType>
+    static bool IsTypedAddress(const KMemoryRegion*& region, AddressType address,
+                               const KMemoryRegionTree& tree, KMemoryRegionType type) {
+        // Check if the cached region already contains the address.
+        if (region != nullptr && region->Contains(address)) {
+            return true;
+        }
+
+        // Find the containing region, and update the cache.
+        if (const KMemoryRegion* found = tree.Find(address);
+            found != nullptr && found->IsDerivedFrom(type)) {
+            region = found;
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    template <typename AddressType>
+    static bool IsTypedAddress(const KMemoryRegion*& region, AddressType address, size_t size,
+                               const KMemoryRegionTree& tree, KMemoryRegionType type) {
+        // Get the end of the checked region.
+        const u64 last_address = address + size - 1;
+
+        // Walk the tree to verify the region is correct.
+        const KMemoryRegion* cur =
+            (region != nullptr && region->Contains(address)) ? region : tree.Find(address);
+        while (cur != nullptr && cur->IsDerivedFrom(type)) {
+            if (last_address <= cur->GetLastAddress()) {
+                region = cur;
+                return true;
+            }
+
+            cur = cur->GetNext();
+        }
+        return false;
+    }
+
+    template <typename AddressType>
+    static const KMemoryRegion* Find(AddressType address, const KMemoryRegionTree& tree) {
+        return tree.Find(address);
+    }
+
+    static KMemoryRegion& Dereference(KMemoryRegion* region) {
+        ASSERT(region != nullptr);
+        return *region;
+    }
+
+    static const KMemoryRegion& Dereference(const KMemoryRegion* region) {
+        ASSERT(region != nullptr);
+        return *region;
+    }
+
+    VAddr GetStackTopAddress(s32 core_id, KMemoryRegionType type) const {
+        const auto& region = Dereference(
+            GetVirtualMemoryRegionTree().FindByTypeAndAttribute(type, static_cast<u32>(core_id)));
+        ASSERT(region.GetEndAddress() != 0);
+        return region.GetEndAddress();
     }
 
 private:
-    constexpr KMemoryLayout(PAddr application_start_address, std::size_t application_size,
-                            PAddr applet_start_address, std::size_t applet_size,
-                            PAddr system_start_address, std::size_t system_size)
-        : application{application_start_address, application_size},
-          applet{applet_start_address, applet_size}, system{system_start_address, system_size} {}
-
-    const KMemoryRegion application;
-    const KMemoryRegion applet;
-    const KMemoryRegion system;
+    u64 linear_phys_to_virt_diff{};
+    u64 linear_virt_to_phys_diff{};
+    KMemoryRegionTree virtual_tree;
+    KMemoryRegionTree physical_tree;
+    KMemoryRegionTree virtual_linear_tree;
+    KMemoryRegionTree physical_linear_tree;
 };
 
+namespace Init {
+
+// These should be generic, regardless of board.
+void SetupPoolPartitionMemoryRegions(KMemoryLayout& memory_layout);
+
+// These may be implemented in a board-specific manner.
+void SetupDevicePhysicalMemoryRegions(KMemoryLayout& memory_layout);
+void SetupDramPhysicalMemoryRegions(KMemoryLayout& memory_layout);
+
+} // namespace Init
+
 } // namespace Kernel
```
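`GetMaximumOverheadSize` above budgets one page of page-table metadata per potential L1 block plus one per L2 block. A worked check of `KernelPageTableHeapSize` for the 8 GiB maximum, assuming `PageSize` is the usual 0x1000 from memory_types.h:

```cpp
#include <cstddef>

// Mirror of GetMaximumOverheadSize with the header's constants spelled out.
constexpr std::size_t DivideUp(std::size_t a, std::size_t b) {
    return (a + b - 1) / b;
}

constexpr std::size_t PageSize = 0x1000;    // assumed, from memory_types.h
constexpr std::size_t L1Block = 1ULL << 30; // Size_1_GB
constexpr std::size_t L2Block = 2ULL << 20; // Size_2_MB
constexpr std::size_t MaxMem = 8ULL << 30;  // Size_8_GB (MainMemorySizeMax)

// 8 L1 entries + 4096 L2 entries = 4104 pages = 0x1008000 bytes (~16 MiB).
static_assert(DivideUp(MaxMem, L1Block) == 8);
static_assert(DivideUp(MaxMem, L2Block) == 4096);
static_assert((DivideUp(MaxMem, L1Block) + DivideUp(MaxMem, L2Block)) * PageSize == 0x1008000);
```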
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 48916df17..257d4bb83 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // Copyright 2014 Citra Emulator Project | 1 | // Copyright 2021 yuzu Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include <utility> | 12 | #include <utility> |
| 13 | 13 | ||
| 14 | #include "common/assert.h" | 14 | #include "common/assert.h" |
| 15 | #include "common/common_sizes.h" | ||
| 15 | #include "common/logging/log.h" | 16 | #include "common/logging/log.h" |
| 16 | #include "common/microprofile.h" | 17 | #include "common/microprofile.h" |
| 17 | #include "common/thread.h" | 18 | #include "common/thread.h" |
| @@ -269,44 +270,310 @@ struct KernelCore::Impl { | |||
| 269 | } | 270 | } |
| 270 | 271 | ||
| 271 | void InitializeMemoryLayout() { | 272 | void InitializeMemoryLayout() { |
| 272 | // Initialize memory layout | 273 | KMemoryLayout memory_layout; |
| 273 | constexpr KMemoryLayout layout{KMemoryLayout::GetDefaultLayout()}; | 274 | |
| 275 | // Insert the root region for the virtual memory tree, from which all other regions will | ||
| 276 | // derive. | ||
| 277 | memory_layout.GetVirtualMemoryRegionTree().InsertDirectly( | ||
| 278 | KernelVirtualAddressSpaceBase, | ||
| 279 | KernelVirtualAddressSpaceBase + KernelVirtualAddressSpaceSize - 1); | ||
| 280 | |||
| 281 | // Insert the root region for the physical memory tree, from which all other regions will | ||
| 282 | // derive. | ||
| 283 | memory_layout.GetPhysicalMemoryRegionTree().InsertDirectly( | ||
| 284 | KernelPhysicalAddressSpaceBase, | ||
| 285 | KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1); | ||
| 286 | |||
| 287 | // Save start and end for ease of use. | ||
| 288 | const VAddr code_start_virt_addr = KernelVirtualAddressCodeBase; | ||
| 289 | const VAddr code_end_virt_addr = KernelVirtualAddressCodeEnd; | ||
| 290 | |||
| 291 | // Setup the containing kernel region. | ||
| 292 | constexpr size_t KernelRegionSize = Size_1_GB; | ||
| 293 | constexpr size_t KernelRegionAlign = Size_1_GB; | ||
| 294 | constexpr VAddr kernel_region_start = | ||
| 295 | Common::AlignDown(code_start_virt_addr, KernelRegionAlign); | ||
| 296 | size_t kernel_region_size = KernelRegionSize; | ||
| 297 | if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) { | ||
| 298 | kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start; | ||
| 299 | } | ||
| 300 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | ||
| 301 | kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel)); | ||
| 302 | |||
| 303 | // Setup the code region. | ||
| 304 | constexpr size_t CodeRegionAlign = PageSize; | ||
| 305 | constexpr VAddr code_region_start = | ||
| 306 | Common::AlignDown(code_start_virt_addr, CodeRegionAlign); | ||
| 307 | constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign); | ||
| 308 | constexpr size_t code_region_size = code_region_end - code_region_start; | ||
| 309 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | ||
| 310 | code_region_start, code_region_size, KMemoryRegionType_KernelCode)); | ||
| 311 | |||
| 312 | // Setup board-specific device physical regions. | ||
| 313 | Init::SetupDevicePhysicalMemoryRegions(memory_layout); | ||
| 314 | |||
| 315 | // Determine the amount of space needed for the misc region. | ||
| 316 | size_t misc_region_needed_size; | ||
| 317 | { | ||
| 318 | // Each core has a one page stack for all three stack types (Main, Idle, Exception). | ||
| 319 | misc_region_needed_size = Core::Hardware::NUM_CPU_CORES * (3 * (PageSize + PageSize)); | ||
| 320 | |||
| 321 | // Account for each auto-map device. | ||
| 322 | for (const auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { | ||
| 323 | if (region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) { | ||
| 324 | // Check that the region is valid. | ||
| 325 | ASSERT(region.GetEndAddress() != 0); | ||
| 326 | |||
| 327 | // Account for the region. | ||
| 328 | misc_region_needed_size += | ||
| 329 | PageSize + (Common::AlignUp(region.GetLastAddress(), PageSize) - | ||
| 330 | Common::AlignDown(region.GetAddress(), PageSize)); | ||
| 331 | } | ||
| 332 | } | ||
| 333 | |||
| 334 | // Multiply the needed size by three, to account for the need for guard space. | ||
| 335 | misc_region_needed_size *= 3; | ||
| 336 | } | ||
| 337 | |||
| 338 | // Decide on the actual size for the misc region. | ||
| 339 | constexpr size_t MiscRegionAlign = KernelAslrAlignment; | ||
| 340 | constexpr size_t MiscRegionMinimumSize = Size_32_MB; | ||
| 341 | const size_t misc_region_size = Common::AlignUp( | ||
| 342 | std::max(misc_region_needed_size, MiscRegionMinimumSize), MiscRegionAlign); | ||
| 343 | ASSERT(misc_region_size > 0); | ||
| 344 | |||
| 345 | // Setup the misc region. | ||
| 346 | const VAddr misc_region_start = | ||
| 347 | memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( | ||
| 348 | misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel); | ||
| 349 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | ||
| 350 | misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc)); | ||
| 351 | |||
| 352 | // Setup the stack region. | ||
| 353 | constexpr size_t StackRegionSize = Size_14_MB; | ||
| 354 | constexpr size_t StackRegionAlign = KernelAslrAlignment; | ||
| 355 | const VAddr stack_region_start = | ||
| 356 | memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( | ||
| 357 | StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel); | ||
| 358 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | ||
| 359 | stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack)); | ||
| 360 | |||
| 361 | // Determine the size of the resource region. | ||
| 362 | const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit(); | ||
| 363 | |||
| 364 | // Determine the size of the slab region. | ||
| 365 | const size_t slab_region_size = Common::AlignUp(KernelSlabHeapSize, PageSize); | ||
| 366 | ASSERT(slab_region_size <= resource_region_size); | ||
| 367 | |||
| 368 | // Setup the slab region. | ||
| 369 | const PAddr code_start_phys_addr = KernelPhysicalAddressCodeBase; | ||
| 370 | const PAddr code_end_phys_addr = code_start_phys_addr + code_region_size; | ||
| 371 | const PAddr slab_start_phys_addr = code_end_phys_addr; | ||
| 372 | const PAddr slab_end_phys_addr = slab_start_phys_addr + slab_region_size; | ||
| 373 | constexpr size_t SlabRegionAlign = KernelAslrAlignment; | ||
| 374 | const size_t slab_region_needed_size = | ||
| 375 | Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) - | ||
| 376 | Common::AlignDown(code_end_phys_addr, SlabRegionAlign); | ||
| 377 | const VAddr slab_region_start = | ||
| 378 | memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( | ||
| 379 | slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) + | ||
| 380 | (code_end_phys_addr % SlabRegionAlign); | ||
| 381 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | ||
| 382 | slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab)); | ||
| 383 | |||
| 384 | // Setup the temp region. | ||
| 385 | constexpr size_t TempRegionSize = Size_128_MB; | ||
| 386 | constexpr size_t TempRegionAlign = KernelAslrAlignment; | ||
| 387 | const VAddr temp_region_start = | ||
| 388 | memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( | ||
| 389 | TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel); | ||
| 390 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize, | ||
| 391 | KMemoryRegionType_KernelTemp)); | ||
| 392 | |||
| 393 | // Automatically map in devices that have auto-map attributes. | ||
| 394 | for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { | ||
| 395 | // We only care about kernel regions. | ||
| 396 | if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) { | ||
| 397 | continue; | ||
| 398 | } | ||
| 399 | |||
| 400 | // Check whether we should map the region. | ||
| 401 | if (!region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) { | ||
| 402 | continue; | ||
| 403 | } | ||
| 404 | |||
| 405 | // If this region has already been mapped, no need to consider it. | ||
| 406 | if (region.HasTypeAttribute(KMemoryRegionAttr_DidKernelMap)) { | ||
| 407 | continue; | ||
| 408 | } | ||
| 409 | |||
| 410 | // Check that the region is valid. | ||
| 411 | ASSERT(region.GetEndAddress() != 0); | ||
| 412 | |||
| 413 | // Set the attribute to note we've mapped this region. | ||
| 414 | region.SetTypeAttribute(KMemoryRegionAttr_DidKernelMap); | ||
| 415 | |||
| 416 | // Create a virtual pair region and insert it into the tree. | ||
| 417 | const PAddr map_phys_addr = Common::AlignDown(region.GetAddress(), PageSize); | ||
| 418 | const size_t map_size = | ||
| 419 | Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr; | ||
| 420 | const VAddr map_virt_addr = | ||
| 421 | memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard( | ||
| 422 | map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize); | ||
| 423 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | ||
| 424 | map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice)); | ||
| 425 | region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr); | ||
| 426 | } | ||
| 427 | |||
| 428 | Init::SetupDramPhysicalMemoryRegions(memory_layout); | ||
| 429 | |||
| 430 | // Insert a physical region for the kernel code region. | ||
| 431 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 432 | code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode)); | ||
| 433 | |||
| 434 | // Insert a physical region for the kernel slab region. | ||
| 435 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 436 | slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab)); | ||
| 437 | |||
| 438 | // Determine size available for kernel page table heaps, requiring > 8 MB. | ||
| 439 | const PAddr resource_end_phys_addr = slab_start_phys_addr + resource_region_size; | ||
| 440 | const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr; | ||
| 441 | ASSERT(page_table_heap_size / Size_4_MB > 2); | ||
| 442 | |||
| 443 | // Insert a physical region for the kernel page table heap region | ||
| 444 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 445 | slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap)); | ||
| 446 | |||
| 447 | // All DRAM regions that we haven't tagged by this point will be mapped under the linear | ||
| 448 | // mapping. Tag them. | ||
| 449 | for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { | ||
| 450 | if (region.GetType() == KMemoryRegionType_Dram) { | ||
| 451 | // Check that the region is valid. | ||
| 452 | ASSERT(region.GetEndAddress() != 0); | ||
| 453 | |||
| 454 | // Set the linear map attribute. | ||
| 455 | region.SetTypeAttribute(KMemoryRegionAttr_LinearMapped); | ||
| 456 | } | ||
| 457 | } | ||
| 458 | |||
| 459 | // Get the linear region extents. | ||
| 460 | const auto linear_extents = | ||
| 461 | memory_layout.GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | ||
| 462 | KMemoryRegionAttr_LinearMapped); | ||
| 463 | ASSERT(linear_extents.GetEndAddress() != 0); | ||
| 464 | |||
| 465 | // Setup the linear mapping region. | ||
| 466 | constexpr size_t LinearRegionAlign = Size_1_GB; | ||
| 467 | const PAddr aligned_linear_phys_start = | ||
| 468 | Common::AlignDown(linear_extents.GetAddress(), LinearRegionAlign); | ||
| 469 | const size_t linear_region_size = | ||
| 470 | Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) - | ||
| 471 | aligned_linear_phys_start; | ||
| 472 | const VAddr linear_region_start = | ||
| 473 | memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard( | ||
| 474 | linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign); | ||
| 475 | |||
| 476 | const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start; | ||
| 477 | |||
| 478 | // Map and create regions for all the linearly-mapped data. | ||
| 479 | { | ||
| 480 | PAddr cur_phys_addr = 0; | ||
| 481 | u64 cur_size = 0; | ||
| 482 | for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { | ||
| 483 | if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) { | ||
| 484 | continue; | ||
| 485 | } | ||
| 486 | |||
| 487 | ASSERT(region.GetEndAddress() != 0); | ||
| 488 | |||
| 489 | if (cur_size == 0) { | ||
| 490 | cur_phys_addr = region.GetAddress(); | ||
| 491 | cur_size = region.GetSize(); | ||
| 492 | } else if (cur_phys_addr + cur_size == region.GetAddress()) { | ||
| 493 | cur_size += region.GetSize(); | ||
| 494 | } else { | ||
| 496 | cur_phys_addr = region.GetAddress(); | ||
| 497 | cur_size = region.GetSize(); | ||
| 498 | } | ||
| 499 | |||
| 500 | const VAddr region_virt_addr = | ||
| 501 | region.GetAddress() + linear_region_phys_to_virt_diff; | ||
| 502 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | ||
| 503 | region_virt_addr, region.GetSize(), | ||
| 504 | GetTypeForVirtualLinearMapping(region.GetType()))); | ||
| 505 | region.SetPairAddress(region_virt_addr); | ||
| 506 | |||
| 507 | KMemoryRegion* virt_region = | ||
| 508 | memory_layout.GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr); | ||
| 509 | ASSERT(virt_region != nullptr); | ||
| 510 | virt_region->SetPairAddress(region.GetAddress()); | ||
| 511 | } | ||
| 512 | } | ||
| 513 | |||
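The `cur_phys_addr`/`cur_size` bookkeeping in the loop above is a classic run-coalescing pattern over address-sorted regions: adjacent regions extend the current run, a gap starts a new one. An isolated sketch of just that pattern, with the tree inserts and pair addresses left out; `Range` and the function name are illustrative:

```cpp
#include <cstdint>
#include <vector>

struct Range {
    std::uint64_t addr;
    std::uint64_t size;
};

std::vector<Range> CoalesceContiguous(const std::vector<Range>& sorted_regions) {
    std::vector<Range> runs;
    std::uint64_t cur_addr = 0;
    std::uint64_t cur_size = 0;
    for (const auto& r : sorted_regions) {
        if (cur_size == 0) {
            cur_addr = r.addr;  // first region starts a run
            cur_size = r.size;
        } else if (cur_addr + cur_size == r.addr) {
            cur_size += r.size; // physically adjacent: extend the current run
        } else {
            runs.push_back({cur_addr, cur_size}); // gap: flush and start anew
            cur_addr = r.addr;
            cur_size = r.size;
        }
    }
    if (cur_size != 0) {
        runs.push_back({cur_addr, cur_size}); // flush the final run
    }
    return runs;
}
```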
| 514 | // Insert physical and virtual regions for the initial page table region. | ||
| 515 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 516 | resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt)); | ||
| 517 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | ||
| 518 | resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize, | ||
| 519 | KMemoryRegionType_VirtualDramKernelInitPt)); | ||
| 520 | |||
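This paired insert reuses the same constant difference: once `linear_region_phys_to_virt_diff` is fixed, any physical range has a virtual twin at the same offset. A small sketch with made-up addresses; `Range` and `VirtualTwin` are placeholders, not the tree API:

```cpp
#include <cstdint>

struct Range {
    std::uint64_t addr;
    std::uint64_t size;
};

// With the linear difference fixed, a range's virtual twin is just a shift.
constexpr Range VirtualTwin(Range phys, std::uint64_t phys_to_virt_diff) {
    return {phys.addr + phys_to_virt_diff, phys.size};
}

constexpr Range init_pt_phys{0x8060'0000, 0x20'0000};  // hypothetical
constexpr std::uint64_t diff = 0xFFFF'7F80'0000'0000;  // hypothetical
constexpr Range init_pt_virt = VirtualTwin(init_pt_phys, diff);
static_assert(init_pt_virt.addr == init_pt_phys.addr + diff &&
              init_pt_virt.size == init_pt_phys.size);
```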
| 521 | // All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to | ||
| 522 | // some pool partition. Tag them. | ||
| 523 | for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { | ||
| 524 | if (region.GetType() == (KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped)) { | ||
| 525 | region.SetType(KMemoryRegionType_DramPoolPartition); | ||
| 526 | } | ||
| 527 | } | ||
| 528 | |||
| 529 | // Set up all other memory regions needed to arrange the pool partitions. | ||
| 530 | Init::SetupPoolPartitionMemoryRegions(memory_layout); | ||
| 531 | |||
| 532 | // Cache all linear regions in their own trees for faster access later. | ||
| 533 | memory_layout.InitializeLinearMemoryRegionTrees(aligned_linear_phys_start, | ||
| 534 | linear_region_start); | ||
| 535 | |||
| 536 | const auto system_pool = memory_layout.GetKernelSystemPoolRegionPhysicalExtents(); | ||
| 537 | const auto applet_pool = memory_layout.GetKernelAppletPoolRegionPhysicalExtents(); | ||
| 538 | const auto application_pool = memory_layout.GetKernelApplicationPoolRegionPhysicalExtents(); | ||
| 539 | |||
| 540 | // Initialize memory managers. | ||
| 541 | memory_manager = std::make_unique<KMemoryManager>(); | ||
| 542 | memory_manager->InitializeManager(KMemoryManager::Pool::Application, | ||
| 543 | application_pool.GetAddress(), | ||
| 544 | application_pool.GetEndAddress()); | ||
| 545 | memory_manager->InitializeManager(KMemoryManager::Pool::Applet, applet_pool.GetAddress(), | ||
| 546 | applet_pool.GetEndAddress()); | ||
| 547 | memory_manager->InitializeManager(KMemoryManager::Pool::System, system_pool.GetAddress(), | ||
| 548 | system_pool.GetEndAddress()); | ||
| 549 | |||
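Each pool manager is handed the physical extent of its pool. A hedged stand-in for the calls above; `PoolManager` and `Extent` are placeholders for `KMemoryManager` and the region extents, and the real initializer builds page-tracking state rather than just recording bounds:

```cpp
#include <array>
#include <cstddef>
#include <cstdint>

struct Extent {
    std::uint64_t address;
    std::uint64_t end_address;
};

enum class Pool : std::size_t { Application, Applet, System, Count };

class PoolManager {
public:
    void Initialize(Pool pool, std::uint64_t start, std::uint64_t end) {
        // The real manager would build page bitmaps here; we only record bounds.
        pools_[static_cast<std::size_t>(pool)] = {start, end};
    }

private:
    std::array<Extent, static_cast<std::size_t>(Pool::Count)> pools_{};
};

void InitializePools(PoolManager& mgr, Extent app, Extent applet, Extent sys) {
    mgr.Initialize(Pool::Application, app.address, app.end_address);
    mgr.Initialize(Pool::Applet, applet.address, applet.end_address);
    mgr.Initialize(Pool::System, sys.address, sys.end_address);
}
```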
| 550 | // Set up memory regions for emulated processes. | ||
| 551 | // TODO(bunnei): These should not be hardcoded regions initialized within the kernel | ||
| 274 | constexpr std::size_t hid_size{0x40000}; | 552 | constexpr std::size_t hid_size{0x40000}; |
| 275 | constexpr std::size_t font_size{0x1100000}; | 553 | constexpr std::size_t font_size{0x1100000}; |
| 276 | constexpr std::size_t irs_size{0x8000}; | 554 | constexpr std::size_t irs_size{0x8000}; |
| 277 | constexpr std::size_t time_size{0x1000}; | 555 | constexpr std::size_t time_size{0x1000}; |
| 278 | constexpr PAddr hid_addr{layout.System().GetAddress()}; | ||
| 279 | constexpr PAddr font_pa{layout.System().GetAddress() + hid_size}; | ||
| 280 | constexpr PAddr irs_addr{layout.System().GetAddress() + hid_size + font_size}; | ||
| 281 | constexpr PAddr time_addr{layout.System().GetAddress() + hid_size + font_size + irs_size}; | ||
| 282 | 556 | ||
| 283 | // Initialize memory manager | 557 | const PAddr hid_phys_addr{system_pool.GetAddress()}; |
| 284 | memory_manager = std::make_unique<KMemoryManager>(); | 558 | const PAddr font_phys_addr{system_pool.GetAddress() + hid_size}; |
| 285 | memory_manager->InitializeManager(KMemoryManager::Pool::Application, | 559 | const PAddr irs_phys_addr{system_pool.GetAddress() + hid_size + font_size}; |
| 286 | layout.Application().GetAddress(), | 560 | const PAddr time_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size}; |
| 287 | layout.Application().GetLastAddress()); | ||
| 288 | memory_manager->InitializeManager(KMemoryManager::Pool::Applet, | ||
| 289 | layout.Applet().GetAddress(), | ||
| 290 | layout.Applet().GetLastAddress()); | ||
| 291 | memory_manager->InitializeManager(KMemoryManager::Pool::System, | ||
| 292 | layout.System().GetAddress(), | ||
| 293 | layout.System().GetLastAddress()); | ||
| 294 | 561 | ||
| 295 | hid_shared_mem = Kernel::KSharedMemory::Create( | 562 | hid_shared_mem = Kernel::KSharedMemory::Create( |
| 296 | system.Kernel(), system.DeviceMemory(), nullptr, {hid_addr, hid_size / PageSize}, | 563 | system.Kernel(), system.DeviceMemory(), nullptr, {hid_phys_addr, hid_size / PageSize}, |
| 297 | KMemoryPermission::None, KMemoryPermission::Read, hid_addr, hid_size, | 564 | KMemoryPermission::None, KMemoryPermission::Read, hid_phys_addr, hid_size, |
| 298 | "HID:SharedMemory"); | 565 | "HID:SharedMemory"); |
| 299 | font_shared_mem = Kernel::KSharedMemory::Create( | 566 | font_shared_mem = Kernel::KSharedMemory::Create( |
| 300 | system.Kernel(), system.DeviceMemory(), nullptr, {font_pa, font_size / PageSize}, | 567 | system.Kernel(), system.DeviceMemory(), nullptr, {font_phys_addr, font_size / PageSize}, |
| 301 | KMemoryPermission::None, KMemoryPermission::Read, font_pa, font_size, | 568 | KMemoryPermission::None, KMemoryPermission::Read, font_phys_addr, font_size, |
| 302 | "Font:SharedMemory"); | 569 | "Font:SharedMemory"); |
| 303 | irs_shared_mem = Kernel::KSharedMemory::Create( | 570 | irs_shared_mem = Kernel::KSharedMemory::Create( |
| 304 | system.Kernel(), system.DeviceMemory(), nullptr, {irs_addr, irs_size / PageSize}, | 571 | system.Kernel(), system.DeviceMemory(), nullptr, {irs_phys_addr, irs_size / PageSize}, |
| 305 | KMemoryPermission::None, KMemoryPermission::Read, irs_addr, irs_size, | 572 | KMemoryPermission::None, KMemoryPermission::Read, irs_phys_addr, irs_size, |
| 306 | "IRS:SharedMemory"); | 573 | "IRS:SharedMemory"); |
| 307 | time_shared_mem = Kernel::KSharedMemory::Create( | 574 | time_shared_mem = Kernel::KSharedMemory::Create( |
| 308 | system.Kernel(), system.DeviceMemory(), nullptr, {time_addr, time_size / PageSize}, | 575 | system.Kernel(), system.DeviceMemory(), nullptr, {time_phys_addr, time_size / PageSize}, |
| 309 | KMemoryPermission::None, KMemoryPermission::Read, time_addr, time_size, | 576 | KMemoryPermission::None, KMemoryPermission::Read, time_phys_addr, time_size, |
| 310 | "Time:SharedMemory"); | 577 | "Time:SharedMemory"); |
| 311 | 578 | ||
| 312 | // Allocate slab heaps | 579 | // Allocate slab heaps |
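The replaced hunk above keeps the hardcoded shared-memory carve-out but rebases it: four blocks packed back to back, now at the base of the system pool rather than `layout.System()`. A sketch of the address arithmetic, with the sizes mirroring the constants in the diff and the pool base a made-up example value:

```cpp
#include <cstdint>

// Four shared-memory blocks packed back to back at the system pool base.
constexpr std::uint64_t hid_size = 0x40000;
constexpr std::uint64_t font_size = 0x1100000;
constexpr std::uint64_t irs_size = 0x8000;
constexpr std::uint64_t time_size = 0x1000;

constexpr std::uint64_t system_pool_base = 0x1'0000'0000; // hypothetical base

constexpr std::uint64_t hid_phys_addr = system_pool_base;
constexpr std::uint64_t font_phys_addr = hid_phys_addr + hid_size;
constexpr std::uint64_t irs_phys_addr = font_phys_addr + font_size;
constexpr std::uint64_t time_phys_addr = irs_phys_addr + irs_size;

// Each block starts exactly where the previous one ends.
static_assert(time_phys_addr + time_size ==
              system_pool_base + hid_size + font_size + irs_size + time_size);
```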
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 56906f2da..a500e63bc 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // Copyright 2014 Citra Emulator Project / PPSSPP Project | 1 | // Copyright 2021 yuzu Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||