Diffstat (limited to 'src')
 -rw-r--r--  src/core/CMakeLists.txt                                      |   2
 -rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h      |  13
 -rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp   |  12
 -rw-r--r--  src/core/hle/kernel/board/nintendo/nx/k_system_control.h     |   1
 -rw-r--r--  src/core/hle/kernel/initial_process.h                        |  23
 -rw-r--r--  src/core/hle/kernel/k_memory_layout.h                        |   4
 -rw-r--r--  src/core/hle/kernel/k_memory_manager.cpp                     | 469
 -rw-r--r--  src/core/hle/kernel/k_memory_manager.h                       | 167
 -rw-r--r--  src/core/hle/kernel/k_memory_region_type.h                   |  10
 -rw-r--r--  src/core/hle/kernel/k_page_heap.cpp                          | 126
 -rw-r--r--  src/core/hle/kernel/k_page_heap.h                            | 221
 -rw-r--r--  src/core/hle/kernel/k_page_table.cpp                         |  50
 -rw-r--r--  src/core/hle/kernel/k_page_table.h                           |   9
 -rw-r--r--  src/core/hle/kernel/kernel.cpp                               | 113
 -rw-r--r--  src/core/hle/kernel/kernel.h                                 |   4
15 files changed, 848 insertions, 376 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 0c10cd019..5db6a1b3a 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -152,6 +152,7 @@ add_library(core STATIC
| 152 | hle/api_version.h | 152 | hle/api_version.h |
| 153 | hle/ipc.h | 153 | hle/ipc.h |
| 154 | hle/ipc_helpers.h | 154 | hle/ipc_helpers.h |
| 155 | hle/kernel/board/nintendo/nx/k_memory_layout.h | ||
| 155 | hle/kernel/board/nintendo/nx/k_system_control.cpp | 156 | hle/kernel/board/nintendo/nx/k_system_control.cpp |
| 156 | hle/kernel/board/nintendo/nx/k_system_control.h | 157 | hle/kernel/board/nintendo/nx/k_system_control.h |
| 157 | hle/kernel/board/nintendo/nx/secure_monitor.h | 158 | hle/kernel/board/nintendo/nx/secure_monitor.h |
@@ -164,6 +165,7 @@ add_library(core STATIC
| 164 | hle/kernel/hle_ipc.h | 165 | hle/kernel/hle_ipc.h |
| 165 | hle/kernel/init/init_slab_setup.cpp | 166 | hle/kernel/init/init_slab_setup.cpp |
| 166 | hle/kernel/init/init_slab_setup.h | 167 | hle/kernel/init/init_slab_setup.h |
| 168 | hle/kernel/initial_process.h | ||
| 167 | hle/kernel/k_address_arbiter.cpp | 169 | hle/kernel/k_address_arbiter.cpp |
| 168 | hle/kernel/k_address_arbiter.h | 170 | hle/kernel/k_address_arbiter.h |
| 169 | hle/kernel/k_address_space_info.cpp | 171 | hle/kernel/k_address_space_info.cpp |
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h
new file mode 100644
index 000000000..01e225088
--- /dev/null
+++ b/src/core/hle/kernel/board/nintendo/nx/k_memory_layout.h
@@ -0,0 +1,13 @@
| 1 | // Copyright 2022 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include "common/common_types.h" | ||
| 8 | |||
| 9 | namespace Kernel { | ||
| 10 | |||
| 11 | constexpr inline PAddr MainMemoryAddress = 0x80000000; | ||
| 12 | |||
| 13 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
index 702cacffc..8027bec00 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -39,6 +39,10 @@ Smc::MemoryArrangement GetMemoryArrangeForInit() {
| 39 | } | 39 | } |
| 40 | } // namespace | 40 | } // namespace |
| 41 | 41 | ||
| 42 | size_t KSystemControl::Init::GetRealMemorySize() { | ||
| 43 | return GetIntendedMemorySize(); | ||
| 44 | } | ||
| 45 | |||
| 42 | // Initialization. | 46 | // Initialization. |
| 43 | size_t KSystemControl::Init::GetIntendedMemorySize() { | 47 | size_t KSystemControl::Init::GetIntendedMemorySize() { |
| 44 | switch (GetMemorySizeForInit()) { | 48 | switch (GetMemorySizeForInit()) { |
@@ -53,7 +57,13 @@ size_t KSystemControl::Init::GetIntendedMemorySize() {
| 53 | } | 57 | } |
| 54 | 58 | ||
| 55 | PAddr KSystemControl::Init::GetKernelPhysicalBaseAddress(u64 base_address) { | 59 | PAddr KSystemControl::Init::GetKernelPhysicalBaseAddress(u64 base_address) { |
| 56 | return base_address; | 60 | const size_t real_dram_size = KSystemControl::Init::GetRealMemorySize(); |
| 61 | const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize(); | ||
| 62 | if (intended_dram_size * 2 < real_dram_size) { | ||
| 63 | return base_address; | ||
| 64 | } else { | ||
| 65 | return base_address + ((real_dram_size - intended_dram_size) / 2); | ||
| 66 | } | ||
| 57 | } | 67 | } |
| 58 | 68 | ||
| 59 | bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() { | 69 | bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() { |
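For illustration only, a minimal standalone sketch of the carveout adjustment introduced in GetKernelPhysicalBaseAddress above, using hypothetical DRAM sizes; AdjustBase is a made-up name and is not part of this change:

    #include <cstdint>

    // Mirrors the branch above: keep the base when the intended memory fits twice into
    // real DRAM, otherwise centre the intended region within the real DRAM range.
    constexpr std::uint64_t AdjustBase(std::uint64_t base, std::uint64_t real,
                                       std::uint64_t intended) {
        return (intended * 2 < real) ? base : base + (real - intended) / 2;
    }

    // 6 GiB real / 4 GiB intended shifts the base up by 1 GiB; equal sizes leave it unchanged.
    static_assert(AdjustBase(0x80000000, 6ull << 30, 4ull << 30) == 0x80000000 + (1ull << 30));
    static_assert(AdjustBase(0x80000000, 4ull << 30, 4ull << 30) == 0x80000000);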
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
index 52f230ced..df2a17f2a 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
@@ -13,6 +13,7 @@ public:
| 13 | class Init { | 13 | class Init { |
| 14 | public: | 14 | public: |
| 15 | // Initialization. | 15 | // Initialization. |
| 16 | static std::size_t GetRealMemorySize(); | ||
| 16 | static std::size_t GetIntendedMemorySize(); | 17 | static std::size_t GetIntendedMemorySize(); |
| 17 | static PAddr GetKernelPhysicalBaseAddress(u64 base_address); | 18 | static PAddr GetKernelPhysicalBaseAddress(u64 base_address); |
| 18 | static bool ShouldIncreaseThreadResourceLimit(); | 19 | static bool ShouldIncreaseThreadResourceLimit(); |
diff --git a/src/core/hle/kernel/initial_process.h b/src/core/hle/kernel/initial_process.h
new file mode 100644
index 000000000..25b27909c
--- /dev/null
+++ b/src/core/hle/kernel/initial_process.h
@@ -0,0 +1,23 @@
| 1 | // Copyright 2022 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include "common/common_types.h" | ||
| 8 | #include "common/literals.h" | ||
| 9 | #include "core/hle/kernel/board/nintendo/nx/k_memory_layout.h" | ||
| 10 | #include "core/hle/kernel/board/nintendo/nx/k_system_control.h" | ||
| 11 | |||
| 12 | namespace Kernel { | ||
| 13 | |||
| 14 | using namespace Common::Literals; | ||
| 15 | |||
| 16 | constexpr std::size_t InitialProcessBinarySizeMax = 12_MiB; | ||
| 17 | |||
| 18 | static inline PAddr GetInitialProcessBinaryPhysicalAddress() { | ||
| 19 | return Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetKernelPhysicalBaseAddress( | ||
| 20 | MainMemoryAddress); | ||
| 21 | } | ||
| 22 | |||
| 23 | } // namespace Kernel | ||
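As a hedged pointer to how this header is consumed later in the patch: KMemoryManager::Initialize reserves the range returned here for the initial process (INI1) image. Under an arrangement where GetKernelPhysicalBaseAddress() returns MainMemoryAddress unchanged, that range would be:

    // Illustrative values only; the real base may be shifted by the carveout adjustment.
    const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress(); // 0x80000000
    const PAddr ini_end   = ini_start + InitialProcessBinarySizeMax;  // 0x80000000 + 12 MiB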
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index 57ff538cc..bcddb0d62 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -173,6 +173,10 @@ public:
| 173 | return Dereference(FindVirtualLinear(address)); | 173 | return Dereference(FindVirtualLinear(address)); |
| 174 | } | 174 | } |
| 175 | 175 | ||
| 176 | const KMemoryRegion& GetPhysicalLinearRegion(PAddr address) const { | ||
| 177 | return Dereference(FindPhysicalLinear(address)); | ||
| 178 | } | ||
| 179 | |||
| 176 | const KMemoryRegion* GetPhysicalKernelTraceBufferRegion() const { | 180 | const KMemoryRegion* GetPhysicalKernelTraceBufferRegion() const { |
| 177 | return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_KernelTraceBuffer); | 181 | return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_KernelTraceBuffer); |
| 178 | } | 182 | } |
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 1b44541b1..a2f18f643 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -10,189 +10,412 @@
| 10 | #include "common/scope_exit.h" | 10 | #include "common/scope_exit.h" |
| 11 | #include "core/core.h" | 11 | #include "core/core.h" |
| 12 | #include "core/device_memory.h" | 12 | #include "core/device_memory.h" |
| 13 | #include "core/hle/kernel/initial_process.h" | ||
| 13 | #include "core/hle/kernel/k_memory_manager.h" | 14 | #include "core/hle/kernel/k_memory_manager.h" |
| 14 | #include "core/hle/kernel/k_page_linked_list.h" | 15 | #include "core/hle/kernel/k_page_linked_list.h" |
| 16 | #include "core/hle/kernel/kernel.h" | ||
| 15 | #include "core/hle/kernel/svc_results.h" | 17 | #include "core/hle/kernel/svc_results.h" |
| 18 | #include "core/memory.h" | ||
| 16 | 19 | ||
| 17 | namespace Kernel { | 20 | namespace Kernel { |
| 18 | 21 | ||
| 19 | KMemoryManager::KMemoryManager(Core::System& system_) : system{system_} {} | 22 | namespace { |
| 23 | |||
| 24 | constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) { | ||
| 25 | if ((type | KMemoryRegionType_DramApplicationPool) == type) { | ||
| 26 | return KMemoryManager::Pool::Application; | ||
| 27 | } else if ((type | KMemoryRegionType_DramAppletPool) == type) { | ||
| 28 | return KMemoryManager::Pool::Applet; | ||
| 29 | } else if ((type | KMemoryRegionType_DramSystemPool) == type) { | ||
| 30 | return KMemoryManager::Pool::System; | ||
| 31 | } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) { | ||
| 32 | return KMemoryManager::Pool::SystemNonSecure; | ||
| 33 | } else { | ||
| 34 | UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool"); | ||
| 35 | return {}; | ||
| 36 | } | ||
| 37 | } | ||
| 20 | 38 | ||
| 21 | std::size_t KMemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) { | 39 | } // namespace |
| 22 | const auto size{end_address - start_address}; | 40 | |
| 41 | KMemoryManager::KMemoryManager(Core::System& system_) | ||
| 42 | : system{system_}, pool_locks{ | ||
| 43 | KLightLock{system_.Kernel()}, | ||
| 44 | KLightLock{system_.Kernel()}, | ||
| 45 | KLightLock{system_.Kernel()}, | ||
| 46 | KLightLock{system_.Kernel()}, | ||
| 47 | } {} | ||
| 48 | |||
| 49 | void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) { | ||
| 50 | |||
| 51 | // Clear the management region to zero. | ||
| 52 | const VAddr management_region_end = management_region + management_region_size; | ||
| 53 | |||
| 54 | // Reset our manager count. | ||
| 55 | num_managers = 0; | ||
| 56 | |||
| 57 | // Traverse the virtual memory layout tree, initializing each manager as appropriate. | ||
| 58 | while (num_managers != MaxManagerCount) { | ||
| 59 | // Locate the region that should initialize the current manager. | ||
| 60 | PAddr region_address = 0; | ||
| 61 | size_t region_size = 0; | ||
| 62 | Pool region_pool = Pool::Count; | ||
| 63 | for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { | ||
| 64 | // We only care about regions that we need to create managers for. | ||
| 65 | if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { | ||
| 66 | continue; | ||
| 67 | } | ||
| 23 | 68 | ||
| 24 | // Calculate metadata sizes | 69 | // We want to initialize the managers in order. |
| 25 | const auto ref_count_size{(size / PageSize) * sizeof(u16)}; | 70 | if (it.GetAttributes() != num_managers) { |
| 26 | const auto optimize_map_size{(Common::AlignUp((size / PageSize), 64) / 64) * sizeof(u64)}; | 71 | continue; |
| 27 | const auto manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)}; | 72 | } |
| 28 | const auto page_heap_size{KPageHeap::CalculateManagementOverheadSize(size)}; | ||
| 29 | const auto total_metadata_size{manager_size + page_heap_size}; | ||
| 30 | ASSERT(manager_size <= total_metadata_size); | ||
| 31 | ASSERT(Common::IsAligned(total_metadata_size, PageSize)); | ||
| 32 | 73 | ||
| 33 | // Setup region | 74 | const PAddr cur_start = it.GetAddress(); |
| 34 | pool = new_pool; | 75 | const PAddr cur_end = it.GetEndAddress(); |
| 76 | |||
| 77 | // Validate the region. | ||
| 78 | ASSERT(cur_end != 0); | ||
| 79 | ASSERT(cur_start != 0); | ||
| 80 | ASSERT(it.GetSize() > 0); | ||
| 81 | |||
| 82 | // Update the region's extents. | ||
| 83 | if (region_address == 0) { | ||
| 84 | region_address = cur_start; | ||
| 85 | region_size = it.GetSize(); | ||
| 86 | region_pool = GetPoolFromMemoryRegionType(it.GetType()); | ||
| 87 | } else { | ||
| 88 | ASSERT(cur_start == region_address + region_size); | ||
| 89 | |||
| 90 | // Update the size. | ||
| 91 | region_size = cur_end - region_address; | ||
| 92 | ASSERT(GetPoolFromMemoryRegionType(it.GetType()) == region_pool); | ||
| 93 | } | ||
| 94 | } | ||
| 95 | |||
| 96 | // If we didn't find a region, we're done. | ||
| 97 | if (region_size == 0) { | ||
| 98 | break; | ||
| 99 | } | ||
| 35 | 100 | ||
| 36 | // Initialize the manager's KPageHeap | 101 | // Initialize a new manager for the region. |
| 37 | heap.Initialize(start_address, size, page_heap_size); | 102 | Impl* manager = std::addressof(managers[num_managers++]); |
| 103 | ASSERT(num_managers <= managers.size()); | ||
| 104 | |||
| 105 | const size_t cur_size = manager->Initialize(region_address, region_size, management_region, | ||
| 106 | management_region_end, region_pool); | ||
| 107 | management_region += cur_size; | ||
| 108 | ASSERT(management_region <= management_region_end); | ||
| 109 | |||
| 110 | // Insert the manager into the pool list. | ||
| 111 | const auto region_pool_index = static_cast<u32>(region_pool); | ||
| 112 | if (pool_managers_tail[region_pool_index] == nullptr) { | ||
| 113 | pool_managers_head[region_pool_index] = manager; | ||
| 114 | } else { | ||
| 115 | pool_managers_tail[region_pool_index]->SetNext(manager); | ||
| 116 | manager->SetPrev(pool_managers_tail[region_pool_index]); | ||
| 117 | } | ||
| 118 | pool_managers_tail[region_pool_index] = manager; | ||
| 119 | } | ||
| 38 | 120 | ||
| 39 | // Free the memory to the heap | 121 | // Free each region to its corresponding heap. |
| 40 | heap.Free(start_address, size / PageSize); | 122 | size_t reserved_sizes[MaxManagerCount] = {}; |
| 123 | const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress(); | ||
| 124 | const PAddr ini_end = ini_start + InitialProcessBinarySizeMax; | ||
| 125 | const PAddr ini_last = ini_end - 1; | ||
| 126 | for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { | ||
| 127 | if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { | ||
| 128 | // Get the manager for the region. | ||
| 129 | auto index = it.GetAttributes(); | ||
| 130 | auto& manager = managers[index]; | ||
| 131 | |||
| 132 | const PAddr cur_start = it.GetAddress(); | ||
| 133 | const PAddr cur_last = it.GetLastAddress(); | ||
| 134 | const PAddr cur_end = it.GetEndAddress(); | ||
| 135 | |||
| 136 | if (cur_start <= ini_start && ini_last <= cur_last) { | ||
| 137 | // Free memory before the ini to the heap. | ||
| 138 | if (cur_start != ini_start) { | ||
| 139 | manager.Free(cur_start, (ini_start - cur_start) / PageSize); | ||
| 140 | } | ||
| 41 | 141 | ||
| 42 | // Update the heap's used size | 142 | // Open/reserve the ini memory. |
| 43 | heap.UpdateUsedSize(); | 143 | manager.OpenFirst(ini_start, InitialProcessBinarySizeMax / PageSize); |
| 144 | reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax; | ||
| 44 | 145 | ||
| 45 | return total_metadata_size; | 146 | // Free memory after the ini to the heap. |
| 46 | } | 147 | if (ini_last != cur_last) { |
| 148 | ASSERT(cur_end != 0); | ||
| 149 | manager.Free(ini_end, (cur_end - ini_end) / PageSize); | ||
| 150 | } | ||
| 151 | } else { | ||
| 152 | // Ensure there's no partial overlap with the ini image. | ||
| 153 | if (cur_start <= ini_last) { | ||
| 154 | ASSERT(cur_last < ini_start); | ||
| 155 | } else { | ||
| 156 | // Otherwise, check the region for general validity. | ||
| 157 | ASSERT(cur_end != 0); | ||
| 158 | } | ||
| 47 | 159 | ||
| 48 | void KMemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) { | 160 | // Free the memory to the heap. |
| 49 | ASSERT(pool < Pool::Count); | 161 | manager.Free(cur_start, it.GetSize() / PageSize); |
| 50 | managers[static_cast<std::size_t>(pool)].Initialize(pool, start_address, end_address); | 162 | } |
| 163 | } | ||
| 164 | } | ||
| 165 | |||
| 166 | // Update the used size for all managers. | ||
| 167 | for (size_t i = 0; i < num_managers; ++i) { | ||
| 168 | managers[i].SetInitialUsedHeapSize(reserved_sizes[i]); | ||
| 169 | } | ||
| 51 | } | 170 | } |
| 52 | 171 | ||
| 53 | VAddr KMemoryManager::AllocateAndOpenContinuous(std::size_t num_pages, std::size_t align_pages, | 172 | PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) { |
| 54 | u32 option) { | 173 | // Early return if we're allocating no pages. |
| 55 | // Early return if we're allocating no pages | ||
| 56 | if (num_pages == 0) { | 174 | if (num_pages == 0) { |
| 57 | return {}; | 175 | return 0; |
| 58 | } | 176 | } |
| 59 | 177 | ||
| 60 | // Lock the pool that we're allocating from | 178 | // Lock the pool that we're allocating from. |
| 61 | const auto [pool, dir] = DecodeOption(option); | 179 | const auto [pool, dir] = DecodeOption(option); |
| 62 | const auto pool_index{static_cast<std::size_t>(pool)}; | 180 | KScopedLightLock lk(pool_locks[static_cast<std::size_t>(pool)]); |
| 63 | std::lock_guard lock{pool_locks[pool_index]}; | 181 | |
| 64 | 182 | // Choose a heap based on our page size request. | |
| 65 | // Choose a heap based on our page size request | 183 | const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages); |
| 66 | const s32 heap_index{KPageHeap::GetAlignedBlockIndex(num_pages, align_pages)}; | 184 | |
| 67 | 185 | // Loop, trying to iterate from each block. | |
| 68 | // Loop, trying to iterate from each block | 186 | Impl* chosen_manager = nullptr; |
| 69 | // TODO (bunnei): Support multiple managers | 187 | PAddr allocated_block = 0; |
| 70 | Impl& chosen_manager{managers[pool_index]}; | 188 | for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; |
| 71 | VAddr allocated_block{chosen_manager.AllocateBlock(heap_index, false)}; | 189 | chosen_manager = this->GetNextManager(chosen_manager, dir)) { |
| 190 | allocated_block = chosen_manager->AllocateBlock(heap_index, true); | ||
| 191 | if (allocated_block != 0) { | ||
| 192 | break; | ||
| 193 | } | ||
| 194 | } | ||
| 72 | 195 | ||
| 73 | // If we failed to allocate, quit now | 196 | // If we failed to allocate, quit now. |
| 74 | if (!allocated_block) { | 197 | if (allocated_block == 0) { |
| 75 | return {}; | 198 | return 0; |
| 76 | } | 199 | } |
| 77 | 200 | ||
| 78 | // If we allocated more than we need, free some | 201 | // If we allocated more than we need, free some. |
| 79 | const auto allocated_pages{KPageHeap::GetBlockNumPages(heap_index)}; | 202 | const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index); |
| 80 | if (allocated_pages > num_pages) { | 203 | if (allocated_pages > num_pages) { |
| 81 | chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages); | 204 | chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages); |
| 82 | } | 205 | } |
| 83 | 206 | ||
| 207 | // Open the first reference to the pages. | ||
| 208 | chosen_manager->OpenFirst(allocated_block, num_pages); | ||
| 209 | |||
| 84 | return allocated_block; | 210 | return allocated_block; |
| 85 | } | 211 | } |
| 86 | 212 | ||
| 87 | ResultCode KMemoryManager::Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, | 213 | ResultCode KMemoryManager::AllocatePageGroupImpl(KPageLinkedList* out, size_t num_pages, Pool pool, |
| 88 | Direction dir, u32 heap_fill_value) { | 214 | Direction dir, bool random) { |
| 89 | ASSERT(page_list.GetNumPages() == 0); | 215 | // Choose a heap based on our page size request. |
| 216 | const s32 heap_index = KPageHeap::GetBlockIndex(num_pages); | ||
| 217 | R_UNLESS(0 <= heap_index, ResultOutOfMemory); | ||
| 218 | |||
| 219 | // Ensure that we don't leave anything un-freed. | ||
| 220 | auto group_guard = SCOPE_GUARD({ | ||
| 221 | for (const auto& it : out->Nodes()) { | ||
| 222 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), it.GetAddress()); | ||
| 223 | const size_t num_pages_to_free = | ||
| 224 | std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize); | ||
| 225 | manager.Free(it.GetAddress(), num_pages_to_free); | ||
| 226 | } | ||
| 227 | }); | ||
| 90 | 228 | ||
| 91 | // Early return if we're allocating no pages | 229 | // Keep allocating until we've allocated all our pages. |
| 92 | if (num_pages == 0) { | 230 | for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) { |
| 93 | return ResultSuccess; | 231 | const size_t pages_per_alloc = KPageHeap::GetBlockNumPages(index); |
| 94 | } | 232 | for (Impl* cur_manager = this->GetFirstManager(pool, dir); cur_manager != nullptr; |
| 233 | cur_manager = this->GetNextManager(cur_manager, dir)) { | ||
| 234 | while (num_pages >= pages_per_alloc) { | ||
| 235 | // Allocate a block. | ||
| 236 | PAddr allocated_block = cur_manager->AllocateBlock(index, random); | ||
| 237 | if (allocated_block == 0) { | ||
| 238 | break; | ||
| 239 | } | ||
| 95 | 240 | ||
| 96 | // Lock the pool that we're allocating from | 241 | // Safely add it to our group. |
| 97 | const auto pool_index{static_cast<std::size_t>(pool)}; | 242 | { |
| 98 | std::lock_guard lock{pool_locks[pool_index]}; | 243 | auto block_guard = |
| 244 | SCOPE_GUARD({ cur_manager->Free(allocated_block, pages_per_alloc); }); | ||
| 245 | R_TRY(out->AddBlock(allocated_block, pages_per_alloc)); | ||
| 246 | block_guard.Cancel(); | ||
| 247 | } | ||
| 99 | 248 | ||
| 100 | // Choose a heap based on our page size request | 249 | num_pages -= pages_per_alloc; |
| 101 | const s32 heap_index{KPageHeap::GetBlockIndex(num_pages)}; | 250 | } |
| 102 | if (heap_index < 0) { | 251 | } |
| 103 | return ResultOutOfMemory; | ||
| 104 | } | 252 | } |
| 105 | 253 | ||
| 106 | // TODO (bunnei): Support multiple managers | 254 | // Only succeed if we allocated as many pages as we wanted. |
| 107 | Impl& chosen_manager{managers[pool_index]}; | 255 | R_UNLESS(num_pages == 0, ResultOutOfMemory); |
| 108 | 256 | ||
| 109 | // Ensure that we don't leave anything un-freed | 257 | // We succeeded! |
| 110 | auto group_guard = detail::ScopeExit([&] { | 258 | group_guard.Cancel(); |
| 111 | for (const auto& it : page_list.Nodes()) { | 259 | return ResultSuccess; |
| 112 | const auto min_num_pages{std::min<size_t>( | 260 | } |
| 113 | it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)}; | ||
| 114 | chosen_manager.Free(it.GetAddress(), min_num_pages); | ||
| 115 | } | ||
| 116 | }); | ||
| 117 | 261 | ||
| 118 | // Keep allocating until we've allocated all our pages | 262 | ResultCode KMemoryManager::AllocateAndOpen(KPageLinkedList* out, size_t num_pages, u32 option) { |
| 119 | for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) { | 263 | ASSERT(out != nullptr); |
| 120 | const auto pages_per_alloc{KPageHeap::GetBlockNumPages(index)}; | 264 | ASSERT(out->GetNumPages() == 0); |
| 121 | 265 | ||
| 122 | while (num_pages >= pages_per_alloc) { | 266 | // Early return if we're allocating no pages. |
| 123 | // Allocate a block | 267 | R_SUCCEED_IF(num_pages == 0); |
| 124 | VAddr allocated_block{chosen_manager.AllocateBlock(index, false)}; | ||
| 125 | if (!allocated_block) { | ||
| 126 | break; | ||
| 127 | } | ||
| 128 | 268 | ||
| 129 | // Safely add it to our group | 269 | // Lock the pool that we're allocating from. |
| 130 | { | 270 | const auto [pool, dir] = DecodeOption(option); |
| 131 | auto block_guard = detail::ScopeExit( | 271 | KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]); |
| 132 | [&] { chosen_manager.Free(allocated_block, pages_per_alloc); }); | 272 | |
| 273 | // Allocate the page group. | ||
| 274 | R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false)); | ||
| 275 | |||
| 276 | // Open the first reference to the pages. | ||
| 277 | for (const auto& block : out->Nodes()) { | ||
| 278 | PAddr cur_address = block.GetAddress(); | ||
| 279 | size_t remaining_pages = block.GetNumPages(); | ||
| 280 | while (remaining_pages > 0) { | ||
| 281 | // Get the manager for the current address. | ||
| 282 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address); | ||
| 283 | |||
| 284 | // Process part or all of the block. | ||
| 285 | const size_t cur_pages = | ||
| 286 | std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); | ||
| 287 | manager.OpenFirst(cur_address, cur_pages); | ||
| 288 | |||
| 289 | // Advance. | ||
| 290 | cur_address += cur_pages * PageSize; | ||
| 291 | remaining_pages -= cur_pages; | ||
| 292 | } | ||
| 293 | } | ||
| 133 | 294 | ||
| 134 | if (const ResultCode result{page_list.AddBlock(allocated_block, pages_per_alloc)}; | 295 | return ResultSuccess; |
| 135 | result.IsError()) { | 296 | } |
| 136 | return result; | ||
| 137 | } | ||
| 138 | 297 | ||
| 139 | block_guard.Cancel(); | 298 | ResultCode KMemoryManager::AllocateAndOpenForProcess(KPageLinkedList* out, size_t num_pages, |
| 140 | } | 299 | u32 option, u64 process_id, u8 fill_pattern) { |
| 300 | ASSERT(out != nullptr); | ||
| 301 | ASSERT(out->GetNumPages() == 0); | ||
| 141 | 302 | ||
| 142 | num_pages -= pages_per_alloc; | 303 | // Decode the option. |
| 143 | } | 304 | const auto [pool, dir] = DecodeOption(option); |
| 144 | } | ||
| 145 | 305 | ||
| 146 | // Clear allocated memory. | 306 | // Allocate the memory. |
| 147 | for (const auto& it : page_list.Nodes()) { | 307 | { |
| 148 | std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value, | 308 | // Lock the pool that we're allocating from. |
| 149 | it.GetSize()); | 309 | KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]); |
| 310 | |||
| 311 | // Allocate the page group. | ||
| 312 | R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false)); | ||
| 313 | |||
| 314 | // Open the first reference to the pages. | ||
| 315 | for (const auto& block : out->Nodes()) { | ||
| 316 | PAddr cur_address = block.GetAddress(); | ||
| 317 | size_t remaining_pages = block.GetNumPages(); | ||
| 318 | while (remaining_pages > 0) { | ||
| 319 | // Get the manager for the current address. | ||
| 320 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address); | ||
| 321 | |||
| 322 | // Process part or all of the block. | ||
| 323 | const size_t cur_pages = | ||
| 324 | std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); | ||
| 325 | manager.OpenFirst(cur_address, cur_pages); | ||
| 326 | |||
| 327 | // Advance. | ||
| 328 | cur_address += cur_pages * PageSize; | ||
| 329 | remaining_pages -= cur_pages; | ||
| 330 | } | ||
| 331 | } | ||
| 150 | } | 332 | } |
| 151 | 333 | ||
| 152 | // Only succeed if we allocated as many pages as we wanted | 334 | // Set all the allocated memory. |
| 153 | if (num_pages) { | 335 | for (const auto& block : out->Nodes()) { |
| 154 | return ResultOutOfMemory; | 336 | std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern, |
| 337 | block.GetSize()); | ||
| 155 | } | 338 | } |
| 156 | 339 | ||
| 157 | // We succeeded! | ||
| 158 | group_guard.Cancel(); | ||
| 159 | |||
| 160 | return ResultSuccess; | 340 | return ResultSuccess; |
| 161 | } | 341 | } |
| 162 | 342 | ||
| 163 | ResultCode KMemoryManager::Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, | 343 | void KMemoryManager::Open(PAddr address, size_t num_pages) { |
| 164 | Direction dir, u32 heap_fill_value) { | 344 | // Repeatedly open references until we've done so for all pages. |
| 165 | // Early return if we're freeing no pages | 345 | while (num_pages) { |
| 166 | if (!num_pages) { | 346 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address); |
| 167 | return ResultSuccess; | 347 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); |
| 348 | |||
| 349 | { | ||
| 350 | KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]); | ||
| 351 | manager.Open(address, cur_pages); | ||
| 352 | } | ||
| 353 | |||
| 354 | num_pages -= cur_pages; | ||
| 355 | address += cur_pages * PageSize; | ||
| 168 | } | 356 | } |
| 357 | } | ||
| 169 | 358 | ||
| 170 | // Lock the pool that we're freeing from | 359 | void KMemoryManager::Close(PAddr address, size_t num_pages) { |
| 171 | const auto pool_index{static_cast<std::size_t>(pool)}; | 360 | // Repeatedly close references until we've done so for all pages. |
| 172 | std::lock_guard lock{pool_locks[pool_index]}; | 361 | while (num_pages) { |
| 362 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address); | ||
| 363 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); | ||
| 173 | 364 | ||
| 174 | // TODO (bunnei): Support multiple managers | 365 | { |
| 175 | Impl& chosen_manager{managers[pool_index]}; | 366 | KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]); |
| 367 | manager.Close(address, cur_pages); | ||
| 368 | } | ||
| 176 | 369 | ||
| 177 | // Free all of the pages | 370 | num_pages -= cur_pages; |
| 178 | for (const auto& it : page_list.Nodes()) { | 371 | address += cur_pages * PageSize; |
| 179 | const auto min_num_pages{std::min<size_t>( | ||
| 180 | it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)}; | ||
| 181 | chosen_manager.Free(it.GetAddress(), min_num_pages); | ||
| 182 | } | 372 | } |
| 373 | } | ||
| 183 | 374 | ||
| 184 | return ResultSuccess; | 375 | void KMemoryManager::Close(const KPageLinkedList& pg) { |
| 376 | for (const auto& node : pg.Nodes()) { | ||
| 377 | Close(node.GetAddress(), node.GetNumPages()); | ||
| 378 | } | ||
| 379 | } | ||
| 380 | void KMemoryManager::Open(const KPageLinkedList& pg) { | ||
| 381 | for (const auto& node : pg.Nodes()) { | ||
| 382 | Open(node.GetAddress(), node.GetNumPages()); | ||
| 383 | } | ||
| 384 | } | ||
| 385 | |||
| 386 | size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management, | ||
| 387 | VAddr management_end, Pool p) { | ||
| 388 | // Calculate management sizes. | ||
| 389 | const size_t ref_count_size = (size / PageSize) * sizeof(u16); | ||
| 390 | const size_t optimize_map_size = CalculateOptimizedProcessOverheadSize(size); | ||
| 391 | const size_t manager_size = Common::AlignUp(optimize_map_size + ref_count_size, PageSize); | ||
| 392 | const size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(size); | ||
| 393 | const size_t total_management_size = manager_size + page_heap_size; | ||
| 394 | ASSERT(manager_size <= total_management_size); | ||
| 395 | ASSERT(management + total_management_size <= management_end); | ||
| 396 | ASSERT(Common::IsAligned(total_management_size, PageSize)); | ||
| 397 | |||
| 398 | // Setup region. | ||
| 399 | pool = p; | ||
| 400 | management_region = management; | ||
| 401 | page_reference_counts.resize( | ||
| 402 | Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize); | ||
| 403 | ASSERT(Common::IsAligned(management_region, PageSize)); | ||
| 404 | |||
| 405 | // Initialize the manager's KPageHeap. | ||
| 406 | heap.Initialize(address, size, management + manager_size, page_heap_size); | ||
| 407 | |||
| 408 | return total_management_size; | ||
| 185 | } | 409 | } |
| 186 | 410 | ||
| 187 | std::size_t KMemoryManager::Impl::CalculateManagementOverheadSize(std::size_t region_size) { | 411 | size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) { |
| 188 | const std::size_t ref_count_size = (region_size / PageSize) * sizeof(u16); | 412 | const size_t ref_count_size = (region_size / PageSize) * sizeof(u16); |
| 189 | const std::size_t optimize_map_size = | 413 | const size_t optimize_map_size = |
| 190 | (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / | 414 | (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / |
| 191 | Common::BitSize<u64>()) * | 415 | Common::BitSize<u64>()) * |
| 192 | sizeof(u64); | 416 | sizeof(u64); |
| 193 | const std::size_t manager_meta_size = | 417 | const size_t manager_meta_size = Common::AlignUp(optimize_map_size + ref_count_size, PageSize); |
| 194 | Common::AlignUp(optimize_map_size + ref_count_size, PageSize); | 418 | const size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region_size); |
| 195 | const std::size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region_size); | ||
| 196 | return manager_meta_size + page_heap_size; | 419 | return manager_meta_size + page_heap_size; |
| 197 | } | 420 | } |
| 198 | 421 | ||
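To make the new reference-counting contract concrete, a hedged usage sketch (not part of the patch): pages handed out by AllocateAndOpen carry an open reference that the caller must eventually balance with Close. EncodeOption and kernel.MemoryManager() are assumed helpers from the surrounding codebase and may differ in detail:

    // Sketch: allocate four application-pool pages, use them, then drop the reference.
    KPageLinkedList pg;
    auto& mm = kernel.MemoryManager(); // assumed accessor on KernelCore
    const u32 option = KMemoryManager::EncodeOption(KMemoryManager::Pool::Application,
                                                    KMemoryManager::Direction::FromFront);
    if (mm.AllocateAndOpen(std::addressof(pg), 4, option).IsSuccess()) {
        // ... map the pages into a page table ...
        mm.Close(pg); // releases the references taken by AllocateAndOpen
    }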
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h
index 17c7690f1..18775b262 100644
--- a/src/core/hle/kernel/k_memory_manager.h
+++ b/src/core/hle/kernel/k_memory_manager.h
@@ -5,11 +5,12 @@
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <array> | 7 | #include <array> |
| 8 | #include <mutex> | ||
| 9 | #include <tuple> | 8 | #include <tuple> |
| 10 | 9 | ||
| 11 | #include "common/common_funcs.h" | 10 | #include "common/common_funcs.h" |
| 12 | #include "common/common_types.h" | 11 | #include "common/common_types.h" |
| 12 | #include "core/hle/kernel/k_light_lock.h" | ||
| 13 | #include "core/hle/kernel/k_memory_layout.h" | ||
| 13 | #include "core/hle/kernel/k_page_heap.h" | 14 | #include "core/hle/kernel/k_page_heap.h" |
| 14 | #include "core/hle/result.h" | 15 | #include "core/hle/result.h" |
| 15 | 16 | ||
@@ -52,22 +53,33 @@ public:
| 52 | 53 | ||
| 53 | explicit KMemoryManager(Core::System& system_); | 54 | explicit KMemoryManager(Core::System& system_); |
| 54 | 55 | ||
| 55 | constexpr std::size_t GetSize(Pool pool) const { | 56 | void Initialize(VAddr management_region, size_t management_region_size); |
| 56 | return managers[static_cast<std::size_t>(pool)].GetSize(); | 57 | |
| 58 | constexpr size_t GetSize(Pool pool) const { | ||
| 59 | constexpr Direction GetSizeDirection = Direction::FromFront; | ||
| 60 | size_t total = 0; | ||
| 61 | for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr; | ||
| 62 | manager = this->GetNextManager(manager, GetSizeDirection)) { | ||
| 63 | total += manager->GetSize(); | ||
| 64 | } | ||
| 65 | return total; | ||
| 57 | } | 66 | } |
| 58 | 67 | ||
| 59 | void InitializeManager(Pool pool, u64 start_address, u64 end_address); | 68 | PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option); |
| 69 | ResultCode AllocateAndOpen(KPageLinkedList* out, size_t num_pages, u32 option); | ||
| 70 | ResultCode AllocateAndOpenForProcess(KPageLinkedList* out, size_t num_pages, u32 option, | ||
| 71 | u64 process_id, u8 fill_pattern); | ||
| 72 | |||
| 73 | static constexpr size_t MaxManagerCount = 10; | ||
| 60 | 74 | ||
| 61 | VAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option); | 75 | void Close(PAddr address, size_t num_pages); |
| 62 | ResultCode Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, Direction dir, | 76 | void Close(const KPageLinkedList& pg); |
| 63 | u32 heap_fill_value = 0); | ||
| 64 | ResultCode Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, Direction dir, | ||
| 65 | u32 heap_fill_value = 0); | ||
| 66 | 77 | ||
| 67 | static constexpr std::size_t MaxManagerCount = 10; | 78 | void Open(PAddr address, size_t num_pages); |
| 79 | void Open(const KPageLinkedList& pg); | ||
| 68 | 80 | ||
| 69 | public: | 81 | public: |
| 70 | static std::size_t CalculateManagementOverheadSize(std::size_t region_size) { | 82 | static size_t CalculateManagementOverheadSize(size_t region_size) { |
| 71 | return Impl::CalculateManagementOverheadSize(region_size); | 83 | return Impl::CalculateManagementOverheadSize(region_size); |
| 72 | } | 84 | } |
| 73 | 85 | ||
@@ -100,17 +112,26 @@ private:
| 100 | Impl() = default; | 112 | Impl() = default; |
| 101 | ~Impl() = default; | 113 | ~Impl() = default; |
| 102 | 114 | ||
| 103 | std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address); | 115 | size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end, |
| 116 | Pool p); | ||
| 104 | 117 | ||
| 105 | VAddr AllocateBlock(s32 index, bool random) { | 118 | VAddr AllocateBlock(s32 index, bool random) { |
| 106 | return heap.AllocateBlock(index, random); | 119 | return heap.AllocateBlock(index, random); |
| 107 | } | 120 | } |
| 108 | 121 | ||
| 109 | void Free(VAddr addr, std::size_t num_pages) { | 122 | void Free(VAddr addr, size_t num_pages) { |
| 110 | heap.Free(addr, num_pages); | 123 | heap.Free(addr, num_pages); |
| 111 | } | 124 | } |
| 112 | 125 | ||
| 113 | constexpr std::size_t GetSize() const { | 126 | void SetInitialUsedHeapSize(size_t reserved_size) { |
| 127 | heap.SetInitialUsedSize(reserved_size); | ||
| 128 | } | ||
| 129 | |||
| 130 | constexpr Pool GetPool() const { | ||
| 131 | return pool; | ||
| 132 | } | ||
| 133 | |||
| 134 | constexpr size_t GetSize() const { | ||
| 114 | return heap.GetSize(); | 135 | return heap.GetSize(); |
| 115 | } | 136 | } |
| 116 | 137 | ||
@@ -122,10 +143,88 @@ private:
| 122 | return heap.GetEndAddress(); | 143 | return heap.GetEndAddress(); |
| 123 | } | 144 | } |
| 124 | 145 | ||
| 125 | static std::size_t CalculateManagementOverheadSize(std::size_t region_size); | 146 | constexpr size_t GetPageOffset(PAddr address) const { |
| 147 | return heap.GetPageOffset(address); | ||
| 148 | } | ||
| 149 | |||
| 150 | constexpr size_t GetPageOffsetToEnd(PAddr address) const { | ||
| 151 | return heap.GetPageOffsetToEnd(address); | ||
| 152 | } | ||
| 153 | |||
| 154 | constexpr void SetNext(Impl* n) { | ||
| 155 | next = n; | ||
| 156 | } | ||
| 157 | |||
| 158 | constexpr void SetPrev(Impl* n) { | ||
| 159 | prev = n; | ||
| 160 | } | ||
| 161 | |||
| 162 | constexpr Impl* GetNext() const { | ||
| 163 | return next; | ||
| 164 | } | ||
| 165 | |||
| 166 | constexpr Impl* GetPrev() const { | ||
| 167 | return prev; | ||
| 168 | } | ||
| 169 | |||
| 170 | void OpenFirst(PAddr address, size_t num_pages) { | ||
| 171 | size_t index = this->GetPageOffset(address); | ||
| 172 | const size_t end = index + num_pages; | ||
| 173 | while (index < end) { | ||
| 174 | const RefCount ref_count = (++page_reference_counts[index]); | ||
| 175 | ASSERT(ref_count == 1); | ||
| 126 | 176 | ||
| 127 | static constexpr std::size_t CalculateOptimizedProcessOverheadSize( | 177 | index++; |
| 128 | std::size_t region_size) { | 178 | } |
| 179 | } | ||
| 180 | |||
| 181 | void Open(PAddr address, size_t num_pages) { | ||
| 182 | size_t index = this->GetPageOffset(address); | ||
| 183 | const size_t end = index + num_pages; | ||
| 184 | while (index < end) { | ||
| 185 | const RefCount ref_count = (++page_reference_counts[index]); | ||
| 186 | ASSERT(ref_count > 1); | ||
| 187 | |||
| 188 | index++; | ||
| 189 | } | ||
| 190 | } | ||
| 191 | |||
| 192 | void Close(PAddr address, size_t num_pages) { | ||
| 193 | size_t index = this->GetPageOffset(address); | ||
| 194 | const size_t end = index + num_pages; | ||
| 195 | |||
| 196 | size_t free_start = 0; | ||
| 197 | size_t free_count = 0; | ||
| 198 | while (index < end) { | ||
| 199 | ASSERT(page_reference_counts[index] > 0); | ||
| 200 | const RefCount ref_count = (--page_reference_counts[index]); | ||
| 201 | |||
| 202 | // Keep track of how many zero refcounts we see in a row, to minimize calls to free. | ||
| 203 | if (ref_count == 0) { | ||
| 204 | if (free_count > 0) { | ||
| 205 | free_count++; | ||
| 206 | } else { | ||
| 207 | free_start = index; | ||
| 208 | free_count = 1; | ||
| 209 | } | ||
| 210 | } else { | ||
| 211 | if (free_count > 0) { | ||
| 212 | this->Free(heap.GetAddress() + free_start * PageSize, free_count); | ||
| 213 | free_count = 0; | ||
| 214 | } | ||
| 215 | } | ||
| 216 | |||
| 217 | index++; | ||
| 218 | } | ||
| 219 | |||
| 220 | if (free_count > 0) { | ||
| 221 | this->Free(heap.GetAddress() + free_start * PageSize, free_count); | ||
| 222 | } | ||
| 223 | } | ||
| 224 | |||
| 225 | static size_t CalculateManagementOverheadSize(size_t region_size); | ||
| 226 | |||
| 227 | static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) { | ||
| 129 | return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / | 228 | return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / |
| 130 | Common::BitSize<u64>()) * | 229 | Common::BitSize<u64>()) * |
| 131 | sizeof(u64); | 230 | sizeof(u64); |
@@ -135,13 +234,45 @@ private:
| 135 | using RefCount = u16; | 234 | using RefCount = u16; |
| 136 | 235 | ||
| 137 | KPageHeap heap; | 236 | KPageHeap heap; |
| 237 | std::vector<RefCount> page_reference_counts; | ||
| 238 | VAddr management_region{}; | ||
| 138 | Pool pool{}; | 239 | Pool pool{}; |
| 240 | Impl* next{}; | ||
| 241 | Impl* prev{}; | ||
| 139 | }; | 242 | }; |
| 140 | 243 | ||
| 141 | private: | 244 | private: |
| 245 | Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) { | ||
| 246 | return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; | ||
| 247 | } | ||
| 248 | |||
| 249 | const Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) const { | ||
| 250 | return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; | ||
| 251 | } | ||
| 252 | |||
| 253 | constexpr Impl* GetFirstManager(Pool pool, Direction dir) const { | ||
| 254 | return dir == Direction::FromBack ? pool_managers_tail[static_cast<size_t>(pool)] | ||
| 255 | : pool_managers_head[static_cast<size_t>(pool)]; | ||
| 256 | } | ||
| 257 | |||
| 258 | constexpr Impl* GetNextManager(Impl* cur, Direction dir) const { | ||
| 259 | if (dir == Direction::FromBack) { | ||
| 260 | return cur->GetPrev(); | ||
| 261 | } else { | ||
| 262 | return cur->GetNext(); | ||
| 263 | } | ||
| 264 | } | ||
| 265 | |||
| 266 | ResultCode AllocatePageGroupImpl(KPageLinkedList* out, size_t num_pages, Pool pool, | ||
| 267 | Direction dir, bool random); | ||
| 268 | |||
| 269 | private: | ||
| 142 | Core::System& system; | 270 | Core::System& system; |
| 143 | std::array<std::mutex, static_cast<std::size_t>(Pool::Count)> pool_locks; | 271 | std::array<KLightLock, static_cast<size_t>(Pool::Count)> pool_locks; |
| 272 | std::array<Impl*, MaxManagerCount> pool_managers_head{}; | ||
| 273 | std::array<Impl*, MaxManagerCount> pool_managers_tail{}; | ||
| 144 | std::array<Impl, MaxManagerCount> managers; | 274 | std::array<Impl, MaxManagerCount> managers; |
| 275 | size_t num_managers{}; | ||
| 145 | }; | 276 | }; |
| 146 | 277 | ||
| 147 | } // namespace Kernel | 278 | } // namespace Kernel |
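The zero-refcount run coalescing in Impl::Close above can be shown in isolation: consecutive pages whose count reaches zero are batched so that each run costs a single Free call. A minimal, self-contained sketch of the same pattern (names are hypothetical, not from the patch):

    #include <cstddef>
    #include <vector>

    struct FreeRun {
        std::size_t first_page;
        std::size_t count;
    };

    // Collect maximal runs of zero-refcount pages; each run would map to one Free call.
    std::vector<FreeRun> CollectFreeRuns(const std::vector<int>& ref_counts) {
        std::vector<FreeRun> runs;
        std::size_t free_start = 0;
        std::size_t free_count = 0;
        for (std::size_t i = 0; i < ref_counts.size(); ++i) {
            if (ref_counts[i] == 0) {
                if (free_count == 0) {
                    free_start = i;
                }
                ++free_count;
            } else if (free_count > 0) {
                runs.push_back({free_start, free_count});
                free_count = 0;
            }
        }
        if (free_count > 0) {
            runs.push_back({free_start, free_count});
        }
        return runs;
    }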
diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h
index a05e66677..0baeddf51 100644
--- a/src/core/hle/kernel/k_memory_region_type.h
+++ b/src/core/hle/kernel/k_memory_region_type.h
@@ -14,7 +14,8 @@
| 14 | namespace Kernel { | 14 | namespace Kernel { |
| 15 | 15 | ||
| 16 | enum KMemoryRegionType : u32 { | 16 | enum KMemoryRegionType : u32 { |
| 17 | KMemoryRegionAttr_CarveoutProtected = 0x04000000, | 17 | KMemoryRegionAttr_CarveoutProtected = 0x02000000, |
| 18 | KMemoryRegionAttr_Uncached = 0x04000000, | ||
| 18 | KMemoryRegionAttr_DidKernelMap = 0x08000000, | 19 | KMemoryRegionAttr_DidKernelMap = 0x08000000, |
| 19 | KMemoryRegionAttr_ShouldKernelMap = 0x10000000, | 20 | KMemoryRegionAttr_ShouldKernelMap = 0x10000000, |
| 20 | KMemoryRegionAttr_UserReadOnly = 0x20000000, | 21 | KMemoryRegionAttr_UserReadOnly = 0x20000000, |
@@ -239,6 +240,11 @@ static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A);
| 239 | static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A); | 240 | static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A); |
| 240 | static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A); | 241 | static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A); |
| 241 | 242 | ||
| 243 | // UNUSED: .DeriveSparse(2, 2, 0); | ||
| 244 | constexpr auto KMemoryRegionType_VirtualDramUnknownDebug = | ||
| 245 | KMemoryRegionType_Dram.DeriveSparse(2, 2, 1); | ||
| 246 | static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52)); | ||
| 247 | |||
| 242 | constexpr auto KMemoryRegionType_VirtualDramKernelInitPt = | 248 | constexpr auto KMemoryRegionType_VirtualDramKernelInitPt = |
| 243 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0); | 249 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0); |
| 244 | constexpr auto KMemoryRegionType_VirtualDramPoolManagement = | 250 | constexpr auto KMemoryRegionType_VirtualDramPoolManagement = |
@@ -330,6 +336,8 @@ constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
| 330 | return KMemoryRegionType_VirtualDramKernelTraceBuffer; | 336 | return KMemoryRegionType_VirtualDramKernelTraceBuffer; |
| 331 | } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) { | 337 | } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) { |
| 332 | return KMemoryRegionType_VirtualDramKernelPtHeap; | 338 | return KMemoryRegionType_VirtualDramKernelPtHeap; |
| 339 | } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) { | ||
| 340 | return KMemoryRegionType_VirtualDramUnknownDebug; | ||
| 333 | } else { | 341 | } else { |
| 334 | return KMemoryRegionType_Dram; | 342 | return KMemoryRegionType_Dram; |
| 335 | } | 343 | } |
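The new GetPoolFromMemoryRegionType helper in k_memory_manager.cpp and the ShouldKernelMap branch above both rely on the same bit test: (x | mask) == x holds exactly when every bit of mask is already set in x. A tiny standalone illustration (HasAllBits is a made-up name, not part of the patch):

    #include <cstdint>

    constexpr bool HasAllBits(std::uint32_t value, std::uint32_t mask) {
        return (value | mask) == value; // equivalent to (value & mask) == mask
    }

    static_assert(HasAllBits(0b1011u, 0b0011u));
    static_assert(!HasAllBits(0b1011u, 0b0101u));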
diff --git a/src/core/hle/kernel/k_page_heap.cpp b/src/core/hle/kernel/k_page_heap.cpp
index 29d996d62..97a5890a0 100644
--- a/src/core/hle/kernel/k_page_heap.cpp
+++ b/src/core/hle/kernel/k_page_heap.cpp
@@ -7,35 +7,51 @@
| 7 | 7 | ||
| 8 | namespace Kernel { | 8 | namespace Kernel { |
| 9 | 9 | ||
| 10 | void KPageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) { | 10 | void KPageHeap::Initialize(PAddr address, size_t size, VAddr management_address, |
| 11 | // Check our assumptions | 11 | size_t management_size, const size_t* block_shifts, |
| 12 | ASSERT(Common::IsAligned((address), PageSize)); | 12 | size_t num_block_shifts) { |
| 13 | // Check our assumptions. | ||
| 14 | ASSERT(Common::IsAligned(address, PageSize)); | ||
| 13 | ASSERT(Common::IsAligned(size, PageSize)); | 15 | ASSERT(Common::IsAligned(size, PageSize)); |
| 16 | ASSERT(0 < num_block_shifts && num_block_shifts <= NumMemoryBlockPageShifts); | ||
| 17 | const VAddr management_end = management_address + management_size; | ||
| 14 | 18 | ||
| 15 | // Set our members | 19 | // Set our members. |
| 16 | heap_address = address; | 20 | m_heap_address = address; |
| 17 | heap_size = size; | 21 | m_heap_size = size; |
| 18 | 22 | m_num_blocks = num_block_shifts; | |
| 19 | // Setup bitmaps | 23 | |
| 20 | metadata.resize(metadata_size / sizeof(u64)); | 24 | // Setup bitmaps. |
| 21 | u64* cur_bitmap_storage{metadata.data()}; | 25 | m_management_data.resize(management_size / sizeof(u64)); |
| 22 | for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) { | 26 | u64* cur_bitmap_storage{m_management_data.data()}; |
| 23 | const std::size_t cur_block_shift{MemoryBlockPageShifts[i]}; | 27 | for (size_t i = 0; i < num_block_shifts; i++) { |
| 24 | const std::size_t next_block_shift{ | 28 | const size_t cur_block_shift = block_shifts[i]; |
| 25 | (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0}; | 29 | const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0; |
| 26 | cur_bitmap_storage = blocks[i].Initialize(heap_address, heap_size, cur_block_shift, | 30 | cur_bitmap_storage = m_blocks[i].Initialize(m_heap_address, m_heap_size, cur_block_shift, |
| 27 | next_block_shift, cur_bitmap_storage); | 31 | next_block_shift, cur_bitmap_storage); |
| 28 | } | 32 | } |
| 33 | |||
| 34 | // Ensure we didn't overextend our bounds. | ||
| 35 | ASSERT(VAddr(cur_bitmap_storage) <= management_end); | ||
| 36 | } | ||
| 37 | |||
| 38 | size_t KPageHeap::GetNumFreePages() const { | ||
| 39 | size_t num_free = 0; | ||
| 40 | |||
| 41 | for (size_t i = 0; i < m_num_blocks; i++) { | ||
| 42 | num_free += m_blocks[i].GetNumFreePages(); | ||
| 43 | } | ||
| 44 | |||
| 45 | return num_free; | ||
| 29 | } | 46 | } |
| 30 | 47 | ||
| 31 | VAddr KPageHeap::AllocateBlock(s32 index, bool random) { | 48 | PAddr KPageHeap::AllocateBlock(s32 index, bool random) { |
| 32 | const std::size_t needed_size{blocks[index].GetSize()}; | 49 | const size_t needed_size = m_blocks[index].GetSize(); |
| 33 | 50 | ||
| 34 | for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) { | 51 | for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) { |
| 35 | if (const VAddr addr{blocks[i].PopBlock(random)}; addr) { | 52 | if (const PAddr addr = m_blocks[i].PopBlock(random); addr != 0) { |
| 36 | if (const std::size_t allocated_size{blocks[i].GetSize()}; | 53 | if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) { |
| 37 | allocated_size > needed_size) { | 54 | this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize); |
| 38 | Free(addr + needed_size, (allocated_size - needed_size) / PageSize); | ||
| 39 | } | 55 | } |
| 40 | return addr; | 56 | return addr; |
| 41 | } | 57 | } |
@@ -44,34 +60,34 @@ VAddr KPageHeap::AllocateBlock(s32 index, bool random) {
| 44 | return 0; | 60 | return 0; |
| 45 | } | 61 | } |
| 46 | 62 | ||
| 47 | void KPageHeap::FreeBlock(VAddr block, s32 index) { | 63 | void KPageHeap::FreeBlock(PAddr block, s32 index) { |
| 48 | do { | 64 | do { |
| 49 | block = blocks[index++].PushBlock(block); | 65 | block = m_blocks[index++].PushBlock(block); |
| 50 | } while (block != 0); | 66 | } while (block != 0); |
| 51 | } | 67 | } |
| 52 | 68 | ||
| 53 | void KPageHeap::Free(VAddr addr, std::size_t num_pages) { | 69 | void KPageHeap::Free(PAddr addr, size_t num_pages) { |
| 54 | // Freeing no pages is a no-op | 70 | // Freeing no pages is a no-op. |
| 55 | if (num_pages == 0) { | 71 | if (num_pages == 0) { |
| 56 | return; | 72 | return; |
| 57 | } | 73 | } |
| 58 | 74 | ||
| 59 | // Find the largest block size that we can free, and free as many as possible | 75 | // Find the largest block size that we can free, and free as many as possible. |
| 60 | s32 big_index{static_cast<s32>(MemoryBlockPageShifts.size()) - 1}; | 76 | s32 big_index = static_cast<s32>(m_num_blocks) - 1; |
| 61 | const VAddr start{addr}; | 77 | const PAddr start = addr; |
| 62 | const VAddr end{(num_pages * PageSize) + addr}; | 78 | const PAddr end = addr + num_pages * PageSize; |
| 63 | VAddr before_start{start}; | 79 | PAddr before_start = start; |
| 64 | VAddr before_end{start}; | 80 | PAddr before_end = start; |
| 65 | VAddr after_start{end}; | 81 | PAddr after_start = end; |
| 66 | VAddr after_end{end}; | 82 | PAddr after_end = end; |
| 67 | while (big_index >= 0) { | 83 | while (big_index >= 0) { |
| 68 | const std::size_t block_size{blocks[big_index].GetSize()}; | 84 | const size_t block_size = m_blocks[big_index].GetSize(); |
| 69 | const VAddr big_start{Common::AlignUp((start), block_size)}; | 85 | const PAddr big_start = Common::AlignUp(start, block_size); |
| 70 | const VAddr big_end{Common::AlignDown((end), block_size)}; | 86 | const PAddr big_end = Common::AlignDown(end, block_size); |
| 71 | if (big_start < big_end) { | 87 | if (big_start < big_end) { |
| 72 | // Free as many big blocks as we can | 88 | // Free as many big blocks as we can. |
| 73 | for (auto block{big_start}; block < big_end; block += block_size) { | 89 | for (auto block = big_start; block < big_end; block += block_size) { |
| 74 | FreeBlock(block, big_index); | 90 | this->FreeBlock(block, big_index); |
| 75 | } | 91 | } |
| 76 | before_end = big_start; | 92 | before_end = big_start; |
| 77 | after_start = big_end; | 93 | after_start = big_end; |
@@ -81,31 +97,31 @@ void KPageHeap::Free(VAddr addr, std::size_t num_pages) {
| 81 | } | 97 | } |
| 82 | ASSERT(big_index >= 0); | 98 | ASSERT(big_index >= 0); |
| 83 | 99 | ||
| 84 | // Free space before the big blocks | 100 | // Free space before the big blocks. |
| 85 | for (s32 i{big_index - 1}; i >= 0; i--) { | 101 | for (s32 i = big_index - 1; i >= 0; i--) { |
| 86 | const std::size_t block_size{blocks[i].GetSize()}; | 102 | const size_t block_size = m_blocks[i].GetSize(); |
| 87 | while (before_start + block_size <= before_end) { | 103 | while (before_start + block_size <= before_end) { |
| 88 | before_end -= block_size; | 104 | before_end -= block_size; |
| 89 | FreeBlock(before_end, i); | 105 | this->FreeBlock(before_end, i); |
| 90 | } | 106 | } |
| 91 | } | 107 | } |
| 92 | 108 | ||
| 93 | // Free space after the big blocks | 109 | // Free space after the big blocks. |
| 94 | for (s32 i{big_index - 1}; i >= 0; i--) { | 110 | for (s32 i = big_index - 1; i >= 0; i--) { |
| 95 | const std::size_t block_size{blocks[i].GetSize()}; | 111 | const size_t block_size = m_blocks[i].GetSize(); |
| 96 | while (after_start + block_size <= after_end) { | 112 | while (after_start + block_size <= after_end) { |
| 97 | FreeBlock(after_start, i); | 113 | this->FreeBlock(after_start, i); |
| 98 | after_start += block_size; | 114 | after_start += block_size; |
| 99 | } | 115 | } |
| 100 | } | 116 | } |
| 101 | } | 117 | } |
| 102 | 118 | ||
| 103 | std::size_t KPageHeap::CalculateManagementOverheadSize(std::size_t region_size) { | 119 | size_t KPageHeap::CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts, |
| 104 | std::size_t overhead_size = 0; | 120 | size_t num_block_shifts) { |
| 105 | for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) { | 121 | size_t overhead_size = 0; |
| 106 | const std::size_t cur_block_shift{MemoryBlockPageShifts[i]}; | 122 | for (size_t i = 0; i < num_block_shifts; i++) { |
| 107 | const std::size_t next_block_shift{ | 123 | const size_t cur_block_shift = block_shifts[i]; |
| 108 | (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0}; | 124 | const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0; |
| 109 | overhead_size += KPageHeap::Block::CalculateManagementOverheadSize( | 125 | overhead_size += KPageHeap::Block::CalculateManagementOverheadSize( |
| 110 | region_size, cur_block_shift, next_block_shift); | 126 | region_size, cur_block_shift, next_block_shift); |
| 111 | } | 127 | } |
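As a rough worked example of the Free() strategy above, with block sizes assumed to be 4 KiB, 64 KiB and 2 MiB purely for illustration (the real shifts come from MemoryBlockPageShifts):

    // Freeing the page range [0x801F0000, 0x80601000):
    //   1. big blocks : the 2 MiB-aligned middle [0x80200000, 0x80600000) -> two 2 MiB FreeBlock calls
    //   2. before     : [0x801F0000, 0x80200000) -> one 64 KiB block
    //   3. after      : [0x80600000, 0x80601000) -> one 4 KiB block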
diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h
index a65aa28a0..60fff766b 100644
--- a/src/core/hle/kernel/k_page_heap.h
+++ b/src/core/hle/kernel/k_page_heap.h
@@ -23,54 +23,73 @@ public:
| 23 | KPageHeap() = default; | 23 | KPageHeap() = default; |
| 24 | ~KPageHeap() = default; | 24 | ~KPageHeap() = default; |
| 25 | 25 | ||
| 26 | constexpr VAddr GetAddress() const { | 26 | constexpr PAddr GetAddress() const { |
| 27 | return heap_address; | 27 | return m_heap_address; |
| 28 | } | 28 | } |
| 29 | constexpr std::size_t GetSize() const { | 29 | constexpr size_t GetSize() const { |
| 30 | return heap_size; | 30 | return m_heap_size; |
| 31 | } | 31 | } |
| 32 | constexpr VAddr GetEndAddress() const { | 32 | constexpr PAddr GetEndAddress() const { |
| 33 | return GetAddress() + GetSize(); | 33 | return this->GetAddress() + this->GetSize(); |
| 34 | } | 34 | } |
| 35 | constexpr std::size_t GetPageOffset(VAddr block) const { | 35 | constexpr size_t GetPageOffset(PAddr block) const { |
| 36 | return (block - GetAddress()) / PageSize; | 36 | return (block - this->GetAddress()) / PageSize; |
| 37 | } | ||
| 38 | constexpr size_t GetPageOffsetToEnd(PAddr block) const { | ||
| 39 | return (this->GetEndAddress() - block) / PageSize; | ||
| 40 | } | ||
| 41 | |||
| 42 | void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address, | ||
| 43 | size_t management_size) { | ||
| 44 | return this->Initialize(heap_address, heap_size, management_address, management_size, | ||
| 45 | MemoryBlockPageShifts.data(), NumMemoryBlockPageShifts); | ||
| 46 | } | ||
| 47 | |||
| 48 | size_t GetFreeSize() const { | ||
| 49 | return this->GetNumFreePages() * PageSize; | ||
| 37 | } | 50 | } |
| 38 | 51 | ||
| 39 | void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size); | 52 | void SetInitialUsedSize(size_t reserved_size) { |
| 40 | VAddr AllocateBlock(s32 index, bool random); | 53 | // Check that the reserved size is valid. |
| 41 | void Free(VAddr addr, std::size_t num_pages); | 54 | const size_t free_size = this->GetNumFreePages() * PageSize; |
| 55 | ASSERT(m_heap_size >= free_size + reserved_size); | ||
| 42 | 56 | ||
| 43 | void UpdateUsedSize() { | 57 | // Set the initial used size. |
| 44 | used_size = heap_size - (GetNumFreePages() * PageSize); | 58 | m_initial_used_size = m_heap_size - free_size - reserved_size; |
| 45 | } | 59 | } |
| 46 | 60 | ||
| 47 | static std::size_t CalculateManagementOverheadSize(std::size_t region_size); | 61 | PAddr AllocateBlock(s32 index, bool random); |
| 62 | void Free(PAddr addr, size_t num_pages); | ||
| 63 | |||
| 64 | static size_t CalculateManagementOverheadSize(size_t region_size) { | ||
| 65 | return CalculateManagementOverheadSize(region_size, MemoryBlockPageShifts.data(), | ||
| 66 | NumMemoryBlockPageShifts); | ||
| 67 | } | ||
| 48 | 68 | ||
| 49 | static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) { | 69 | static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) { |
| 50 | const auto target_pages{std::max(num_pages, align_pages)}; | 70 | const size_t target_pages = std::max(num_pages, align_pages); |
| 51 | for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) { | 71 | for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) { |
| 52 | if (target_pages <= | 72 | if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) { |
| 53 | (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) { | ||
| 54 | return static_cast<s32>(i); | 73 | return static_cast<s32>(i); |
| 55 | } | 74 | } |
| 56 | } | 75 | } |
| 57 | return -1; | 76 | return -1; |
| 58 | } | 77 | } |
| 59 | 78 | ||
| 60 | static constexpr s32 GetBlockIndex(std::size_t num_pages) { | 79 | static constexpr s32 GetBlockIndex(size_t num_pages) { |
| 61 | for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) { | 80 | for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) { |
| 62 | if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) { | 81 | if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) { |
| 63 | return i; | 82 | return i; |
| 64 | } | 83 | } |
| 65 | } | 84 | } |
| 66 | return -1; | 85 | return -1; |
| 67 | } | 86 | } |
| 68 | 87 | ||
| 69 | static constexpr std::size_t GetBlockSize(std::size_t index) { | 88 | static constexpr size_t GetBlockSize(size_t index) { |
| 70 | return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index]; | 89 | return size_t(1) << MemoryBlockPageShifts[index]; |
| 71 | } | 90 | } |
| 72 | 91 | ||
| 73 | static constexpr std::size_t GetBlockNumPages(std::size_t index) { | 92 | static constexpr size_t GetBlockNumPages(size_t index) { |
| 74 | return GetBlockSize(index) / PageSize; | 93 | return GetBlockSize(index) / PageSize; |
| 75 | } | 94 | } |
| 76 | 95 | ||
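An illustrative check of the index helpers above, assuming the usual 4 KiB PageSize (0x1000). With MemoryBlockPageShifts of 0xC, 0x10, 0x15, 0x16, 0x19, 0x1D and 0x1E, the block classes hold 1, 16, 512, 1024, 8192, 131072 and 262144 pages (4 KiB up to 1 GiB). These checks are hypothetical and only hold under that PageSize assumption; they are not part of the change.

// Illustrative only; requires PageSize == 0x1000.
static_assert(KPageHeap::GetBlockNumPages(0) == 1);
static_assert(KPageHeap::GetBlockNumPages(1) == 16);
// The largest block class that 5 pages can fully cover is the 1-page class (index 0)...
static_assert(KPageHeap::GetBlockIndex(5) == 0);
// ...but allocating 5 pages as one contiguous block needs the 16-page (64 KiB) class.
static_assert(KPageHeap::GetAlignedBlockIndex(5, 1) == 1);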
| @@ -83,114 +102,116 @@ private: | |||
| 83 | Block() = default; | 102 | Block() = default; |
| 84 | ~Block() = default; | 103 | ~Block() = default; |
| 85 | 104 | ||
| 86 | constexpr std::size_t GetShift() const { | 105 | constexpr size_t GetShift() const { |
| 87 | return block_shift; | 106 | return m_block_shift; |
| 88 | } | 107 | } |
| 89 | constexpr std::size_t GetNextShift() const { | 108 | constexpr size_t GetNextShift() const { |
| 90 | return next_block_shift; | 109 | return m_next_block_shift; |
| 91 | } | 110 | } |
| 92 | constexpr std::size_t GetSize() const { | 111 | constexpr size_t GetSize() const { |
| 93 | return static_cast<std::size_t>(1) << GetShift(); | 112 | return u64(1) << this->GetShift(); |
| 94 | } | 113 | } |
| 95 | constexpr std::size_t GetNumPages() const { | 114 | constexpr size_t GetNumPages() const { |
| 96 | return GetSize() / PageSize; | 115 | return this->GetSize() / PageSize; |
| 97 | } | 116 | } |
| 98 | constexpr std::size_t GetNumFreeBlocks() const { | 117 | constexpr size_t GetNumFreeBlocks() const { |
| 99 | return bitmap.GetNumBits(); | 118 | return m_bitmap.GetNumBits(); |
| 100 | } | 119 | } |
| 101 | constexpr std::size_t GetNumFreePages() const { | 120 | constexpr size_t GetNumFreePages() const { |
| 102 | return GetNumFreeBlocks() * GetNumPages(); | 121 | return this->GetNumFreeBlocks() * this->GetNumPages(); |
| 103 | } | 122 | } |
| 104 | 123 | ||
| 105 | u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs, | 124 | u64* Initialize(PAddr addr, size_t size, size_t bs, size_t nbs, u64* bit_storage) { |
| 106 | u64* bit_storage) { | 125 | // Set shifts. |
| 107 | // Set shifts | 126 | m_block_shift = bs; |
| 108 | block_shift = bs; | 127 | m_next_block_shift = nbs; |
| 109 | next_block_shift = nbs; | 128 | |
| 110 | 129 | // Align up the address. | |
| 111 | // Align up the address | 130 | PAddr end = addr + size; |
| 112 | VAddr end{addr + size}; | 131 | const size_t align = (m_next_block_shift != 0) ? (u64(1) << m_next_block_shift) |
| 113 | const auto align{(next_block_shift != 0) ? (1ULL << next_block_shift) | 132 | : (u64(1) << m_block_shift); |
| 114 | : (1ULL << block_shift)}; | 133 | addr = Common::AlignDown(addr, align); |
| 115 | addr = Common::AlignDown((addr), align); | 134 | end = Common::AlignUp(end, align); |
| 116 | end = Common::AlignUp((end), align); | 135 | |
| 117 | 136 | m_heap_address = addr; | |
| 118 | heap_address = addr; | 137 | m_end_offset = (end - addr) / (u64(1) << m_block_shift); |
| 119 | end_offset = (end - addr) / (1ULL << block_shift); | 138 | return m_bitmap.Initialize(bit_storage, m_end_offset); |
| 120 | return bitmap.Initialize(bit_storage, end_offset); | ||
| 121 | } | 139 | } |
| 122 | 140 | ||
| 123 | VAddr PushBlock(VAddr address) { | 141 | PAddr PushBlock(PAddr address) { |
| 124 | // Set the bit for the free block | 142 | // Set the bit for the free block. |
| 125 | std::size_t offset{(address - heap_address) >> GetShift()}; | 143 | size_t offset = (address - m_heap_address) >> this->GetShift(); |
| 126 | bitmap.SetBit(offset); | 144 | m_bitmap.SetBit(offset); |
| 127 | 145 | ||
| 128 | // If we have a next shift, try to clear the blocks below and return the address | 146 | // If we have a next shift, try to clear the blocks below this one and return the new |
| 129 | if (GetNextShift()) { | 147 | // address. |
| 130 | const auto diff{1ULL << (GetNextShift() - GetShift())}; | 148 | if (this->GetNextShift()) { |
| 149 | const size_t diff = u64(1) << (this->GetNextShift() - this->GetShift()); | ||
| 131 | offset = Common::AlignDown(offset, diff); | 150 | offset = Common::AlignDown(offset, diff); |
| 132 | if (bitmap.ClearRange(offset, diff)) { | 151 | if (m_bitmap.ClearRange(offset, diff)) { |
| 133 | return heap_address + (offset << GetShift()); | 152 | return m_heap_address + (offset << this->GetShift()); |
| 134 | } | 153 | } |
| 135 | } | 154 | } |
| 136 | 155 | ||
| 137 | // We couldn't coalesce, or we're already as big as possible | 156 | // We couldn't coalesce, or we're already as big as possible. |
| 138 | return 0; | 157 | return {}; |
| 139 | } | 158 | } |
| 140 | 159 | ||
| 141 | VAddr PopBlock(bool random) { | 160 | PAddr PopBlock(bool random) { |
| 142 | // Find a free block | 161 | // Find a free block. |
| 143 | const s64 soffset{bitmap.FindFreeBlock(random)}; | 162 | s64 soffset = m_bitmap.FindFreeBlock(random); |
| 144 | if (soffset < 0) { | 163 | if (soffset < 0) { |
| 145 | return 0; | 164 | return {}; |
| 146 | } | 165 | } |
| 147 | const auto offset{static_cast<std::size_t>(soffset)}; | 166 | const size_t offset = static_cast<size_t>(soffset); |
| 148 | 167 | ||
| 149 | // Update our tracking and return it | 168 | // Update our tracking and return it. |
| 150 | bitmap.ClearBit(offset); | 169 | m_bitmap.ClearBit(offset); |
| 151 | return heap_address + (offset << GetShift()); | 170 | return m_heap_address + (offset << this->GetShift()); |
| 152 | } | 171 | } |
| 153 | 172 | ||
| 154 | static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size, | 173 | public: |
| 155 | std::size_t cur_block_shift, | 174 | static constexpr size_t CalculateManagementOverheadSize(size_t region_size, |
| 156 | std::size_t next_block_shift) { | 175 | size_t cur_block_shift, |
| 157 | const auto cur_block_size{(1ULL << cur_block_shift)}; | 176 | size_t next_block_shift) { |
| 158 | const auto next_block_size{(1ULL << next_block_shift)}; | 177 | const size_t cur_block_size = (u64(1) << cur_block_shift); |
| 159 | const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size}; | 178 | const size_t next_block_size = (u64(1) << next_block_shift); |
| 179 | const size_t align = (next_block_shift != 0) ? next_block_size : cur_block_size; | ||
| 160 | return KPageBitmap::CalculateManagementOverheadSize( | 180 | return KPageBitmap::CalculateManagementOverheadSize( |
| 161 | (align * 2 + Common::AlignUp(region_size, align)) / cur_block_size); | 181 | (align * 2 + Common::AlignUp(region_size, align)) / cur_block_size); |
| 162 | } | 182 | } |
| 163 | 183 | ||
| 164 | private: | 184 | private: |
| 165 | KPageBitmap bitmap; | 185 | KPageBitmap m_bitmap; |
| 166 | VAddr heap_address{}; | 186 | PAddr m_heap_address{}; |
| 167 | uintptr_t end_offset{}; | 187 | uintptr_t m_end_offset{}; |
| 168 | std::size_t block_shift{}; | 188 | size_t m_block_shift{}; |
| 169 | std::size_t next_block_shift{}; | 189 | size_t m_next_block_shift{}; |
| 170 | }; | 190 | }; |
| 171 | 191 | ||
| 172 | constexpr std::size_t GetNumFreePages() const { | 192 | private: |
| 173 | std::size_t num_free{}; | 193 | void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address, |
| 174 | 194 | size_t management_size, const size_t* block_shifts, size_t num_block_shifts); | |
| 175 | for (const auto& block : blocks) { | 195 | size_t GetNumFreePages() const; |
| 176 | num_free += block.GetNumFreePages(); | ||
| 177 | } | ||
| 178 | |||
| 179 | return num_free; | ||
| 180 | } | ||
| 181 | 196 | ||
| 182 | void FreeBlock(VAddr block, s32 index); | 197 | void FreeBlock(PAddr block, s32 index); |
| 183 | 198 | ||
| 184 | static constexpr std::size_t NumMemoryBlockPageShifts{7}; | 199 | static constexpr size_t NumMemoryBlockPageShifts{7}; |
| 185 | static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{ | 200 | static constexpr std::array<size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{ |
| 186 | 0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E, | 201 | 0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E, |
| 187 | }; | 202 | }; |
| 188 | 203 | ||
| 189 | VAddr heap_address{}; | 204 | private: |
| 190 | std::size_t heap_size{}; | 205 | static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts, |
| 191 | std::size_t used_size{}; | 206 | size_t num_block_shifts); |
| 192 | std::array<Block, NumMemoryBlockPageShifts> blocks{}; | 207 | |
| 193 | std::vector<u64> metadata; | 208 | private: |
| 209 | PAddr m_heap_address{}; | ||
| 210 | size_t m_heap_size{}; | ||
| 211 | size_t m_initial_used_size{}; | ||
| 212 | size_t m_num_blocks{}; | ||
| 213 | std::array<Block, NumMemoryBlockPageShifts> m_blocks{}; | ||
| 214 | std::vector<u64> m_management_data; | ||
| 194 | }; | 215 | }; |
| 195 | 216 | ||
| 196 | } // namespace Kernel | 217 | } // namespace Kernel |
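The PushBlock/ClearRange pair above implements the buddy-style coalescing step: freeing a block sets its bit at the current level, and if every sibling that together forms one next-level block is also free, the whole group is cleared and the merged block is handed up. A minimal sketch of that step, with a plain bool vector standing in for KPageBitmap (names illustrative):

#include <cstddef>
#include <vector>

// Returns true (and clears the group) when all `group_size` blocks around `offset` are free,
// meaning the caller can free one block at the next-larger level instead.
bool TryCoalesce(std::vector<bool>& free_bits, std::size_t offset, std::size_t group_size) {
    const std::size_t base = offset / group_size * group_size; // align down to the group
    for (std::size_t i = 0; i < group_size; i++) {
        if (!free_bits[base + i]) {
            return false; // a sibling is still allocated; cannot merge yet
        }
    }
    for (std::size_t i = 0; i < group_size; i++) {
        free_bits[base + i] = false; // consume the siblings; caller frees the larger block
    }
    return true;
}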
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp index 88aa2a152..dfea0b6e2 100644 --- a/src/core/hle/kernel/k_page_table.cpp +++ b/src/core/hle/kernel/k_page_table.cpp | |||
| @@ -273,11 +273,12 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory | |||
| 273 | R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, | 273 | R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, |
| 274 | KMemoryPermission::None, KMemoryPermission::None, | 274 | KMemoryPermission::None, KMemoryPermission::None, |
| 275 | KMemoryAttribute::None, KMemoryAttribute::None)); | 275 | KMemoryAttribute::None, KMemoryAttribute::None)); |
| 276 | KPageLinkedList pg; | ||
| 277 | R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( | ||
| 278 | &pg, num_pages, | ||
| 279 | KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option))); | ||
| 276 | 280 | ||
| 277 | KPageLinkedList page_linked_list; | 281 | R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup)); |
| 278 | R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool, | ||
| 279 | allocation_option)); | ||
| 280 | R_TRY(Operate(addr, num_pages, page_linked_list, OperationType::MapGroup)); | ||
| 281 | 282 | ||
| 282 | block_manager->Update(addr, num_pages, state, perm); | 283 | block_manager->Update(addr, num_pages, state, perm); |
| 283 | 284 | ||
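The MapProcessCode hunk above replaces the old Allocate(pool, direction) call with AllocateAndOpen plus an encoded option word. The actual encoding lives in k_memory_manager.h (changed by this commit but not reproduced in this hunk); the sketch below only shows a plausible shape of what EncodeOption packs, with assumed shift values.

#include <cstdint>
using u32 = std::uint32_t;

// Assumption: pool selector and allocation direction packed into one u32 option word.
enum class PoolSketch : u32 { Application = 0, Applet = 1, System = 2 };
enum class DirectionSketch : u32 { FromFront = 0, FromBack = 1 };

constexpr u32 EncodeOptionSketch(PoolSketch pool, DirectionSketch dir) {
    constexpr u32 PoolShift = 4;      // assumed
    constexpr u32 DirectionShift = 0; // assumed
    return (static_cast<u32>(pool) << PoolShift) | (static_cast<u32>(dir) << DirectionShift);
}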
| @@ -443,9 +444,10 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 443 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 444 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 444 | 445 | ||
| 445 | // Allocate pages for the new memory. | 446 | // Allocate pages for the new memory. |
| 446 | KPageLinkedList page_linked_list; | 447 | KPageLinkedList pg; |
| 447 | R_TRY(system.Kernel().MemoryManager().Allocate( | 448 | R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( |
| 448 | page_linked_list, (size - mapped_size) / PageSize, memory_pool, allocation_option)); | 449 | &pg, (size - mapped_size) / PageSize, |
| 450 | KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); | ||
| 449 | 451 | ||
| 450 | // Map the memory. | 452 | // Map the memory. |
| 451 | { | 453 | { |
| @@ -547,7 +549,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 547 | }); | 549 | }); |
| 548 | 550 | ||
| 549 | // Iterate over the memory. | 551 | // Iterate over the memory. |
| 550 | auto pg_it = page_linked_list.Nodes().begin(); | 552 | auto pg_it = pg.Nodes().begin(); |
| 551 | PAddr pg_phys_addr = pg_it->GetAddress(); | 553 | PAddr pg_phys_addr = pg_it->GetAddress(); |
| 552 | size_t pg_pages = pg_it->GetNumPages(); | 554 | size_t pg_pages = pg_it->GetNumPages(); |
| 553 | 555 | ||
| @@ -571,7 +573,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 571 | // Check if we're at the end of the physical block. | 573 | // Check if we're at the end of the physical block. |
| 572 | if (pg_pages == 0) { | 574 | if (pg_pages == 0) { |
| 573 | // Ensure there are more pages to map. | 575 | // Ensure there are more pages to map. |
| 574 | ASSERT(pg_it != page_linked_list.Nodes().end()); | 576 | ASSERT(pg_it != pg.Nodes().end()); |
| 575 | 577 | ||
| 576 | // Advance our physical block. | 578 | // Advance our physical block. |
| 577 | ++pg_it; | 579 | ++pg_it; |
| @@ -841,10 +843,14 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 841 | process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); | 843 | process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); |
| 842 | 844 | ||
| 843 | // Update memory blocks. | 845 | // Update memory blocks. |
| 844 | system.Kernel().MemoryManager().Free(pg, size / PageSize, memory_pool, allocation_option); | ||
| 845 | block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None, | 846 | block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None, |
| 846 | KMemoryAttribute::None); | 847 | KMemoryAttribute::None); |
| 847 | 848 | ||
| 849 | // TODO(bunnei): This is a workaround until the next set of changes, where we add reference | ||
| 850 | // counting for mapped pages. Until then, we must manually close the reference to the page | ||
| 851 | // group. | ||
| 852 | system.Kernel().MemoryManager().Close(pg); | ||
| 853 | |||
| 848 | // We succeeded. | 854 | // We succeeded. |
| 849 | remap_guard.Cancel(); | 855 | remap_guard.Cancel(); |
| 850 | 856 | ||
| @@ -1270,9 +1276,16 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) { | |||
| 1270 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 1276 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 1271 | 1277 | ||
| 1272 | // Allocate pages for the heap extension. | 1278 | // Allocate pages for the heap extension. |
| 1273 | KPageLinkedList page_linked_list; | 1279 | KPageLinkedList pg; |
| 1274 | R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, allocation_size / PageSize, | 1280 | R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( |
| 1275 | memory_pool, allocation_option)); | 1281 | &pg, allocation_size / PageSize, |
| 1282 | KMemoryManager::EncodeOption(memory_pool, allocation_option))); | ||
| 1283 | |||
| 1284 | // Clear all the newly allocated pages. | ||
| 1285 | for (const auto& it : pg.Nodes()) { | ||
| 1286 | std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value, | ||
| 1287 | it.GetSize()); | ||
| 1288 | } | ||
| 1276 | 1289 | ||
| 1277 | // Map the pages. | 1290 | // Map the pages. |
| 1278 | { | 1291 | { |
| @@ -1291,7 +1304,7 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) { | |||
| 1291 | 1304 | ||
| 1292 | // Map the pages. | 1305 | // Map the pages. |
| 1293 | const auto num_pages = allocation_size / PageSize; | 1306 | const auto num_pages = allocation_size / PageSize; |
| 1294 | R_TRY(Operate(current_heap_end, num_pages, page_linked_list, OperationType::MapGroup)); | 1307 | R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup)); |
| 1295 | 1308 | ||
| 1296 | // Clear all the newly allocated pages. | 1309 | // Clear all the newly allocated pages. |
| 1297 | for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) { | 1310 | for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) { |
| @@ -1339,8 +1352,9 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, | |||
| 1339 | R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); | 1352 | R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); |
| 1340 | } else { | 1353 | } else { |
| 1341 | KPageLinkedList page_group; | 1354 | KPageLinkedList page_group; |
| 1342 | R_TRY(system.Kernel().MemoryManager().Allocate(page_group, needed_num_pages, memory_pool, | 1355 | R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( |
| 1343 | allocation_option)); | 1356 | &page_group, needed_num_pages, |
| 1357 | KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); | ||
| 1344 | R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); | 1358 | R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); |
| 1345 | } | 1359 | } |
| 1346 | 1360 | ||
| @@ -1547,7 +1561,7 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermiss | |||
| 1547 | return ResultSuccess; | 1561 | return ResultSuccess; |
| 1548 | } | 1562 | } |
| 1549 | 1563 | ||
| 1550 | constexpr VAddr KPageTable::GetRegionAddress(KMemoryState state) const { | 1564 | VAddr KPageTable::GetRegionAddress(KMemoryState state) const { |
| 1551 | switch (state) { | 1565 | switch (state) { |
| 1552 | case KMemoryState::Free: | 1566 | case KMemoryState::Free: |
| 1553 | case KMemoryState::Kernel: | 1567 | case KMemoryState::Kernel: |
| @@ -1583,7 +1597,7 @@ constexpr VAddr KPageTable::GetRegionAddress(KMemoryState state) const { | |||
| 1583 | } | 1597 | } |
| 1584 | } | 1598 | } |
| 1585 | 1599 | ||
| 1586 | constexpr std::size_t KPageTable::GetRegionSize(KMemoryState state) const { | 1600 | std::size_t KPageTable::GetRegionSize(KMemoryState state) const { |
| 1587 | switch (state) { | 1601 | switch (state) { |
| 1588 | case KMemoryState::Free: | 1602 | case KMemoryState::Free: |
| 1589 | case KMemoryState::Kernel: | 1603 | case KMemoryState::Kernel: |
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index c98887d34..194177332 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h | |||
| @@ -102,8 +102,8 @@ private: | |||
| 102 | OperationType operation); | 102 | OperationType operation); |
| 103 | ResultCode Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, | 103 | ResultCode Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, |
| 104 | OperationType operation, PAddr map_addr = 0); | 104 | OperationType operation, PAddr map_addr = 0); |
| 105 | constexpr VAddr GetRegionAddress(KMemoryState state) const; | 105 | VAddr GetRegionAddress(KMemoryState state) const; |
| 106 | constexpr std::size_t GetRegionSize(KMemoryState state) const; | 106 | std::size_t GetRegionSize(KMemoryState state) const; |
| 107 | 107 | ||
| 108 | ResultCode CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, | 108 | ResultCode CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, |
| 109 | std::size_t size, KMemoryState state_mask, | 109 | std::size_t size, KMemoryState state_mask, |
| @@ -254,8 +254,7 @@ public: | |||
| 254 | return !IsOutsideASLRRegion(address, size); | 254 | return !IsOutsideASLRRegion(address, size); |
| 255 | } | 255 | } |
| 256 | 256 | ||
| 257 | PAddr GetPhysicalAddr(VAddr addr) { | 257 | PAddr GetPhysicalAddr(VAddr addr) const { |
| 258 | ASSERT(IsLockedByCurrentThread()); | ||
| 259 | const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits]; | 258 | const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits]; |
| 260 | ASSERT(backing_addr); | 259 | ASSERT(backing_addr); |
| 261 | return backing_addr + addr; | 260 | return backing_addr + addr; |
| @@ -311,6 +310,8 @@ private: | |||
| 311 | bool is_kernel{}; | 310 | bool is_kernel{}; |
| 312 | bool is_aslr_enabled{}; | 311 | bool is_aslr_enabled{}; |
| 313 | 312 | ||
| 313 | u32 heap_fill_value{}; | ||
| 314 | |||
| 314 | KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application}; | 315 | KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application}; |
| 315 | KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront}; | 316 | KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront}; |
| 316 | 317 | ||
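The GetPhysicalAddr change above only drops the lock assertion; the translation itself is unchanged: backing_addr stores, per page, the difference between host physical base and guest virtual base, so adding the virtual address back yields the physical address. A standalone restatement of that lookup (names hypothetical):

#include <cstddef>
#include <cstdint>
#include <vector>

using PAddr = std::uint64_t;
using VAddr = std::uint64_t;

// backing_table[page_index] holds (physical_page_base - virtual_page_base) for that page.
PAddr TranslateSketch(const std::vector<PAddr>& backing_table, VAddr addr,
                      std::size_t page_bits) {
    const PAddr diff = backing_table[addr >> page_bits];
    return diff + addr;
}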
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 797f47021..71bd466cf 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -70,13 +70,12 @@ struct KernelCore::Impl { | |||
| 70 | 70 | ||
| 71 | // Derive the initial memory layout from the emulated board | 71 | // Derive the initial memory layout from the emulated board |
| 72 | Init::InitializeSlabResourceCounts(kernel); | 72 | Init::InitializeSlabResourceCounts(kernel); |
| 73 | KMemoryLayout memory_layout; | 73 | DeriveInitialMemoryLayout(); |
| 74 | DeriveInitialMemoryLayout(memory_layout); | 74 | Init::InitializeSlabHeaps(system, *memory_layout); |
| 75 | Init::InitializeSlabHeaps(system, memory_layout); | ||
| 76 | 75 | ||
| 77 | // Initialize kernel memory and resources. | 76 | // Initialize kernel memory and resources. |
| 78 | InitializeSystemResourceLimit(kernel, system.CoreTiming(), memory_layout); | 77 | InitializeSystemResourceLimit(kernel, system.CoreTiming()); |
| 79 | InitializeMemoryLayout(memory_layout); | 78 | InitializeMemoryLayout(); |
| 80 | InitializePageSlab(); | 79 | InitializePageSlab(); |
| 81 | InitializeSchedulers(); | 80 | InitializeSchedulers(); |
| 82 | InitializeSuspendThreads(); | 81 | InitializeSuspendThreads(); |
| @@ -219,12 +218,11 @@ struct KernelCore::Impl { | |||
| 219 | 218 | ||
| 220 | // Creates the default system resource limit | 219 | // Creates the default system resource limit |
| 221 | void InitializeSystemResourceLimit(KernelCore& kernel, | 220 | void InitializeSystemResourceLimit(KernelCore& kernel, |
| 222 | const Core::Timing::CoreTiming& core_timing, | 221 | const Core::Timing::CoreTiming& core_timing) { |
| 223 | const KMemoryLayout& memory_layout) { | ||
| 224 | system_resource_limit = KResourceLimit::Create(system.Kernel()); | 222 | system_resource_limit = KResourceLimit::Create(system.Kernel()); |
| 225 | system_resource_limit->Initialize(&core_timing); | 223 | system_resource_limit->Initialize(&core_timing); |
| 226 | 224 | ||
| 227 | const auto [total_size, kernel_size] = memory_layout.GetTotalAndKernelMemorySizes(); | 225 | const auto [total_size, kernel_size] = memory_layout->GetTotalAndKernelMemorySizes(); |
| 228 | 226 | ||
| 229 | // If setting the default system values fails, then something seriously wrong has occurred. | 227 | // If setting the default system values fails, then something seriously wrong has occurred. |
| 230 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, total_size) | 228 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, total_size) |
| @@ -353,16 +351,18 @@ struct KernelCore::Impl { | |||
| 353 | return schedulers[thread_id]->GetCurrentThread(); | 351 | return schedulers[thread_id]->GetCurrentThread(); |
| 354 | } | 352 | } |
| 355 | 353 | ||
| 356 | void DeriveInitialMemoryLayout(KMemoryLayout& memory_layout) { | 354 | void DeriveInitialMemoryLayout() { |
| 355 | memory_layout = std::make_unique<KMemoryLayout>(); | ||
| 356 | |||
| 357 | // Insert the root region for the virtual memory tree, from which all other regions will | 357 | // Insert the root region for the virtual memory tree, from which all other regions will |
| 358 | // derive. | 358 | // derive. |
| 359 | memory_layout.GetVirtualMemoryRegionTree().InsertDirectly( | 359 | memory_layout->GetVirtualMemoryRegionTree().InsertDirectly( |
| 360 | KernelVirtualAddressSpaceBase, | 360 | KernelVirtualAddressSpaceBase, |
| 361 | KernelVirtualAddressSpaceBase + KernelVirtualAddressSpaceSize - 1); | 361 | KernelVirtualAddressSpaceBase + KernelVirtualAddressSpaceSize - 1); |
| 362 | 362 | ||
| 363 | // Insert the root region for the physical memory tree, from which all other regions will | 363 | // Insert the root region for the physical memory tree, from which all other regions will |
| 364 | // derive. | 364 | // derive. |
| 365 | memory_layout.GetPhysicalMemoryRegionTree().InsertDirectly( | 365 | memory_layout->GetPhysicalMemoryRegionTree().InsertDirectly( |
| 366 | KernelPhysicalAddressSpaceBase, | 366 | KernelPhysicalAddressSpaceBase, |
| 367 | KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1); | 367 | KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1); |
| 368 | 368 | ||
| @@ -379,7 +379,7 @@ struct KernelCore::Impl { | |||
| 379 | if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) { | 379 | if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) { |
| 380 | kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start; | 380 | kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start; |
| 381 | } | 381 | } |
| 382 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | 382 | ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( |
| 383 | kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel)); | 383 | kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel)); |
| 384 | 384 | ||
| 385 | // Setup the code region. | 385 | // Setup the code region. |
| @@ -388,11 +388,11 @@ struct KernelCore::Impl { | |||
| 388 | Common::AlignDown(code_start_virt_addr, CodeRegionAlign); | 388 | Common::AlignDown(code_start_virt_addr, CodeRegionAlign); |
| 389 | constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign); | 389 | constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign); |
| 390 | constexpr size_t code_region_size = code_region_end - code_region_start; | 390 | constexpr size_t code_region_size = code_region_end - code_region_start; |
| 391 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | 391 | ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( |
| 392 | code_region_start, code_region_size, KMemoryRegionType_KernelCode)); | 392 | code_region_start, code_region_size, KMemoryRegionType_KernelCode)); |
| 393 | 393 | ||
| 394 | // Setup board-specific device physical regions. | 394 | // Setup board-specific device physical regions. |
| 395 | Init::SetupDevicePhysicalMemoryRegions(memory_layout); | 395 | Init::SetupDevicePhysicalMemoryRegions(*memory_layout); |
| 396 | 396 | ||
| 397 | // Determine the amount of space needed for the misc region. | 397 | // Determine the amount of space needed for the misc region. |
| 398 | size_t misc_region_needed_size; | 398 | size_t misc_region_needed_size; |
| @@ -401,7 +401,7 @@ struct KernelCore::Impl { | |||
| 401 | misc_region_needed_size = Core::Hardware::NUM_CPU_CORES * (3 * (PageSize + PageSize)); | 401 | misc_region_needed_size = Core::Hardware::NUM_CPU_CORES * (3 * (PageSize + PageSize)); |
| 402 | 402 | ||
| 403 | // Account for each auto-map device. | 403 | // Account for each auto-map device. |
| 404 | for (const auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { | 404 | for (const auto& region : memory_layout->GetPhysicalMemoryRegionTree()) { |
| 405 | if (region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) { | 405 | if (region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) { |
| 406 | // Check that the region is valid. | 406 | // Check that the region is valid. |
| 407 | ASSERT(region.GetEndAddress() != 0); | 407 | ASSERT(region.GetEndAddress() != 0); |
| @@ -426,22 +426,22 @@ struct KernelCore::Impl { | |||
| 426 | 426 | ||
| 427 | // Setup the misc region. | 427 | // Setup the misc region. |
| 428 | const VAddr misc_region_start = | 428 | const VAddr misc_region_start = |
| 429 | memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( | 429 | memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion( |
| 430 | misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel); | 430 | misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel); |
| 431 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | 431 | ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( |
| 432 | misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc)); | 432 | misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc)); |
| 433 | 433 | ||
| 434 | // Setup the stack region. | 434 | // Setup the stack region. |
| 435 | constexpr size_t StackRegionSize = 14_MiB; | 435 | constexpr size_t StackRegionSize = 14_MiB; |
| 436 | constexpr size_t StackRegionAlign = KernelAslrAlignment; | 436 | constexpr size_t StackRegionAlign = KernelAslrAlignment; |
| 437 | const VAddr stack_region_start = | 437 | const VAddr stack_region_start = |
| 438 | memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( | 438 | memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion( |
| 439 | StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel); | 439 | StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel); |
| 440 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | 440 | ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( |
| 441 | stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack)); | 441 | stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack)); |
| 442 | 442 | ||
| 443 | // Determine the size of the resource region. | 443 | // Determine the size of the resource region. |
| 444 | const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit(); | 444 | const size_t resource_region_size = memory_layout->GetResourceRegionSizeForInit(); |
| 445 | 445 | ||
| 446 | // Determine the size of the slab region. | 446 | // Determine the size of the slab region. |
| 447 | const size_t slab_region_size = | 447 | const size_t slab_region_size = |
| @@ -458,23 +458,23 @@ struct KernelCore::Impl { | |||
| 458 | Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) - | 458 | Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) - |
| 459 | Common::AlignDown(code_end_phys_addr, SlabRegionAlign); | 459 | Common::AlignDown(code_end_phys_addr, SlabRegionAlign); |
| 460 | const VAddr slab_region_start = | 460 | const VAddr slab_region_start = |
| 461 | memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( | 461 | memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion( |
| 462 | slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) + | 462 | slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) + |
| 463 | (code_end_phys_addr % SlabRegionAlign); | 463 | (code_end_phys_addr % SlabRegionAlign); |
| 464 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | 464 | ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( |
| 465 | slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab)); | 465 | slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab)); |
| 466 | 466 | ||
| 467 | // Setup the temp region. | 467 | // Setup the temp region. |
| 468 | constexpr size_t TempRegionSize = 128_MiB; | 468 | constexpr size_t TempRegionSize = 128_MiB; |
| 469 | constexpr size_t TempRegionAlign = KernelAslrAlignment; | 469 | constexpr size_t TempRegionAlign = KernelAslrAlignment; |
| 470 | const VAddr temp_region_start = | 470 | const VAddr temp_region_start = |
| 471 | memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( | 471 | memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion( |
| 472 | TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel); | 472 | TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel); |
| 473 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize, | 473 | ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize, |
| 474 | KMemoryRegionType_KernelTemp)); | 474 | KMemoryRegionType_KernelTemp)); |
| 475 | 475 | ||
| 476 | // Automatically map in devices that have auto-map attributes. | 476 | // Automatically map in devices that have auto-map attributes. |
| 477 | for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { | 477 | for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) { |
| 478 | // We only care about kernel regions. | 478 | // We only care about kernel regions. |
| 479 | if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) { | 479 | if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) { |
| 480 | continue; | 480 | continue; |
| @@ -501,21 +501,21 @@ struct KernelCore::Impl { | |||
| 501 | const size_t map_size = | 501 | const size_t map_size = |
| 502 | Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr; | 502 | Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr; |
| 503 | const VAddr map_virt_addr = | 503 | const VAddr map_virt_addr = |
| 504 | memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard( | 504 | memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard( |
| 505 | map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize); | 505 | map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize); |
| 506 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | 506 | ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( |
| 507 | map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice)); | 507 | map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice)); |
| 508 | region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr); | 508 | region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr); |
| 509 | } | 509 | } |
| 510 | 510 | ||
| 511 | Init::SetupDramPhysicalMemoryRegions(memory_layout); | 511 | Init::SetupDramPhysicalMemoryRegions(*memory_layout); |
| 512 | 512 | ||
| 513 | // Insert a physical region for the kernel code region. | 513 | // Insert a physical region for the kernel code region. |
| 514 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | 514 | ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( |
| 515 | code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode)); | 515 | code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode)); |
| 516 | 516 | ||
| 517 | // Insert a physical region for the kernel slab region. | 517 | // Insert a physical region for the kernel slab region. |
| 518 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | 518 | ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( |
| 519 | slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab)); | 519 | slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab)); |
| 520 | 520 | ||
| 521 | // Determine size available for kernel page table heaps, requiring > 8 MB. | 521 | // Determine size available for kernel page table heaps, requiring > 8 MB. |
| @@ -524,12 +524,12 @@ struct KernelCore::Impl { | |||
| 524 | ASSERT(page_table_heap_size / 4_MiB > 2); | 524 | ASSERT(page_table_heap_size / 4_MiB > 2); |
| 525 | 525 | ||
| 526 | // Insert a physical region for the kernel page table heap region | 526 | // Insert a physical region for the kernel page table heap region |
| 527 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | 527 | ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( |
| 528 | slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap)); | 528 | slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap)); |
| 529 | 529 | ||
| 530 | // All DRAM regions that we haven't tagged by this point will be mapped under the linear | 530 | // All DRAM regions that we haven't tagged by this point will be mapped under the linear |
| 531 | // mapping. Tag them. | 531 | // mapping. Tag them. |
| 532 | for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { | 532 | for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) { |
| 533 | if (region.GetType() == KMemoryRegionType_Dram) { | 533 | if (region.GetType() == KMemoryRegionType_Dram) { |
| 534 | // Check that the region is valid. | 534 | // Check that the region is valid. |
| 535 | ASSERT(region.GetEndAddress() != 0); | 535 | ASSERT(region.GetEndAddress() != 0); |
| @@ -541,7 +541,7 @@ struct KernelCore::Impl { | |||
| 541 | 541 | ||
| 542 | // Get the linear region extents. | 542 | // Get the linear region extents. |
| 543 | const auto linear_extents = | 543 | const auto linear_extents = |
| 544 | memory_layout.GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | 544 | memory_layout->GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( |
| 545 | KMemoryRegionAttr_LinearMapped); | 545 | KMemoryRegionAttr_LinearMapped); |
| 546 | ASSERT(linear_extents.GetEndAddress() != 0); | 546 | ASSERT(linear_extents.GetEndAddress() != 0); |
| 547 | 547 | ||
| @@ -553,7 +553,7 @@ struct KernelCore::Impl { | |||
| 553 | Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) - | 553 | Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) - |
| 554 | aligned_linear_phys_start; | 554 | aligned_linear_phys_start; |
| 555 | const VAddr linear_region_start = | 555 | const VAddr linear_region_start = |
| 556 | memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard( | 556 | memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard( |
| 557 | linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign); | 557 | linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign); |
| 558 | 558 | ||
| 559 | const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start; | 559 | const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start; |
| @@ -562,7 +562,7 @@ struct KernelCore::Impl { | |||
| 562 | { | 562 | { |
| 563 | PAddr cur_phys_addr = 0; | 563 | PAddr cur_phys_addr = 0; |
| 564 | u64 cur_size = 0; | 564 | u64 cur_size = 0; |
| 565 | for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { | 565 | for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) { |
| 566 | if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) { | 566 | if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) { |
| 567 | continue; | 567 | continue; |
| 568 | } | 568 | } |
| @@ -581,55 +581,49 @@ struct KernelCore::Impl { | |||
| 581 | 581 | ||
| 582 | const VAddr region_virt_addr = | 582 | const VAddr region_virt_addr = |
| 583 | region.GetAddress() + linear_region_phys_to_virt_diff; | 583 | region.GetAddress() + linear_region_phys_to_virt_diff; |
| 584 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | 584 | ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( |
| 585 | region_virt_addr, region.GetSize(), | 585 | region_virt_addr, region.GetSize(), |
| 586 | GetTypeForVirtualLinearMapping(region.GetType()))); | 586 | GetTypeForVirtualLinearMapping(region.GetType()))); |
| 587 | region.SetPairAddress(region_virt_addr); | 587 | region.SetPairAddress(region_virt_addr); |
| 588 | 588 | ||
| 589 | KMemoryRegion* virt_region = | 589 | KMemoryRegion* virt_region = |
| 590 | memory_layout.GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr); | 590 | memory_layout->GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr); |
| 591 | ASSERT(virt_region != nullptr); | 591 | ASSERT(virt_region != nullptr); |
| 592 | virt_region->SetPairAddress(region.GetAddress()); | 592 | virt_region->SetPairAddress(region.GetAddress()); |
| 593 | } | 593 | } |
| 594 | } | 594 | } |
| 595 | 595 | ||
| 596 | // Insert regions for the initial page table region. | 596 | // Insert regions for the initial page table region. |
| 597 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | 597 | ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert( |
| 598 | resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt)); | 598 | resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt)); |
| 599 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | 599 | ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( |
| 600 | resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize, | 600 | resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize, |
| 601 | KMemoryRegionType_VirtualDramKernelInitPt)); | 601 | KMemoryRegionType_VirtualDramKernelInitPt)); |
| 602 | 602 | ||
| 603 | // All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to | 603 | // All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to |
| 604 | // some pool partition. Tag them. | 604 | // some pool partition. Tag them. |
| 605 | for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { | 605 | for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) { |
| 606 | if (region.GetType() == (KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped)) { | 606 | if (region.GetType() == (KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped)) { |
| 607 | region.SetType(KMemoryRegionType_DramPoolPartition); | 607 | region.SetType(KMemoryRegionType_DramPoolPartition); |
| 608 | } | 608 | } |
| 609 | } | 609 | } |
| 610 | 610 | ||
| 611 | // Setup all other memory regions needed to arrange the pool partitions. | 611 | // Setup all other memory regions needed to arrange the pool partitions. |
| 612 | Init::SetupPoolPartitionMemoryRegions(memory_layout); | 612 | Init::SetupPoolPartitionMemoryRegions(*memory_layout); |
| 613 | 613 | ||
| 614 | // Cache all linear regions in their own trees for faster access, later. | 614 | // Cache all linear regions in their own trees for faster access, later. |
| 615 | memory_layout.InitializeLinearMemoryRegionTrees(aligned_linear_phys_start, | 615 | memory_layout->InitializeLinearMemoryRegionTrees(aligned_linear_phys_start, |
| 616 | linear_region_start); | 616 | linear_region_start); |
| 617 | } | 617 | } |
| 618 | 618 | ||
| 619 | void InitializeMemoryLayout(const KMemoryLayout& memory_layout) { | 619 | void InitializeMemoryLayout() { |
| 620 | const auto system_pool = memory_layout.GetKernelSystemPoolRegionPhysicalExtents(); | 620 | const auto system_pool = memory_layout->GetKernelSystemPoolRegionPhysicalExtents(); |
| 621 | const auto applet_pool = memory_layout.GetKernelAppletPoolRegionPhysicalExtents(); | ||
| 622 | const auto application_pool = memory_layout.GetKernelApplicationPoolRegionPhysicalExtents(); | ||
| 623 | 621 | ||
| 624 | // Initialize memory managers | 622 | // Initialize the memory manager. |
| 625 | memory_manager = std::make_unique<KMemoryManager>(system); | 623 | memory_manager = std::make_unique<KMemoryManager>(system); |
| 626 | memory_manager->InitializeManager(KMemoryManager::Pool::Application, | 624 | const auto& management_region = memory_layout->GetPoolManagementRegion(); |
| 627 | application_pool.GetAddress(), | 625 | ASSERT(management_region.GetEndAddress() != 0); |
| 628 | application_pool.GetEndAddress()); | 626 | memory_manager->Initialize(management_region.GetAddress(), management_region.GetSize()); |
| 629 | memory_manager->InitializeManager(KMemoryManager::Pool::Applet, applet_pool.GetAddress(), | ||
| 630 | applet_pool.GetEndAddress()); | ||
| 631 | memory_manager->InitializeManager(KMemoryManager::Pool::System, system_pool.GetAddress(), | ||
| 632 | system_pool.GetEndAddress()); | ||
| 633 | 627 | ||
| 634 | // Setup memory regions for emulated processes | 628 | // Setup memory regions for emulated processes |
| 635 | // TODO(bunnei): These should not be hardcoded regions initialized within the kernel | 629 | // TODO(bunnei): These should not be hardcoded regions initialized within the kernel |
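Once the randomized linear region start has been chosen above, every linear-mapped physical region pairs with virtual memory at a single fixed offset (linear_region_phys_to_virt_diff), so conversion in either direction is one addition or subtraction:

#include <cstdint>

using PAddr = std::uint64_t;
using VAddr = std::uint64_t;

// diff == linear_region_start - aligned_linear_phys_start, as computed above.
constexpr VAddr LinearPhysToVirt(PAddr phys, std::uint64_t diff) {
    return phys + diff;
}
constexpr PAddr LinearVirtToPhys(VAddr virt, std::uint64_t diff) {
    return virt - diff;
}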
| @@ -770,6 +764,9 @@ struct KernelCore::Impl { | |||
| 770 | Kernel::KSharedMemory* irs_shared_mem{}; | 764 | Kernel::KSharedMemory* irs_shared_mem{}; |
| 771 | Kernel::KSharedMemory* time_shared_mem{}; | 765 | Kernel::KSharedMemory* time_shared_mem{}; |
| 772 | 766 | ||
| 767 | // Memory layout | ||
| 768 | std::unique_ptr<KMemoryLayout> memory_layout; | ||
| 769 | |||
| 773 | // Threads used for services | 770 | // Threads used for services |
| 774 | std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads; | 771 | std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads; |
| 775 | Common::ThreadWorker service_threads_manager; | 772 | Common::ThreadWorker service_threads_manager; |
| @@ -1135,6 +1132,10 @@ const KWorkerTaskManager& KernelCore::WorkerTaskManager() const { | |||
| 1135 | return impl->worker_task_manager; | 1132 | return impl->worker_task_manager; |
| 1136 | } | 1133 | } |
| 1137 | 1134 | ||
| 1135 | const KMemoryLayout& KernelCore::MemoryLayout() const { | ||
| 1136 | return *impl->memory_layout; | ||
| 1137 | } | ||
| 1138 | |||
| 1138 | bool KernelCore::IsPhantomModeForSingleCore() const { | 1139 | bool KernelCore::IsPhantomModeForSingleCore() const { |
| 1139 | return impl->IsPhantomModeForSingleCore(); | 1140 | return impl->IsPhantomModeForSingleCore(); |
| 1140 | } | 1141 | } |
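With the per-pool extents gone, the single KMemoryManager::Initialize call above receives only the pool-management region. How that region is consumed is internal to k_memory_manager.cpp (part of this commit but outside this excerpt); the helper below is a hypothetical sketch, assuming each pool's page heap claims KPageHeap::CalculateManagementOverheadSize(pool_size) bytes of bitmap storage from a cursor walking the management region.

// Hypothetical helper; `management_cursor` walks the region handed to Initialize above.
VAddr AssignManagementStorage(VAddr management_cursor, std::size_t pool_size,
                              VAddr* out_bitmap_storage) {
    const std::size_t needed = KPageHeap::CalculateManagementOverheadSize(pool_size);
    *out_bitmap_storage = management_cursor; // this pool's bitmaps live here
    return management_cursor + needed;       // the next pool starts after them
}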
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 0e04fc3bb..c1254b18d 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h | |||
| @@ -41,6 +41,7 @@ class KClientSession; | |||
| 41 | class KEvent; | 41 | class KEvent; |
| 42 | class KHandleTable; | 42 | class KHandleTable; |
| 43 | class KLinkedListNode; | 43 | class KLinkedListNode; |
| 44 | class KMemoryLayout; | ||
| 44 | class KMemoryManager; | 45 | class KMemoryManager; |
| 45 | class KPort; | 46 | class KPort; |
| 46 | class KProcess; | 47 | class KProcess; |
| @@ -350,6 +351,9 @@ public: | |||
| 350 | /// Gets the current worker task manager, used for dispatching KThread/KProcess tasks. | 351 | /// Gets the current worker task manager, used for dispatching KThread/KProcess tasks. |
| 351 | const KWorkerTaskManager& WorkerTaskManager() const; | 352 | const KWorkerTaskManager& WorkerTaskManager() const; |
| 352 | 353 | ||
| 354 | /// Gets the memory layout. | ||
| 355 | const KMemoryLayout& MemoryLayout() const; | ||
| 356 | |||
| 353 | private: | 357 | private: |
| 354 | friend class KProcess; | 358 | friend class KProcess; |
| 355 | friend class KThread; | 359 | friend class KThread; |
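Example use of the new KernelCore::MemoryLayout() accessor (the caller and log message are illustrative, not from the commit); GetTotalAndKernelMemorySizes is the same query the resource-limit setup above relies on:

void LogMemoryTotals(const Kernel::KernelCore& kernel) {
    const auto [total_size, kernel_size] =
        kernel.MemoryLayout().GetTotalAndKernelMemorySizes();
    LOG_INFO(Kernel, "DRAM size={:#x}, kernel reserved={:#x}", total_size, kernel_size);
}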