Diffstat
| -rw-r--r-- | src/core/hle/kernel/k_memory_manager.cpp | 469 |
| -rw-r--r-- | src/core/hle/kernel/k_memory_manager.h | 167 |
| -rw-r--r-- | src/core/hle/kernel/k_page_table.cpp | 46 |
| -rw-r--r-- | src/core/hle/kernel/k_page_table.h | 2 |
| -rw-r--r-- | src/core/hle/kernel/kernel.cpp | 33 |
| -rw-r--r-- | src/core/hle/kernel/kernel.h | 4 |
6 files changed, 548 insertions, 173 deletions
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 1b44541b1..a2f18f643 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
| @@ -10,189 +10,412 @@ | |||
| 10 | #include "common/scope_exit.h" | 10 | #include "common/scope_exit.h" |
| 11 | #include "core/core.h" | 11 | #include "core/core.h" |
| 12 | #include "core/device_memory.h" | 12 | #include "core/device_memory.h" |
| 13 | #include "core/hle/kernel/initial_process.h" | ||
| 13 | #include "core/hle/kernel/k_memory_manager.h" | 14 | #include "core/hle/kernel/k_memory_manager.h" |
| 14 | #include "core/hle/kernel/k_page_linked_list.h" | 15 | #include "core/hle/kernel/k_page_linked_list.h" |
| 16 | #include "core/hle/kernel/kernel.h" | ||
| 15 | #include "core/hle/kernel/svc_results.h" | 17 | #include "core/hle/kernel/svc_results.h" |
| 18 | #include "core/memory.h" | ||
| 16 | 19 | ||
| 17 | namespace Kernel { | 20 | namespace Kernel { |
| 18 | 21 | ||
| 19 | KMemoryManager::KMemoryManager(Core::System& system_) : system{system_} {} | 22 | namespace { |
| 23 | |||
| 24 | constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) { | ||
| 25 | if ((type | KMemoryRegionType_DramApplicationPool) == type) { | ||
| 26 | return KMemoryManager::Pool::Application; | ||
| 27 | } else if ((type | KMemoryRegionType_DramAppletPool) == type) { | ||
| 28 | return KMemoryManager::Pool::Applet; | ||
| 29 | } else if ((type | KMemoryRegionType_DramSystemPool) == type) { | ||
| 30 | return KMemoryManager::Pool::System; | ||
| 31 | } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) { | ||
| 32 | return KMemoryManager::Pool::SystemNonSecure; | ||
| 33 | } else { | ||
| 34 | UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool"); | ||
| 35 | return {}; | ||
| 36 | } | ||
| 37 | } | ||
| 20 | 38 | ||
| 21 | std::size_t KMemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) { | 39 | } // namespace |
| 22 | const auto size{end_address - start_address}; | 40 | |
| 41 | KMemoryManager::KMemoryManager(Core::System& system_) | ||
| 42 | : system{system_}, pool_locks{ | ||
| 43 | KLightLock{system_.Kernel()}, | ||
| 44 | KLightLock{system_.Kernel()}, | ||
| 45 | KLightLock{system_.Kernel()}, | ||
| 46 | KLightLock{system_.Kernel()}, | ||
| 47 | } {} | ||
| 48 | |||
| 49 | void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) { | ||
| 50 | |||
| 51 | // Clear the management region to zero. | ||
| 52 | const VAddr management_region_end = management_region + management_region_size; | ||
| 53 | |||
| 54 | // Reset our manager count. | ||
| 55 | num_managers = 0; | ||
| 56 | |||
| 57 | // Traverse the virtual memory layout tree, initializing each manager as appropriate. | ||
| 58 | while (num_managers != MaxManagerCount) { | ||
| 59 | // Locate the region that should initialize the current manager. | ||
| 60 | PAddr region_address = 0; | ||
| 61 | size_t region_size = 0; | ||
| 62 | Pool region_pool = Pool::Count; | ||
| 63 | for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { | ||
| 64 | // We only care about regions that we need to create managers for. | ||
| 65 | if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { | ||
| 66 | continue; | ||
| 67 | } | ||
| 23 | 68 | ||
| 24 | // Calculate metadata sizes | 69 | // We want to initialize the managers in order. |
| 25 | const auto ref_count_size{(size / PageSize) * sizeof(u16)}; | 70 | if (it.GetAttributes() != num_managers) { |
| 26 | const auto optimize_map_size{(Common::AlignUp((size / PageSize), 64) / 64) * sizeof(u64)}; | 71 | continue; |
| 27 | const auto manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)}; | 72 | } |
| 28 | const auto page_heap_size{KPageHeap::CalculateManagementOverheadSize(size)}; | ||
| 29 | const auto total_metadata_size{manager_size + page_heap_size}; | ||
| 30 | ASSERT(manager_size <= total_metadata_size); | ||
| 31 | ASSERT(Common::IsAligned(total_metadata_size, PageSize)); | ||
| 32 | 73 | ||
| 33 | // Setup region | 74 | const PAddr cur_start = it.GetAddress(); |
| 34 | pool = new_pool; | 75 | const PAddr cur_end = it.GetEndAddress(); |
| 76 | |||
| 77 | // Validate the region. | ||
| 78 | ASSERT(cur_end != 0); | ||
| 79 | ASSERT(cur_start != 0); | ||
| 80 | ASSERT(it.GetSize() > 0); | ||
| 81 | |||
| 82 | // Update the region's extents. | ||
| 83 | if (region_address == 0) { | ||
| 84 | region_address = cur_start; | ||
| 85 | region_size = it.GetSize(); | ||
| 86 | region_pool = GetPoolFromMemoryRegionType(it.GetType()); | ||
| 87 | } else { | ||
| 88 | ASSERT(cur_start == region_address + region_size); | ||
| 89 | |||
| 90 | // Update the size. | ||
| 91 | region_size = cur_end - region_address; | ||
| 92 | ASSERT(GetPoolFromMemoryRegionType(it.GetType()) == region_pool); | ||
| 93 | } | ||
| 94 | } | ||
| 95 | |||
| 96 | // If we didn't find a region, we're done. | ||
| 97 | if (region_size == 0) { | ||
| 98 | break; | ||
| 99 | } | ||
| 35 | 100 | ||
| 36 | // Initialize the manager's KPageHeap | 101 | // Initialize a new manager for the region. |
| 37 | heap.Initialize(start_address, size, page_heap_size); | 102 | Impl* manager = std::addressof(managers[num_managers++]); |
| 103 | ASSERT(num_managers <= managers.size()); | ||
| 104 | |||
| 105 | const size_t cur_size = manager->Initialize(region_address, region_size, management_region, | ||
| 106 | management_region_end, region_pool); | ||
| 107 | management_region += cur_size; | ||
| 108 | ASSERT(management_region <= management_region_end); | ||
| 109 | |||
| 110 | // Insert the manager into the pool list. | ||
| 111 | const auto region_pool_index = static_cast<u32>(region_pool); | ||
| 112 | if (pool_managers_tail[region_pool_index] == nullptr) { | ||
| 113 | pool_managers_head[region_pool_index] = manager; | ||
| 114 | } else { | ||
| 115 | pool_managers_tail[region_pool_index]->SetNext(manager); | ||
| 116 | manager->SetPrev(pool_managers_tail[region_pool_index]); | ||
| 117 | } | ||
| 118 | pool_managers_tail[region_pool_index] = manager; | ||
| 119 | } | ||
| 38 | 120 | ||
| 39 | // Free the memory to the heap | 121 | // Free each region to its corresponding heap. |
| 40 | heap.Free(start_address, size / PageSize); | 122 | size_t reserved_sizes[MaxManagerCount] = {}; |
| 123 | const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress(); | ||
| 124 | const PAddr ini_end = ini_start + InitialProcessBinarySizeMax; | ||
| 125 | const PAddr ini_last = ini_end - 1; | ||
| 126 | for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { | ||
| 127 | if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { | ||
| 128 | // Get the manager for the region. | ||
| 129 | auto index = it.GetAttributes(); | ||
| 130 | auto& manager = managers[index]; | ||
| 131 | |||
| 132 | const PAddr cur_start = it.GetAddress(); | ||
| 133 | const PAddr cur_last = it.GetLastAddress(); | ||
| 134 | const PAddr cur_end = it.GetEndAddress(); | ||
| 135 | |||
| 136 | if (cur_start <= ini_start && ini_last <= cur_last) { | ||
| 137 | // Free memory before the ini to the heap. | ||
| 138 | if (cur_start != ini_start) { | ||
| 139 | manager.Free(cur_start, (ini_start - cur_start) / PageSize); | ||
| 140 | } | ||
| 41 | 141 | ||
| 42 | // Update the heap's used size | 142 | // Open/reserve the ini memory. |
| 43 | heap.UpdateUsedSize(); | 143 | manager.OpenFirst(ini_start, InitialProcessBinarySizeMax / PageSize); |
| 144 | reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax; | ||
| 44 | 145 | ||
| 45 | return total_metadata_size; | 146 | // Free memory after the ini to the heap. |
| 46 | } | 147 | if (ini_last != cur_last) { |
| 148 | ASSERT(cur_end != 0); | ||
| 149 | manager.Free(ini_end, cur_end - ini_end); | ||
| 150 | } | ||
| 151 | } else { | ||
| 152 | // Ensure there's no partial overlap with the ini image. | ||
| 153 | if (cur_start <= ini_last) { | ||
| 154 | ASSERT(cur_last < ini_start); | ||
| 155 | } else { | ||
| 156 | // Otherwise, check the region for general validity. | ||
| 157 | ASSERT(cur_end != 0); | ||
| 158 | } | ||
| 47 | 159 | ||
| 48 | void KMemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) { | 160 | // Free the memory to the heap. |
| 49 | ASSERT(pool < Pool::Count); | 161 | manager.Free(cur_start, it.GetSize() / PageSize); |
| 50 | managers[static_cast<std::size_t>(pool)].Initialize(pool, start_address, end_address); | 162 | } |
| 163 | } | ||
| 164 | } | ||
| 165 | |||
| 166 | // Update the used size for all managers. | ||
| 167 | for (size_t i = 0; i < num_managers; ++i) { | ||
| 168 | managers[i].SetInitialUsedHeapSize(reserved_sizes[i]); | ||
| 169 | } | ||
| 51 | } | 170 | } |
| 52 | 171 | ||
| 53 | VAddr KMemoryManager::AllocateAndOpenContinuous(std::size_t num_pages, std::size_t align_pages, | 172 | PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) { |
| 54 | u32 option) { | 173 | // Early return if we're allocating no pages. |
| 55 | // Early return if we're allocating no pages | ||
| 56 | if (num_pages == 0) { | 174 | if (num_pages == 0) { |
| 57 | return {}; | 175 | return 0; |
| 58 | } | 176 | } |
| 59 | 177 | ||
| 60 | // Lock the pool that we're allocating from | 178 | // Lock the pool that we're allocating from. |
| 61 | const auto [pool, dir] = DecodeOption(option); | 179 | const auto [pool, dir] = DecodeOption(option); |
| 62 | const auto pool_index{static_cast<std::size_t>(pool)}; | 180 | KScopedLightLock lk(pool_locks[static_cast<std::size_t>(pool)]); |
| 63 | std::lock_guard lock{pool_locks[pool_index]}; | 181 | |
| 64 | 182 | // Choose a heap based on our page size request. | |
| 65 | // Choose a heap based on our page size request | 183 | const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages); |
| 66 | const s32 heap_index{KPageHeap::GetAlignedBlockIndex(num_pages, align_pages)}; | 184 | |
| 67 | 185 | // Loop, trying to iterate from each block. | |
| 68 | // Loop, trying to iterate from each block | 186 | Impl* chosen_manager = nullptr; |
| 69 | // TODO (bunnei): Support multiple managers | 187 | PAddr allocated_block = 0; |
| 70 | Impl& chosen_manager{managers[pool_index]}; | 188 | for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; |
| 71 | VAddr allocated_block{chosen_manager.AllocateBlock(heap_index, false)}; | 189 | chosen_manager = this->GetNextManager(chosen_manager, dir)) { |
| 190 | allocated_block = chosen_manager->AllocateBlock(heap_index, true); | ||
| 191 | if (allocated_block != 0) { | ||
| 192 | break; | ||
| 193 | } | ||
| 194 | } | ||
| 72 | 195 | ||
| 73 | // If we failed to allocate, quit now | 196 | // If we failed to allocate, quit now. |
| 74 | if (!allocated_block) { | 197 | if (allocated_block == 0) { |
| 75 | return {}; | 198 | return 0; |
| 76 | } | 199 | } |
| 77 | 200 | ||
| 78 | // If we allocated more than we need, free some | 201 | // If we allocated more than we need, free some. |
| 79 | const auto allocated_pages{KPageHeap::GetBlockNumPages(heap_index)}; | 202 | const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index); |
| 80 | if (allocated_pages > num_pages) { | 203 | if (allocated_pages > num_pages) { |
| 81 | chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages); | 204 | chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages); |
| 82 | } | 205 | } |
| 83 | 206 | ||
| 207 | // Open the first reference to the pages. | ||
| 208 | chosen_manager->OpenFirst(allocated_block, num_pages); | ||
| 209 | |||
| 84 | return allocated_block; | 210 | return allocated_block; |
| 85 | } | 211 | } |
| 86 | 212 | ||
| 87 | ResultCode KMemoryManager::Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, | 213 | ResultCode KMemoryManager::AllocatePageGroupImpl(KPageLinkedList* out, size_t num_pages, Pool pool, |
| 88 | Direction dir, u32 heap_fill_value) { | 214 | Direction dir, bool random) { |
| 89 | ASSERT(page_list.GetNumPages() == 0); | 215 | // Choose a heap based on our page size request. |
| 216 | const s32 heap_index = KPageHeap::GetBlockIndex(num_pages); | ||
| 217 | R_UNLESS(0 <= heap_index, ResultOutOfMemory); | ||
| 218 | |||
| 219 | // Ensure that we don't leave anything un-freed. | ||
| 220 | auto group_guard = SCOPE_GUARD({ | ||
| 221 | for (const auto& it : out->Nodes()) { | ||
| 222 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), it.GetAddress()); | ||
| 223 | const size_t num_pages_to_free = | ||
| 224 | std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize); | ||
| 225 | manager.Free(it.GetAddress(), num_pages_to_free); | ||
| 226 | } | ||
| 227 | }); | ||
| 90 | 228 | ||
| 91 | // Early return if we're allocating no pages | 229 | // Keep allocating until we've allocated all our pages. |
| 92 | if (num_pages == 0) { | 230 | for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) { |
| 93 | return ResultSuccess; | 231 | const size_t pages_per_alloc = KPageHeap::GetBlockNumPages(index); |
| 94 | } | 232 | for (Impl* cur_manager = this->GetFirstManager(pool, dir); cur_manager != nullptr; |
| 233 | cur_manager = this->GetNextManager(cur_manager, dir)) { | ||
| 234 | while (num_pages >= pages_per_alloc) { | ||
| 235 | // Allocate a block. | ||
| 236 | PAddr allocated_block = cur_manager->AllocateBlock(index, random); | ||
| 237 | if (allocated_block == 0) { | ||
| 238 | break; | ||
| 239 | } | ||
| 95 | 240 | ||
| 96 | // Lock the pool that we're allocating from | 241 | // Safely add it to our group. |
| 97 | const auto pool_index{static_cast<std::size_t>(pool)}; | 242 | { |
| 98 | std::lock_guard lock{pool_locks[pool_index]}; | 243 | auto block_guard = |
| 244 | SCOPE_GUARD({ cur_manager->Free(allocated_block, pages_per_alloc); }); | ||
| 245 | R_TRY(out->AddBlock(allocated_block, pages_per_alloc)); | ||
| 246 | block_guard.Cancel(); | ||
| 247 | } | ||
| 99 | 248 | ||
| 100 | // Choose a heap based on our page size request | 249 | num_pages -= pages_per_alloc; |
| 101 | const s32 heap_index{KPageHeap::GetBlockIndex(num_pages)}; | 250 | } |
| 102 | if (heap_index < 0) { | 251 | } |
| 103 | return ResultOutOfMemory; | ||
| 104 | } | 252 | } |
| 105 | 253 | ||
| 106 | // TODO (bunnei): Support multiple managers | 254 | // Only succeed if we allocated as many pages as we wanted. |
| 107 | Impl& chosen_manager{managers[pool_index]}; | 255 | R_UNLESS(num_pages == 0, ResultOutOfMemory); |
| 108 | 256 | ||
| 109 | // Ensure that we don't leave anything un-freed | 257 | // We succeeded! |
| 110 | auto group_guard = detail::ScopeExit([&] { | 258 | group_guard.Cancel(); |
| 111 | for (const auto& it : page_list.Nodes()) { | 259 | return ResultSuccess; |
| 112 | const auto min_num_pages{std::min<size_t>( | 260 | } |
| 113 | it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)}; | ||
| 114 | chosen_manager.Free(it.GetAddress(), min_num_pages); | ||
| 115 | } | ||
| 116 | }); | ||
| 117 | 261 | ||
| 118 | // Keep allocating until we've allocated all our pages | 262 | ResultCode KMemoryManager::AllocateAndOpen(KPageLinkedList* out, size_t num_pages, u32 option) { |
| 119 | for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) { | 263 | ASSERT(out != nullptr); |
| 120 | const auto pages_per_alloc{KPageHeap::GetBlockNumPages(index)}; | 264 | ASSERT(out->GetNumPages() == 0); |
| 121 | 265 | ||
| 122 | while (num_pages >= pages_per_alloc) { | 266 | // Early return if we're allocating no pages. |
| 123 | // Allocate a block | 267 | R_SUCCEED_IF(num_pages == 0); |
| 124 | VAddr allocated_block{chosen_manager.AllocateBlock(index, false)}; | ||
| 125 | if (!allocated_block) { | ||
| 126 | break; | ||
| 127 | } | ||
| 128 | 268 | ||
| 129 | // Safely add it to our group | 269 | // Lock the pool that we're allocating from. |
| 130 | { | 270 | const auto [pool, dir] = DecodeOption(option); |
| 131 | auto block_guard = detail::ScopeExit( | 271 | KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]); |
| 132 | [&] { chosen_manager.Free(allocated_block, pages_per_alloc); }); | 272 | |
| 273 | // Allocate the page group. | ||
| 274 | R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false)); | ||
| 275 | |||
| 276 | // Open the first reference to the pages. | ||
| 277 | for (const auto& block : out->Nodes()) { | ||
| 278 | PAddr cur_address = block.GetAddress(); | ||
| 279 | size_t remaining_pages = block.GetNumPages(); | ||
| 280 | while (remaining_pages > 0) { | ||
| 281 | // Get the manager for the current address. | ||
| 282 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address); | ||
| 283 | |||
| 284 | // Process part or all of the block. | ||
| 285 | const size_t cur_pages = | ||
| 286 | std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); | ||
| 287 | manager.OpenFirst(cur_address, cur_pages); | ||
| 288 | |||
| 289 | // Advance. | ||
| 290 | cur_address += cur_pages * PageSize; | ||
| 291 | remaining_pages -= cur_pages; | ||
| 292 | } | ||
| 293 | } | ||
| 133 | 294 | ||
| 134 | if (const ResultCode result{page_list.AddBlock(allocated_block, pages_per_alloc)}; | 295 | return ResultSuccess; |
| 135 | result.IsError()) { | 296 | } |
| 136 | return result; | ||
| 137 | } | ||
| 138 | 297 | ||
| 139 | block_guard.Cancel(); | 298 | ResultCode KMemoryManager::AllocateAndOpenForProcess(KPageLinkedList* out, size_t num_pages, |
| 140 | } | 299 | u32 option, u64 process_id, u8 fill_pattern) { |
| 300 | ASSERT(out != nullptr); | ||
| 301 | ASSERT(out->GetNumPages() == 0); | ||
| 141 | 302 | ||
| 142 | num_pages -= pages_per_alloc; | 303 | // Decode the option. |
| 143 | } | 304 | const auto [pool, dir] = DecodeOption(option); |
| 144 | } | ||
| 145 | 305 | ||
| 146 | // Clear allocated memory. | 306 | // Allocate the memory. |
| 147 | for (const auto& it : page_list.Nodes()) { | 307 | { |
| 148 | std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value, | 308 | // Lock the pool that we're allocating from. |
| 149 | it.GetSize()); | 309 | KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]); |
| 310 | |||
| 311 | // Allocate the page group. | ||
| 312 | R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false)); | ||
| 313 | |||
| 314 | // Open the first reference to the pages. | ||
| 315 | for (const auto& block : out->Nodes()) { | ||
| 316 | PAddr cur_address = block.GetAddress(); | ||
| 317 | size_t remaining_pages = block.GetNumPages(); | ||
| 318 | while (remaining_pages > 0) { | ||
| 319 | // Get the manager for the current address. | ||
| 320 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address); | ||
| 321 | |||
| 322 | // Process part or all of the block. | ||
| 323 | const size_t cur_pages = | ||
| 324 | std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); | ||
| 325 | manager.OpenFirst(cur_address, cur_pages); | ||
| 326 | |||
| 327 | // Advance. | ||
| 328 | cur_address += cur_pages * PageSize; | ||
| 329 | remaining_pages -= cur_pages; | ||
| 330 | } | ||
| 331 | } | ||
| 150 | } | 332 | } |
| 151 | 333 | ||
| 152 | // Only succeed if we allocated as many pages as we wanted | 334 | // Set all the allocated memory. |
| 153 | if (num_pages) { | 335 | for (const auto& block : out->Nodes()) { |
| 154 | return ResultOutOfMemory; | 336 | std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern, |
| 337 | block.GetSize()); | ||
| 155 | } | 338 | } |
| 156 | 339 | ||
| 157 | // We succeeded! | ||
| 158 | group_guard.Cancel(); | ||
| 159 | |||
| 160 | return ResultSuccess; | 340 | return ResultSuccess; |
| 161 | } | 341 | } |
| 162 | 342 | ||
| 163 | ResultCode KMemoryManager::Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, | 343 | void KMemoryManager::Open(PAddr address, size_t num_pages) { |
| 164 | Direction dir, u32 heap_fill_value) { | 344 | // Repeatedly open references until we've done so for all pages. |
| 165 | // Early return if we're freeing no pages | 345 | while (num_pages) { |
| 166 | if (!num_pages) { | 346 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address); |
| 167 | return ResultSuccess; | 347 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); |
| 348 | |||
| 349 | { | ||
| 350 | KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]); | ||
| 351 | manager.Open(address, cur_pages); | ||
| 352 | } | ||
| 353 | |||
| 354 | num_pages -= cur_pages; | ||
| 355 | address += cur_pages * PageSize; | ||
| 168 | } | 356 | } |
| 357 | } | ||
| 169 | 358 | ||
| 170 | // Lock the pool that we're freeing from | 359 | void KMemoryManager::Close(PAddr address, size_t num_pages) { |
| 171 | const auto pool_index{static_cast<std::size_t>(pool)}; | 360 | // Repeatedly close references until we've done so for all pages. |
| 172 | std::lock_guard lock{pool_locks[pool_index]}; | 361 | while (num_pages) { |
| 362 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address); | ||
| 363 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); | ||
| 173 | 364 | ||
| 174 | // TODO (bunnei): Support multiple managers | 365 | { |
| 175 | Impl& chosen_manager{managers[pool_index]}; | 366 | KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]); |
| 367 | manager.Close(address, cur_pages); | ||
| 368 | } | ||
| 176 | 369 | ||
| 177 | // Free all of the pages | 370 | num_pages -= cur_pages; |
| 178 | for (const auto& it : page_list.Nodes()) { | 371 | address += cur_pages * PageSize; |
| 179 | const auto min_num_pages{std::min<size_t>( | ||
| 180 | it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)}; | ||
| 181 | chosen_manager.Free(it.GetAddress(), min_num_pages); | ||
| 182 | } | 372 | } |
| 373 | } | ||
| 183 | 374 | ||
| 184 | return ResultSuccess; | 375 | void KMemoryManager::Close(const KPageLinkedList& pg) { |
| 376 | for (const auto& node : pg.Nodes()) { | ||
| 377 | Close(node.GetAddress(), node.GetNumPages()); | ||
| 378 | } | ||
| 379 | } | ||
| 380 | void KMemoryManager::Open(const KPageLinkedList& pg) { | ||
| 381 | for (const auto& node : pg.Nodes()) { | ||
| 382 | Open(node.GetAddress(), node.GetNumPages()); | ||
| 383 | } | ||
| 384 | } | ||
| 385 | |||
| 386 | size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management, | ||
| 387 | VAddr management_end, Pool p) { | ||
| 388 | // Calculate management sizes. | ||
| 389 | const size_t ref_count_size = (size / PageSize) * sizeof(u16); | ||
| 390 | const size_t optimize_map_size = CalculateOptimizedProcessOverheadSize(size); | ||
| 391 | const size_t manager_size = Common::AlignUp(optimize_map_size + ref_count_size, PageSize); | ||
| 392 | const size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(size); | ||
| 393 | const size_t total_management_size = manager_size + page_heap_size; | ||
| 394 | ASSERT(manager_size <= total_management_size); | ||
| 395 | ASSERT(management + total_management_size <= management_end); | ||
| 396 | ASSERT(Common::IsAligned(total_management_size, PageSize)); | ||
| 397 | |||
| 398 | // Setup region. | ||
| 399 | pool = p; | ||
| 400 | management_region = management; | ||
| 401 | page_reference_counts.resize( | ||
| 402 | Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize); | ||
| 403 | ASSERT(Common::IsAligned(management_region, PageSize)); | ||
| 404 | |||
| 405 | // Initialize the manager's KPageHeap. | ||
| 406 | heap.Initialize(address, size, management + manager_size, page_heap_size); | ||
| 407 | |||
| 408 | return total_management_size; | ||
| 185 | } | 409 | } |
| 186 | 410 | ||
| 187 | std::size_t KMemoryManager::Impl::CalculateManagementOverheadSize(std::size_t region_size) { | 411 | size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) { |
| 188 | const std::size_t ref_count_size = (region_size / PageSize) * sizeof(u16); | 412 | const size_t ref_count_size = (region_size / PageSize) * sizeof(u16); |
| 189 | const std::size_t optimize_map_size = | 413 | const size_t optimize_map_size = |
| 190 | (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / | 414 | (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / |
| 191 | Common::BitSize<u64>()) * | 415 | Common::BitSize<u64>()) * |
| 192 | sizeof(u64); | 416 | sizeof(u64); |
| 193 | const std::size_t manager_meta_size = | 417 | const size_t manager_meta_size = Common::AlignUp(optimize_map_size + ref_count_size, PageSize); |
| 194 | Common::AlignUp(optimize_map_size + ref_count_size, PageSize); | 418 | const size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region_size); |
| 195 | const std::size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region_size); | ||
| 196 | return manager_meta_size + page_heap_size; | 419 | return manager_meta_size + page_heap_size; |
| 197 | } | 420 | } |
| 198 | 421 | ||
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h
index 17c7690f1..18775b262 100644
--- a/src/core/hle/kernel/k_memory_manager.h
+++ b/src/core/hle/kernel/k_memory_manager.h
| @@ -5,11 +5,12 @@ | |||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <array> | 7 | #include <array> |
| 8 | #include <mutex> | ||
| 9 | #include <tuple> | 8 | #include <tuple> |
| 10 | 9 | ||
| 11 | #include "common/common_funcs.h" | 10 | #include "common/common_funcs.h" |
| 12 | #include "common/common_types.h" | 11 | #include "common/common_types.h" |
| 12 | #include "core/hle/kernel/k_light_lock.h" | ||
| 13 | #include "core/hle/kernel/k_memory_layout.h" | ||
| 13 | #include "core/hle/kernel/k_page_heap.h" | 14 | #include "core/hle/kernel/k_page_heap.h" |
| 14 | #include "core/hle/result.h" | 15 | #include "core/hle/result.h" |
| 15 | 16 | ||
| @@ -52,22 +53,33 @@ public: | |||
| 52 | 53 | ||
| 53 | explicit KMemoryManager(Core::System& system_); | 54 | explicit KMemoryManager(Core::System& system_); |
| 54 | 55 | ||
| 55 | constexpr std::size_t GetSize(Pool pool) const { | 56 | void Initialize(VAddr management_region, size_t management_region_size); |
| 56 | return managers[static_cast<std::size_t>(pool)].GetSize(); | 57 | |
| 58 | constexpr size_t GetSize(Pool pool) const { | ||
| 59 | constexpr Direction GetSizeDirection = Direction::FromFront; | ||
| 60 | size_t total = 0; | ||
| 61 | for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr; | ||
| 62 | manager = this->GetNextManager(manager, GetSizeDirection)) { | ||
| 63 | total += manager->GetSize(); | ||
| 64 | } | ||
| 65 | return total; | ||
| 57 | } | 66 | } |
| 58 | 67 | ||
| 59 | void InitializeManager(Pool pool, u64 start_address, u64 end_address); | 68 | PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option); |
| 69 | ResultCode AllocateAndOpen(KPageLinkedList* out, size_t num_pages, u32 option); | ||
| 70 | ResultCode AllocateAndOpenForProcess(KPageLinkedList* out, size_t num_pages, u32 option, | ||
| 71 | u64 process_id, u8 fill_pattern); | ||
| 72 | |||
| 73 | static constexpr size_t MaxManagerCount = 10; | ||
| 60 | 74 | ||
| 61 | VAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option); | 75 | void Close(PAddr address, size_t num_pages); |
| 62 | ResultCode Allocate(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, Direction dir, | 76 | void Close(const KPageLinkedList& pg); |
| 63 | u32 heap_fill_value = 0); | ||
| 64 | ResultCode Free(KPageLinkedList& page_list, std::size_t num_pages, Pool pool, Direction dir, | ||
| 65 | u32 heap_fill_value = 0); | ||
| 66 | 77 | ||
| 67 | static constexpr std::size_t MaxManagerCount = 10; | 78 | void Open(PAddr address, size_t num_pages); |
| 79 | void Open(const KPageLinkedList& pg); | ||
| 68 | 80 | ||
| 69 | public: | 81 | public: |
| 70 | static std::size_t CalculateManagementOverheadSize(std::size_t region_size) { | 82 | static size_t CalculateManagementOverheadSize(size_t region_size) { |
| 71 | return Impl::CalculateManagementOverheadSize(region_size); | 83 | return Impl::CalculateManagementOverheadSize(region_size); |
| 72 | } | 84 | } |
| 73 | 85 | ||
| @@ -100,17 +112,26 @@ private: | |||
| 100 | Impl() = default; | 112 | Impl() = default; |
| 101 | ~Impl() = default; | 113 | ~Impl() = default; |
| 102 | 114 | ||
| 103 | std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address); | 115 | size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end, |
| 116 | Pool p); | ||
| 104 | 117 | ||
| 105 | VAddr AllocateBlock(s32 index, bool random) { | 118 | VAddr AllocateBlock(s32 index, bool random) { |
| 106 | return heap.AllocateBlock(index, random); | 119 | return heap.AllocateBlock(index, random); |
| 107 | } | 120 | } |
| 108 | 121 | ||
| 109 | void Free(VAddr addr, std::size_t num_pages) { | 122 | void Free(VAddr addr, size_t num_pages) { |
| 110 | heap.Free(addr, num_pages); | 123 | heap.Free(addr, num_pages); |
| 111 | } | 124 | } |
| 112 | 125 | ||
| 113 | constexpr std::size_t GetSize() const { | 126 | void SetInitialUsedHeapSize(size_t reserved_size) { |
| 127 | heap.SetInitialUsedSize(reserved_size); | ||
| 128 | } | ||
| 129 | |||
| 130 | constexpr Pool GetPool() const { | ||
| 131 | return pool; | ||
| 132 | } | ||
| 133 | |||
| 134 | constexpr size_t GetSize() const { | ||
| 114 | return heap.GetSize(); | 135 | return heap.GetSize(); |
| 115 | } | 136 | } |
| 116 | 137 | ||
| @@ -122,10 +143,88 @@ private: | |||
| 122 | return heap.GetEndAddress(); | 143 | return heap.GetEndAddress(); |
| 123 | } | 144 | } |
| 124 | 145 | ||
| 125 | static std::size_t CalculateManagementOverheadSize(std::size_t region_size); | 146 | constexpr size_t GetPageOffset(PAddr address) const { |
| 147 | return heap.GetPageOffset(address); | ||
| 148 | } | ||
| 149 | |||
| 150 | constexpr size_t GetPageOffsetToEnd(PAddr address) const { | ||
| 151 | return heap.GetPageOffsetToEnd(address); | ||
| 152 | } | ||
| 153 | |||
| 154 | constexpr void SetNext(Impl* n) { | ||
| 155 | next = n; | ||
| 156 | } | ||
| 157 | |||
| 158 | constexpr void SetPrev(Impl* n) { | ||
| 159 | prev = n; | ||
| 160 | } | ||
| 161 | |||
| 162 | constexpr Impl* GetNext() const { | ||
| 163 | return next; | ||
| 164 | } | ||
| 165 | |||
| 166 | constexpr Impl* GetPrev() const { | ||
| 167 | return prev; | ||
| 168 | } | ||
| 169 | |||
| 170 | void OpenFirst(PAddr address, size_t num_pages) { | ||
| 171 | size_t index = this->GetPageOffset(address); | ||
| 172 | const size_t end = index + num_pages; | ||
| 173 | while (index < end) { | ||
| 174 | const RefCount ref_count = (++page_reference_counts[index]); | ||
| 175 | ASSERT(ref_count == 1); | ||
| 126 | 176 | ||
| 127 | static constexpr std::size_t CalculateOptimizedProcessOverheadSize( | 177 | index++; |
| 128 | std::size_t region_size) { | 178 | } |
| 179 | } | ||
| 180 | |||
| 181 | void Open(PAddr address, size_t num_pages) { | ||
| 182 | size_t index = this->GetPageOffset(address); | ||
| 183 | const size_t end = index + num_pages; | ||
| 184 | while (index < end) { | ||
| 185 | const RefCount ref_count = (++page_reference_counts[index]); | ||
| 186 | ASSERT(ref_count > 1); | ||
| 187 | |||
| 188 | index++; | ||
| 189 | } | ||
| 190 | } | ||
| 191 | |||
| 192 | void Close(PAddr address, size_t num_pages) { | ||
| 193 | size_t index = this->GetPageOffset(address); | ||
| 194 | const size_t end = index + num_pages; | ||
| 195 | |||
| 196 | size_t free_start = 0; | ||
| 197 | size_t free_count = 0; | ||
| 198 | while (index < end) { | ||
| 199 | ASSERT(page_reference_counts[index] > 0); | ||
| 200 | const RefCount ref_count = (--page_reference_counts[index]); | ||
| 201 | |||
| 202 | // Keep track of how many zero refcounts we see in a row, to minimize calls to free. | ||
| 203 | if (ref_count == 0) { | ||
| 204 | if (free_count > 0) { | ||
| 205 | free_count++; | ||
| 206 | } else { | ||
| 207 | free_start = index; | ||
| 208 | free_count = 1; | ||
| 209 | } | ||
| 210 | } else { | ||
| 211 | if (free_count > 0) { | ||
| 212 | this->Free(heap.GetAddress() + free_start * PageSize, free_count); | ||
| 213 | free_count = 0; | ||
| 214 | } | ||
| 215 | } | ||
| 216 | |||
| 217 | index++; | ||
| 218 | } | ||
| 219 | |||
| 220 | if (free_count > 0) { | ||
| 221 | this->Free(heap.GetAddress() + free_start * PageSize, free_count); | ||
| 222 | } | ||
| 223 | } | ||
| 224 | |||
| 225 | static size_t CalculateManagementOverheadSize(size_t region_size); | ||
| 226 | |||
| 227 | static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) { | ||
| 129 | return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / | 228 | return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / |
| 130 | Common::BitSize<u64>()) * | 229 | Common::BitSize<u64>()) * |
| 131 | sizeof(u64); | 230 | sizeof(u64); |
| @@ -135,13 +234,45 @@ private: | |||
| 135 | using RefCount = u16; | 234 | using RefCount = u16; |
| 136 | 235 | ||
| 137 | KPageHeap heap; | 236 | KPageHeap heap; |
| 237 | std::vector<RefCount> page_reference_counts; | ||
| 238 | VAddr management_region{}; | ||
| 138 | Pool pool{}; | 239 | Pool pool{}; |
| 240 | Impl* next{}; | ||
| 241 | Impl* prev{}; | ||
| 139 | }; | 242 | }; |
| 140 | 243 | ||
| 141 | private: | 244 | private: |
| 245 | Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) { | ||
| 246 | return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; | ||
| 247 | } | ||
| 248 | |||
| 249 | const Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) const { | ||
| 250 | return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; | ||
| 251 | } | ||
| 252 | |||
| 253 | constexpr Impl* GetFirstManager(Pool pool, Direction dir) const { | ||
| 254 | return dir == Direction::FromBack ? pool_managers_tail[static_cast<size_t>(pool)] | ||
| 255 | : pool_managers_head[static_cast<size_t>(pool)]; | ||
| 256 | } | ||
| 257 | |||
| 258 | constexpr Impl* GetNextManager(Impl* cur, Direction dir) const { | ||
| 259 | if (dir == Direction::FromBack) { | ||
| 260 | return cur->GetPrev(); | ||
| 261 | } else { | ||
| 262 | return cur->GetNext(); | ||
| 263 | } | ||
| 264 | } | ||
| 265 | |||
| 266 | ResultCode AllocatePageGroupImpl(KPageLinkedList* out, size_t num_pages, Pool pool, | ||
| 267 | Direction dir, bool random); | ||
| 268 | |||
| 269 | private: | ||
| 142 | Core::System& system; | 270 | Core::System& system; |
| 143 | std::array<std::mutex, static_cast<std::size_t>(Pool::Count)> pool_locks; | 271 | std::array<KLightLock, static_cast<size_t>(Pool::Count)> pool_locks; |
| 272 | std::array<Impl*, MaxManagerCount> pool_managers_head{}; | ||
| 273 | std::array<Impl*, MaxManagerCount> pool_managers_tail{}; | ||
| 144 | std::array<Impl, MaxManagerCount> managers; | 274 | std::array<Impl, MaxManagerCount> managers; |
| 275 | size_t num_managers{}; | ||
| 145 | }; | 276 | }; |
| 146 | 277 | ||
| 147 | } // namespace Kernel | 278 | } // namespace Kernel |
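The per-page reference counting added to Impl is the heart of this header change: OpenFirst() is used for freshly allocated pages (the count must become exactly 1), Open() takes additional references, and Close() decrements counts and frees maximal runs of pages that reach zero in a single heap call. A small self-contained model of that run-coalescing idea, written for illustration only (free_pages stands in for KPageHeap::Free, and the vector for page_reference_counts):

#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

// Decrement refcounts for [first_page, first_page + num_pages) and free
// contiguous runs that hit zero with as few free_pages() calls as possible.
void CloseRun(std::vector<std::uint16_t>& ref_counts, std::size_t first_page,
              std::size_t num_pages,
              const std::function<void(std::size_t, std::size_t)>& free_pages) {
    std::size_t free_start = 0;
    std::size_t free_count = 0;
    for (std::size_t i = first_page; i < first_page + num_pages; ++i) {
        if (--ref_counts[i] == 0) {
            if (free_count == 0) {
                free_start = i; // Start a new run of freeable pages.
            }
            ++free_count;
        } else if (free_count > 0) {
            free_pages(free_start, free_count); // Flush the finished run.
            free_count = 0;
        }
    }
    if (free_count > 0) {
        free_pages(free_start, free_count); // Flush the trailing run.
    }
}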
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index a23226d70..dfea0b6e2 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
| @@ -273,11 +273,12 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory | |||
| 273 | R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, | 273 | R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, |
| 274 | KMemoryPermission::None, KMemoryPermission::None, | 274 | KMemoryPermission::None, KMemoryPermission::None, |
| 275 | KMemoryAttribute::None, KMemoryAttribute::None)); | 275 | KMemoryAttribute::None, KMemoryAttribute::None)); |
| 276 | KPageLinkedList pg; | ||
| 277 | R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( | ||
| 278 | &pg, num_pages, | ||
| 279 | KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option))); | ||
| 276 | 280 | ||
| 277 | KPageLinkedList page_linked_list; | 281 | R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup)); |
| 278 | R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool, | ||
| 279 | allocation_option)); | ||
| 280 | R_TRY(Operate(addr, num_pages, page_linked_list, OperationType::MapGroup)); | ||
| 281 | 282 | ||
| 282 | block_manager->Update(addr, num_pages, state, perm); | 283 | block_manager->Update(addr, num_pages, state, perm); |
| 283 | 284 | ||
| @@ -443,9 +444,10 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 443 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 444 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 444 | 445 | ||
| 445 | // Allocate pages for the new memory. | 446 | // Allocate pages for the new memory. |
| 446 | KPageLinkedList page_linked_list; | 447 | KPageLinkedList pg; |
| 447 | R_TRY(system.Kernel().MemoryManager().Allocate( | 448 | R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( |
| 448 | page_linked_list, (size - mapped_size) / PageSize, memory_pool, allocation_option)); | 449 | &pg, (size - mapped_size) / PageSize, |
| 450 | KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); | ||
| 449 | 451 | ||
| 450 | // Map the memory. | 452 | // Map the memory. |
| 451 | { | 453 | { |
| @@ -547,7 +549,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 547 | }); | 549 | }); |
| 548 | 550 | ||
| 549 | // Iterate over the memory. | 551 | // Iterate over the memory. |
| 550 | auto pg_it = page_linked_list.Nodes().begin(); | 552 | auto pg_it = pg.Nodes().begin(); |
| 551 | PAddr pg_phys_addr = pg_it->GetAddress(); | 553 | PAddr pg_phys_addr = pg_it->GetAddress(); |
| 552 | size_t pg_pages = pg_it->GetNumPages(); | 554 | size_t pg_pages = pg_it->GetNumPages(); |
| 553 | 555 | ||
| @@ -571,7 +573,7 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 571 | // Check if we're at the end of the physical block. | 573 | // Check if we're at the end of the physical block. |
| 572 | if (pg_pages == 0) { | 574 | if (pg_pages == 0) { |
| 573 | // Ensure there are more pages to map. | 575 | // Ensure there are more pages to map. |
| 574 | ASSERT(pg_it != page_linked_list.Nodes().end()); | 576 | ASSERT(pg_it != pg.Nodes().end()); |
| 575 | 577 | ||
| 576 | // Advance our physical block. | 578 | // Advance our physical block. |
| 577 | ++pg_it; | 579 | ++pg_it; |
| @@ -841,10 +843,14 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { | |||
| 841 | process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); | 843 | process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); |
| 842 | 844 | ||
| 843 | // Update memory blocks. | 845 | // Update memory blocks. |
| 844 | system.Kernel().MemoryManager().Free(pg, size / PageSize, memory_pool, allocation_option); | ||
| 845 | block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None, | 846 | block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None, |
| 846 | KMemoryAttribute::None); | 847 | KMemoryAttribute::None); |
| 847 | 848 | ||
| 849 | // TODO(bunnei): This is a workaround until the next set of changes, where we add reference | ||
| 850 | // counting for mapped pages. Until then, we must manually close the reference to the page | ||
| 851 | // group. | ||
| 852 | system.Kernel().MemoryManager().Close(pg); | ||
| 853 | |||
| 848 | // We succeeded. | 854 | // We succeeded. |
| 849 | remap_guard.Cancel(); | 855 | remap_guard.Cancel(); |
| 850 | 856 | ||
| @@ -1270,9 +1276,16 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) { | |||
| 1270 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 1276 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 1271 | 1277 | ||
| 1272 | // Allocate pages for the heap extension. | 1278 | // Allocate pages for the heap extension. |
| 1273 | KPageLinkedList page_linked_list; | 1279 | KPageLinkedList pg; |
| 1274 | R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, allocation_size / PageSize, | 1280 | R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( |
| 1275 | memory_pool, allocation_option)); | 1281 | &pg, allocation_size / PageSize, |
| 1282 | KMemoryManager::EncodeOption(memory_pool, allocation_option))); | ||
| 1283 | |||
| 1284 | // Clear all the newly allocated pages. | ||
| 1285 | for (const auto& it : pg.Nodes()) { | ||
| 1286 | std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value, | ||
| 1287 | it.GetSize()); | ||
| 1288 | } | ||
| 1276 | 1289 | ||
| 1277 | // Map the pages. | 1290 | // Map the pages. |
| 1278 | { | 1291 | { |
| @@ -1291,7 +1304,7 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) { | |||
| 1291 | 1304 | ||
| 1292 | // Map the pages. | 1305 | // Map the pages. |
| 1293 | const auto num_pages = allocation_size / PageSize; | 1306 | const auto num_pages = allocation_size / PageSize; |
| 1294 | R_TRY(Operate(current_heap_end, num_pages, page_linked_list, OperationType::MapGroup)); | 1307 | R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup)); |
| 1295 | 1308 | ||
| 1296 | // Clear all the newly allocated pages. | 1309 | // Clear all the newly allocated pages. |
| 1297 | for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) { | 1310 | for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) { |
| @@ -1339,8 +1352,9 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, | |||
| 1339 | R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); | 1352 | R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); |
| 1340 | } else { | 1353 | } else { |
| 1341 | KPageLinkedList page_group; | 1354 | KPageLinkedList page_group; |
| 1342 | R_TRY(system.Kernel().MemoryManager().Allocate(page_group, needed_num_pages, memory_pool, | 1355 | R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( |
| 1343 | allocation_option)); | 1356 | &page_group, needed_num_pages, |
| 1357 | KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); | ||
| 1344 | R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); | 1358 | R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); |
| 1345 | } | 1359 | } |
| 1346 | 1360 | ||
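Taken together, the page-table changes in this file follow one pattern: the old Allocate(page_list, num_pages, pool, dir) call is replaced by AllocateAndOpen()/AllocateAndOpenForProcess() with the pool and direction packed into a single option word via EncodeOption(), and the matching release becomes an explicit Close() on the page group instead of Free(). A condensed before/after sketch, simplified from the hunks above rather than a literal excerpt:

// Old shape (removed by this commit):
//   KPageLinkedList page_linked_list;
//   R_TRY(system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages,
//                                                  memory_pool, allocation_option));
//   ...
//   system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool,
//                                        allocation_option);

// New shape: allocation opens the first reference, release is a Close().
KPageLinkedList pg;
R_TRY(system.Kernel().MemoryManager().AllocateAndOpen(
    &pg, num_pages, KMemoryManager::EncodeOption(memory_pool, allocation_option)));
// ...
system.Kernel().MemoryManager().Close(pg);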
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 9a4510849..194177332 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
| @@ -310,6 +310,8 @@ private: | |||
| 310 | bool is_kernel{}; | 310 | bool is_kernel{}; |
| 311 | bool is_aslr_enabled{}; | 311 | bool is_aslr_enabled{}; |
| 312 | 312 | ||
| 313 | u32 heap_fill_value{}; | ||
| 314 | |||
| 313 | KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application}; | 315 | KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application}; |
| 314 | KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront}; | 316 | KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront}; |
| 315 | 317 | ||
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 797f47021..eda67d933 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
| @@ -70,13 +70,12 @@ struct KernelCore::Impl { | |||
| 70 | 70 | ||
| 71 | // Derive the initial memory layout from the emulated board | 71 | // Derive the initial memory layout from the emulated board |
| 72 | Init::InitializeSlabResourceCounts(kernel); | 72 | Init::InitializeSlabResourceCounts(kernel); |
| 73 | KMemoryLayout memory_layout; | 73 | DeriveInitialMemoryLayout(); |
| 74 | DeriveInitialMemoryLayout(memory_layout); | ||
| 75 | Init::InitializeSlabHeaps(system, memory_layout); | 74 | Init::InitializeSlabHeaps(system, memory_layout); |
| 76 | 75 | ||
| 77 | // Initialize kernel memory and resources. | 76 | // Initialize kernel memory and resources. |
| 78 | InitializeSystemResourceLimit(kernel, system.CoreTiming(), memory_layout); | 77 | InitializeSystemResourceLimit(kernel, system.CoreTiming()); |
| 79 | InitializeMemoryLayout(memory_layout); | 78 | InitializeMemoryLayout(); |
| 80 | InitializePageSlab(); | 79 | InitializePageSlab(); |
| 81 | InitializeSchedulers(); | 80 | InitializeSchedulers(); |
| 82 | InitializeSuspendThreads(); | 81 | InitializeSuspendThreads(); |
| @@ -219,8 +218,7 @@ struct KernelCore::Impl { | |||
| 219 | 218 | ||
| 220 | // Creates the default system resource limit | 219 | // Creates the default system resource limit |
| 221 | void InitializeSystemResourceLimit(KernelCore& kernel, | 220 | void InitializeSystemResourceLimit(KernelCore& kernel, |
| 222 | const Core::Timing::CoreTiming& core_timing, | 221 | const Core::Timing::CoreTiming& core_timing) { |
| 223 | const KMemoryLayout& memory_layout) { | ||
| 224 | system_resource_limit = KResourceLimit::Create(system.Kernel()); | 222 | system_resource_limit = KResourceLimit::Create(system.Kernel()); |
| 225 | system_resource_limit->Initialize(&core_timing); | 223 | system_resource_limit->Initialize(&core_timing); |
| 226 | 224 | ||
| @@ -353,7 +351,7 @@ struct KernelCore::Impl { | |||
| 353 | return schedulers[thread_id]->GetCurrentThread(); | 351 | return schedulers[thread_id]->GetCurrentThread(); |
| 354 | } | 352 | } |
| 355 | 353 | ||
| 356 | void DeriveInitialMemoryLayout(KMemoryLayout& memory_layout) { | 354 | void DeriveInitialMemoryLayout() { |
| 357 | // Insert the root region for the virtual memory tree, from which all other regions will | 355 | // Insert the root region for the virtual memory tree, from which all other regions will |
| 358 | // derive. | 356 | // derive. |
| 359 | memory_layout.GetVirtualMemoryRegionTree().InsertDirectly( | 357 | memory_layout.GetVirtualMemoryRegionTree().InsertDirectly( |
| @@ -616,20 +614,16 @@ struct KernelCore::Impl { | |||
| 616 | linear_region_start); | 614 | linear_region_start); |
| 617 | } | 615 | } |
| 618 | 616 | ||
| 619 | void InitializeMemoryLayout(const KMemoryLayout& memory_layout) { | 617 | void InitializeMemoryLayout() { |
| 620 | const auto system_pool = memory_layout.GetKernelSystemPoolRegionPhysicalExtents(); | 618 | const auto system_pool = memory_layout.GetKernelSystemPoolRegionPhysicalExtents(); |
| 621 | const auto applet_pool = memory_layout.GetKernelAppletPoolRegionPhysicalExtents(); | 619 | const auto applet_pool = memory_layout.GetKernelAppletPoolRegionPhysicalExtents(); |
| 622 | const auto application_pool = memory_layout.GetKernelApplicationPoolRegionPhysicalExtents(); | 620 | const auto application_pool = memory_layout.GetKernelApplicationPoolRegionPhysicalExtents(); |
| 623 | 621 | ||
| 624 | // Initialize memory managers | 622 | // Initialize the memory manager. |
| 625 | memory_manager = std::make_unique<KMemoryManager>(system); | 623 | memory_manager = std::make_unique<KMemoryManager>(system); |
| 626 | memory_manager->InitializeManager(KMemoryManager::Pool::Application, | 624 | const auto& management_region = memory_layout.GetPoolManagementRegion(); |
| 627 | application_pool.GetAddress(), | 625 | ASSERT(management_region.GetEndAddress() != 0); |
| 628 | application_pool.GetEndAddress()); | 626 | memory_manager->Initialize(management_region.GetAddress(), management_region.GetSize()); |
| 629 | memory_manager->InitializeManager(KMemoryManager::Pool::Applet, applet_pool.GetAddress(), | ||
| 630 | applet_pool.GetEndAddress()); | ||
| 631 | memory_manager->InitializeManager(KMemoryManager::Pool::System, system_pool.GetAddress(), | ||
| 632 | system_pool.GetEndAddress()); | ||
| 633 | 627 | ||
| 634 | // Setup memory regions for emulated processes | 628 | // Setup memory regions for emulated processes |
| 635 | // TODO(bunnei): These should not be hardcoded regions initialized within the kernel | 629 | // TODO(bunnei): These should not be hardcoded regions initialized within the kernel |
| @@ -770,6 +764,9 @@ struct KernelCore::Impl { | |||
| 770 | Kernel::KSharedMemory* irs_shared_mem{}; | 764 | Kernel::KSharedMemory* irs_shared_mem{}; |
| 771 | Kernel::KSharedMemory* time_shared_mem{}; | 765 | Kernel::KSharedMemory* time_shared_mem{}; |
| 772 | 766 | ||
| 767 | // Memory layout | ||
| 768 | KMemoryLayout memory_layout; | ||
| 769 | |||
| 773 | // Threads used for services | 770 | // Threads used for services |
| 774 | std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads; | 771 | std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads; |
| 775 | Common::ThreadWorker service_threads_manager; | 772 | Common::ThreadWorker service_threads_manager; |
| @@ -1135,6 +1132,10 @@ const KWorkerTaskManager& KernelCore::WorkerTaskManager() const { | |||
| 1135 | return impl->worker_task_manager; | 1132 | return impl->worker_task_manager; |
| 1136 | } | 1133 | } |
| 1137 | 1134 | ||
| 1135 | const KMemoryLayout& KernelCore::MemoryLayout() const { | ||
| 1136 | return impl->memory_layout; | ||
| 1137 | } | ||
| 1138 | |||
| 1138 | bool KernelCore::IsPhantomModeForSingleCore() const { | 1139 | bool KernelCore::IsPhantomModeForSingleCore() const { |
| 1139 | return impl->IsPhantomModeForSingleCore(); | 1140 | return impl->IsPhantomModeForSingleCore(); |
| 1140 | } | 1141 | } |
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 0e04fc3bb..c1254b18d 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
| @@ -41,6 +41,7 @@ class KClientSession; | |||
| 41 | class KEvent; | 41 | class KEvent; |
| 42 | class KHandleTable; | 42 | class KHandleTable; |
| 43 | class KLinkedListNode; | 43 | class KLinkedListNode; |
| 44 | class KMemoryLayout; | ||
| 44 | class KMemoryManager; | 45 | class KMemoryManager; |
| 45 | class KPort; | 46 | class KPort; |
| 46 | class KProcess; | 47 | class KProcess; |
| @@ -350,6 +351,9 @@ public: | |||
| 350 | /// Gets the current worker task manager, used for dispatching KThread/KProcess tasks. | 351 | /// Gets the current worker task manager, used for dispatching KThread/KProcess tasks. |
| 351 | const KWorkerTaskManager& WorkerTaskManager() const; | 352 | const KWorkerTaskManager& WorkerTaskManager() const; |
| 352 | 353 | ||
| 354 | /// Gets the memory layout. | ||
| 355 | const KMemoryLayout& MemoryLayout() const; | ||
| 356 | |||
| 353 | private: | 357 | private: |
| 354 | friend class KProcess; | 358 | friend class KProcess; |
| 355 | friend class KThread; | 359 | friend class KThread; |