| author | 2022-10-29 14:53:28 -0700 |
|---|---|
| committer | 2022-11-03 21:17:07 -0700 |
| commit | ba21ba0c5cd8c3c4e6c0942512c051d1e9b24a5f (patch) |
| tree | 188de16eca88f3e9c3618f145f4ad7c787ecb722 /src |
| parent | core: hle: kernel: Integrate system KSystemResource. (diff) |
core: hle: kernel: k_memory_manager: Refresh.
Diffstat (limited to 'src')

| src/core/hle/kernel/k_memory_manager.cpp | 270 |
|---|---|
| src/core/hle/kernel/k_memory_manager.h | 259 |
| src/core/hle/kernel/k_page_table.cpp | 294 |
| src/core/hle/service/nvdrv/devices/nvmap.cpp | 6 |

4 files changed, 460 insertions, 369 deletions
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 646711505..c4bf306e8 100644

```diff
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -29,43 +29,44 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
     } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
         return KMemoryManager::Pool::SystemNonSecure;
     } else {
-        ASSERT_MSG(false, "InvalidMemoryRegionType for conversion to Pool");
-        return {};
+        UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool");
     }
 }
 
 } // namespace
 
-KMemoryManager::KMemoryManager(Core::System& system_)
-    : system{system_}, pool_locks{
-                           KLightLock{system_.Kernel()},
-                           KLightLock{system_.Kernel()},
-                           KLightLock{system_.Kernel()},
-                           KLightLock{system_.Kernel()},
-                       } {}
+KMemoryManager::KMemoryManager(Core::System& system)
+    : m_system{system}, m_memory_layout{system.Kernel().MemoryLayout()},
+      m_pool_locks{
+          KLightLock{system.Kernel()},
+          KLightLock{system.Kernel()},
+          KLightLock{system.Kernel()},
+          KLightLock{system.Kernel()},
+      } {}
 
 void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {
 
     // Clear the management region to zero.
     const VAddr management_region_end = management_region + management_region_size;
+    // std::memset(GetVoidPointer(management_region), 0, management_region_size);
 
     // Reset our manager count.
-    num_managers = 0;
+    m_num_managers = 0;
 
     // Traverse the virtual memory layout tree, initializing each manager as appropriate.
-    while (num_managers != MaxManagerCount) {
+    while (m_num_managers != MaxManagerCount) {
         // Locate the region that should initialize the current manager.
         PAddr region_address = 0;
         size_t region_size = 0;
         Pool region_pool = Pool::Count;
-        for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
+        for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
             // We only care about regions that we need to create managers for.
             if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
                 continue;
             }
 
             // We want to initialize the managers in order.
-            if (it.GetAttributes() != num_managers) {
+            if (it.GetAttributes() != m_num_managers) {
                 continue;
             }
 
@@ -97,8 +98,8 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
         }
 
         // Initialize a new manager for the region.
-        Impl* manager = std::addressof(managers[num_managers++]);
-        ASSERT(num_managers <= managers.size());
+        Impl* manager = std::addressof(m_managers[m_num_managers++]);
+        ASSERT(m_num_managers <= m_managers.size());
 
         const size_t cur_size = manager->Initialize(region_address, region_size, management_region,
                                                     management_region_end, region_pool);
@@ -107,13 +108,13 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
 
         // Insert the manager into the pool list.
         const auto region_pool_index = static_cast<u32>(region_pool);
-        if (pool_managers_tail[region_pool_index] == nullptr) {
-            pool_managers_head[region_pool_index] = manager;
+        if (m_pool_managers_tail[region_pool_index] == nullptr) {
+            m_pool_managers_head[region_pool_index] = manager;
         } else {
-            pool_managers_tail[region_pool_index]->SetNext(manager);
-            manager->SetPrev(pool_managers_tail[region_pool_index]);
+            m_pool_managers_tail[region_pool_index]->SetNext(manager);
+            manager->SetPrev(m_pool_managers_tail[region_pool_index]);
         }
-        pool_managers_tail[region_pool_index] = manager;
+        m_pool_managers_tail[region_pool_index] = manager;
     }
 
     // Free each region to its corresponding heap.
@@ -121,11 +122,10 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
     const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress();
     const PAddr ini_end = ini_start + InitialProcessBinarySizeMax;
     const PAddr ini_last = ini_end - 1;
-    for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
+    for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
         if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
             // Get the manager for the region.
-            auto index = it.GetAttributes();
-            auto& manager = managers[index];
+            auto& manager = m_managers[it.GetAttributes()];
 
             const PAddr cur_start = it.GetAddress();
             const PAddr cur_last = it.GetLastAddress();
@@ -162,11 +162,19 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio
     }
 
     // Update the used size for all managers.
-    for (size_t i = 0; i < num_managers; ++i) {
-        managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
+    for (size_t i = 0; i < m_num_managers; ++i) {
+        m_managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
     }
 }
 
+Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) {
+    UNREACHABLE();
+}
+
+void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
+    UNREACHABLE();
+}
+
 PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
     // Early return if we're allocating no pages.
     if (num_pages == 0) {
```
```diff
@@ -175,7 +183,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
 
     // Lock the pool that we're allocating from.
     const auto [pool, dir] = DecodeOption(option);
-    KScopedLightLock lk(pool_locks[static_cast<std::size_t>(pool)]);
+    KScopedLightLock lk(m_pool_locks[static_cast<std::size_t>(pool)]);
 
     // Choose a heap based on our page size request.
     const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages);
@@ -185,7 +193,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
     PAddr allocated_block = 0;
     for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr;
          chosen_manager = this->GetNextManager(chosen_manager, dir)) {
-        allocated_block = chosen_manager->AllocateBlock(heap_index, true);
+        allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages);
         if (allocated_block != 0) {
             break;
         }
@@ -196,10 +204,9 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
         return 0;
     }
 
-    // If we allocated more than we need, free some.
-    const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index);
-    if (allocated_pages > num_pages) {
-        chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
+    // Maintain the optimized memory bitmap, if we should.
+    if (m_has_optimized_process[static_cast<size_t>(pool)]) {
+        UNIMPLEMENTED();
     }
 
     // Open the first reference to the pages.
```
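The hunks above replace the old allocate-then-trim pattern (grab the next block size up from the heap, then `Free` the tail pages) with a single `AllocateAligned` call that trims inside the heap. The sketch below is a toy model of the arithmetic the old caller-side path performed; the page counts and power-of-two rounding are invented for illustration and are not yuzu code:

```cpp
#include <cstddef>
#include <cstdio>

// Toy model: a buddy-style heap hands out power-of-two blocks, so a request
// for num_pages used to be satisfied by the next block size up, with the
// excess tail pages freed back afterwards.
int main() {
    const std::size_t num_pages = 13;

    // Round up to the next power of two (the "aligned block" the old code grabbed).
    std::size_t block_pages = 1;
    while (block_pages < num_pages) {
        block_pages *= 2;
    }

    // Old approach: allocate block_pages, then free the unused tail.
    std::printf("request=%zu block=%zu freed_tail=%zu\n", num_pages, block_pages,
                block_pages - num_pages);

    // New approach: AllocateAligned performs the trim inside the heap, so the
    // caller no longer needs the free-the-tail step, and bookkeeping such as
    // the optimized-memory bitmap can be maintained in one place.
    return 0;
}
```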
```diff
@@ -209,20 +216,21 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p
 }
 
 Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool,
-                                             Direction dir, bool random) {
+                                             Direction dir, bool unoptimized, bool random) {
     // Choose a heap based on our page size request.
     const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
     R_UNLESS(0 <= heap_index, ResultOutOfMemory);
 
     // Ensure that we don't leave anything un-freed.
-    auto group_guard = SCOPE_GUARD({
+    ON_RESULT_FAILURE {
         for (const auto& it : out->Nodes()) {
-            auto& manager = this->GetManager(system.Kernel().MemoryLayout(), it.GetAddress());
-            const size_t num_pages_to_free =
+            auto& manager = this->GetManager(it.GetAddress());
+            const size_t node_num_pages =
                 std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
-            manager.Free(it.GetAddress(), num_pages_to_free);
+            manager.Free(it.GetAddress(), node_num_pages);
         }
-    });
+        out->Finalize();
+    };
 
     // Keep allocating until we've allocated all our pages.
     for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) {
@@ -236,12 +244,17 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
                 break;
             }
 
-            // Safely add it to our group.
-            {
-                auto block_guard =
-                    SCOPE_GUARD({ cur_manager->Free(allocated_block, pages_per_alloc); });
-                R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
-                block_guard.Cancel();
+            // Ensure we don't leak the block if we fail.
+            ON_RESULT_FAILURE_2 {
+                cur_manager->Free(allocated_block, pages_per_alloc);
+            };
+
+            // Add the block to our group.
+            R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
+
+            // Maintain the optimized memory bitmap, if we should.
+            if (unoptimized) {
+                UNIMPLEMENTED();
             }
 
             num_pages -= pages_per_alloc;
@@ -253,8 +266,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages,
     R_UNLESS(num_pages == 0, ResultOutOfMemory);
 
     // We succeeded!
-    group_guard.Cancel();
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) {
```
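`ON_RESULT_FAILURE` replaces the manually cancelled `SCOPE_GUARD`: the cleanup now runs automatically whenever the function exits with a failing `Result`, and `R_SUCCEED()` disarms it, so there is no `Cancel()` call to forget. A minimal standalone sketch of that idiom, with invented names; the real macros are part of yuzu's `Result` plumbing and cooperate with `R_TRY` propagation:

```cpp
#include <cstdio>
#include <utility>

// Run a cleanup only if the enclosing operation fails (in the spirit of
// ON_RESULT_FAILURE). All names here are illustrative stand-ins.
template <typename F>
class ResultFailureGuard {
public:
    explicit ResultFailureGuard(F f) : m_f(std::move(f)) {}
    ~ResultFailureGuard() {
        if (!m_succeeded) {
            m_f(); // failure path: undo partial work
        }
    }
    void MarkSucceeded() { m_succeeded = true; }

private:
    F m_f;
    bool m_succeeded = false;
};

bool AllocateSomething(bool fail) {
    ResultFailureGuard guard{[] { std::puts("cleanup: freeing partial allocation"); }};
    if (fail) {
        return false; // guard fires, partial work is undone
    }
    guard.MarkSucceeded(); // R_SUCCEED() does this implicitly in the macro version
    return true;
}

int main() {
    AllocateSomething(true);  // prints the cleanup message
    AllocateSomething(false); // silent: success disarms the guard
}
```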
```diff
@@ -266,10 +278,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
 
     // Lock the pool that we're allocating from.
     const auto [pool, dir] = DecodeOption(option);
-    KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
+    KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
 
     // Allocate the page group.
-    R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));
+    R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir,
+                                      m_has_optimized_process[static_cast<size_t>(pool)], true));
 
     // Open the first reference to the pages.
     for (const auto& block : out->Nodes()) {
@@ -277,7 +290,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
         size_t remaining_pages = block.GetNumPages();
         while (remaining_pages > 0) {
             // Get the manager for the current address.
-            auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);
+            auto& manager = this->GetManager(cur_address);
 
             // Process part or all of the block.
             const size_t cur_pages =
@@ -290,11 +303,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op
         }
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option,
-                                                 u64 process_id, u8 fill_pattern) {
+Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option,
+                                          u64 process_id, u8 fill_pattern) {
     ASSERT(out != nullptr);
     ASSERT(out->GetNumPages() == 0);
 
@@ -302,83 +315,89 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag
     const auto [pool, dir] = DecodeOption(option);
 
     // Allocate the memory.
+    bool optimized;
     {
         // Lock the pool that we're allocating from.
-        KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
+        KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
+
+        // Check if we have an optimized process.
+        const bool has_optimized = m_has_optimized_process[static_cast<size_t>(pool)];
+        const bool is_optimized = m_optimized_process_ids[static_cast<size_t>(pool)] == process_id;
 
         // Allocate the page group.
-        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));
+        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized,
+                                          false));
 
-        // Open the first reference to the pages.
-        for (const auto& block : out->Nodes()) {
-            PAddr cur_address = block.GetAddress();
-            size_t remaining_pages = block.GetNumPages();
-            while (remaining_pages > 0) {
-                // Get the manager for the current address.
-                auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);
-
-                // Process part or all of the block.
-                const size_t cur_pages =
-                    std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
-                manager.OpenFirst(cur_address, cur_pages);
-
-                // Advance.
-                cur_address += cur_pages * PageSize;
-                remaining_pages -= cur_pages;
-            }
-        }
+        // Set whether we should optimize.
+        optimized = has_optimized && is_optimized;
     }
 
-    // Set all the allocated memory.
-    for (const auto& block : out->Nodes()) {
-        std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
-                    block.GetSize());
-    }
+    // Perform optimized memory tracking, if we should.
+    if (optimized) {
+        // Iterate over the allocated blocks.
+        for (const auto& block : out->Nodes()) {
+            // Get the block extents.
+            const PAddr block_address = block.GetAddress();
+            const size_t block_pages = block.GetNumPages();
 
-    return ResultSuccess;
-}
+            // If it has no pages, we don't need to do anything.
+            if (block_pages == 0) {
+                continue;
+            }
 
-void KMemoryManager::Open(PAddr address, size_t num_pages) {
-    // Repeatedly open references until we've done so for all pages.
-    while (num_pages) {
-        auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
-        const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+            // Fill all the pages that we need to fill.
+            bool any_new = false;
+            {
+                PAddr cur_address = block_address;
+                size_t remaining_pages = block_pages;
+                while (remaining_pages > 0) {
+                    // Get the manager for the current address.
+                    auto& manager = this->GetManager(cur_address);
+
+                    // Process part or all of the block.
+                    const size_t cur_pages =
+                        std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
+                    any_new =
+                        manager.ProcessOptimizedAllocation(cur_address, cur_pages, fill_pattern);
+
+                    // Advance.
+                    cur_address += cur_pages * PageSize;
+                    remaining_pages -= cur_pages;
+                }
+            }
 
-        {
-            KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
-            manager.Open(address, cur_pages);
+            // If there are new pages, update tracking for the allocation.
+            if (any_new) {
+                // Update tracking for the allocation.
+                PAddr cur_address = block_address;
+                size_t remaining_pages = block_pages;
+                while (remaining_pages > 0) {
+                    // Get the manager for the current address.
+                    auto& manager = this->GetManager(cur_address);
+
+                    // Lock the pool for the manager.
+                    KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+
+                    // Track some or all of the current pages.
+                    const size_t cur_pages =
+                        std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
+                    manager.TrackOptimizedAllocation(cur_address, cur_pages);
+
+                    // Advance.
+                    cur_address += cur_pages * PageSize;
+                    remaining_pages -= cur_pages;
+                }
+            }
         }
-
-        num_pages -= cur_pages;
-        address += cur_pages * PageSize;
-    }
-}
-
-void KMemoryManager::Close(PAddr address, size_t num_pages) {
-    // Repeatedly close references until we've done so for all pages.
-    while (num_pages) {
-        auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
-        const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
-
-        {
-            KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
-            manager.Close(address, cur_pages);
+    } else {
+        // Set all the allocated memory.
+        for (const auto& block : out->Nodes()) {
+            std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
+                        block.GetSize());
         }
-
-        num_pages -= cur_pages;
-        address += cur_pages * PageSize;
-    }
     }
 
-void KMemoryManager::Close(const KPageGroup& pg) {
-    for (const auto& node : pg.Nodes()) {
-        Close(node.GetAddress(), node.GetNumPages());
-    }
-}
-void KMemoryManager::Open(const KPageGroup& pg) {
-    for (const auto& node : pg.Nodes()) {
-        Open(node.GetAddress(), node.GetNumPages());
-    }
+    R_SUCCEED();
 }
 
 size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management,
```
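The rewritten `AllocateForProcess` splits on two per-pool facts: whether the pool has a registered "optimized" process at all, and whether the caller *is* that process. Allocations by any other process must keep the optimized-memory bitmap current (the `unoptimized` flag passed to `AllocatePageGroupImpl`), while allocations by the optimized process take the tracking/fill path instead of the plain `memset`. The sketch below restates that decision with invented types:

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative restatement of the branch logic; names are stand-ins.
enum class FillPath { TrackOptimized, PlainMemset };

struct PoolState {
    bool has_optimized_process;
    std::uint64_t optimized_process_id;
};

FillPath ChoosePath(const PoolState& pool, std::uint64_t process_id, bool& out_unoptimized) {
    const bool has = pool.has_optimized_process;
    const bool is = has && pool.optimized_process_id == process_id;
    out_unoptimized = has && !is; // handed down to AllocatePageGroupImpl
    return (has && is) ? FillPath::TrackOptimized : FillPath::PlainMemset;
}

int main() {
    const PoolState pool{true, 42};
    bool unoptimized = false;
    const auto path = ChoosePath(pool, 42, unoptimized);
    std::printf("pid 42: unoptimized=%d tracked=%d\n", unoptimized,
                path == FillPath::TrackOptimized);
    ChoosePath(pool, 7, unoptimized);
    std::printf("pid 7: unoptimized=%d\n", unoptimized);
}
```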
```diff
@@ -394,18 +413,31 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr manage
     ASSERT(Common::IsAligned(total_management_size, PageSize));
 
     // Setup region.
-    pool = p;
-    management_region = management;
-    page_reference_counts.resize(
+    m_pool = p;
+    m_management_region = management;
+    m_page_reference_counts.resize(
         Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize);
-    ASSERT(Common::IsAligned(management_region, PageSize));
+    ASSERT(Common::IsAligned(m_management_region, PageSize));
 
     // Initialize the manager's KPageHeap.
-    heap.Initialize(address, size, management + manager_size, page_heap_size);
+    m_heap.Initialize(address, size, management + manager_size, page_heap_size);
 
     return total_management_size;
 }
 
+void KMemoryManager::Impl::TrackUnoptimizedAllocation(PAddr block, size_t num_pages) {
+    UNREACHABLE();
+}
+
+void KMemoryManager::Impl::TrackOptimizedAllocation(PAddr block, size_t num_pages) {
+    UNREACHABLE();
+}
+
+bool KMemoryManager::Impl::ProcessOptimizedAllocation(PAddr block, size_t num_pages,
+                                                      u8 fill_pattern) {
+    UNREACHABLE();
+}
+
 size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {
     const size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
     const size_t optimize_map_size =
```
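From the two visible lines of `CalculateManagementOverheadSize` plus the header's `CalculateOptimizedProcessOverheadSize`, the per-region management overhead is one `u16` reference count per page plus one optimized-memory bitmap bit per page, rounded up to whole `u64` words. A hedged worked example (4 KiB pages assumed, as on the Switch; the real function also page-aligns the total, which is omitted here):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t PageSize = 0x1000; // 4 KiB pages assumed
constexpr std::size_t BitSizeU64 = 64;

// One u16 refcount per page.
constexpr std::size_t RefCountSize(std::size_t region_size) {
    return (region_size / PageSize) * sizeof(std::uint16_t);
}

// Mirrors CalculateOptimizedProcessOverheadSize: one bit per page,
// rounded up to whole u64 words.
constexpr std::size_t OptimizeMapSize(std::size_t region_size) {
    const std::size_t pages = region_size / PageSize;
    return ((pages + BitSizeU64 - 1) / BitSizeU64) * sizeof(std::uint64_t);
}

int main() {
    const std::size_t region = 64 * 1024 * 1024; // hypothetical 64 MiB region
    // 16384 pages -> 32768 bytes of refcounts + 2048 bytes of bitmap.
    std::printf("pages=%zu refcounts=%zu bytes, optimize map=%zu bytes\n",
                region / PageSize, RefCountSize(region), OptimizeMapSize(region));
}
```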
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h
index dcb9b6348..401d4e644 100644

```diff
--- a/src/core/hle/kernel/k_memory_manager.h
+++ b/src/core/hle/kernel/k_memory_manager.h
@@ -21,11 +21,8 @@ namespace Kernel {
 
 class KPageGroup;
 
-class KMemoryManager final {
+class KMemoryManager {
 public:
-    YUZU_NON_COPYABLE(KMemoryManager);
-    YUZU_NON_MOVEABLE(KMemoryManager);
-
     enum class Pool : u32 {
         Application = 0,
         Applet = 1,
@@ -45,16 +42,85 @@ public:
     enum class Direction : u32 {
         FromFront = 0,
         FromBack = 1,
-
         Shift = 0,
         Mask = (0xF << Shift),
     };
 
-    explicit KMemoryManager(Core::System& system_);
+    static constexpr size_t MaxManagerCount = 10;
+
+    explicit KMemoryManager(Core::System& system);
 
     void Initialize(VAddr management_region, size_t management_region_size);
 
-    constexpr size_t GetSize(Pool pool) const {
+    Result InitializeOptimizedMemory(u64 process_id, Pool pool);
+    void FinalizeOptimizedMemory(u64 process_id, Pool pool);
+
+    PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
+    Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
+    Result AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
+                              u8 fill_pattern);
+
+    Pool GetPool(PAddr address) const {
+        return this->GetManager(address).GetPool();
+    }
+
+    void Open(PAddr address, size_t num_pages) {
+        // Repeatedly open references until we've done so for all pages.
+        while (num_pages) {
+            auto& manager = this->GetManager(address);
+            const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+
+            {
+                KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+                manager.Open(address, cur_pages);
+            }
+
+            num_pages -= cur_pages;
+            address += cur_pages * PageSize;
+        }
+    }
+
+    void OpenFirst(PAddr address, size_t num_pages) {
+        // Repeatedly open references until we've done so for all pages.
+        while (num_pages) {
+            auto& manager = this->GetManager(address);
+            const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+
+            {
+                KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+                manager.OpenFirst(address, cur_pages);
+            }
+
+            num_pages -= cur_pages;
+            address += cur_pages * PageSize;
+        }
+    }
+
+    void Close(PAddr address, size_t num_pages) {
+        // Repeatedly close references until we've done so for all pages.
+        while (num_pages) {
+            auto& manager = this->GetManager(address);
+            const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+
+            {
+                KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+                manager.Close(address, cur_pages);
+            }
+
+            num_pages -= cur_pages;
+            address += cur_pages * PageSize;
+        }
+    }
+
+    size_t GetSize() {
+        size_t total = 0;
+        for (size_t i = 0; i < m_num_managers; i++) {
+            total += m_managers[i].GetSize();
+        }
+        return total;
+    }
+
+    size_t GetSize(Pool pool) {
         constexpr Direction GetSizeDirection = Direction::FromFront;
         size_t total = 0;
         for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr;
```
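These new inline helpers encode the page lifetime rules: `OpenFirst` takes the initial reference right after allocation (the `Impl` side asserts the count becomes exactly 1), `Open` adds references to already-live pages, and `Close` drops references so pages whose count reaches zero can be returned to the heap. A toy model of those rules, not the real `KMemoryManager::Impl`:

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// Illustrative per-page reference counting; the real counters are u16 values
// owned by each manager and guarded by the pool locks.
class PageRefs {
public:
    explicit PageRefs(std::size_t num_pages) : m_refs(num_pages, 0) {}

    // OpenFirst: the very first reference, taken right after allocation.
    void OpenFirst(std::size_t page) {
        const auto ref = ++m_refs[page];
        assert(ref == 1);
        (void)ref;
    }
    // Open: an additional reference, e.g. when the memory is mapped again.
    void Open(std::size_t page) {
        const auto ref = ++m_refs[page];
        assert(ref > 1);
        (void)ref;
    }
    // Close: drop a reference; true means the page may now be freed.
    bool Close(std::size_t page) {
        assert(m_refs[page] > 0);
        return --m_refs[page] == 0;
    }

private:
    std::vector<unsigned> m_refs;
};

int main() {
    PageRefs refs(4);
    refs.OpenFirst(0); // allocation takes the first reference
    refs.Open(0);      // an alias/mapping takes another
    refs.Close(0);     // still referenced
    if (refs.Close(0)) {
        // last reference gone: the real code would return the page to KPageHeap
    }
}
```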
```diff
@@ -64,18 +130,36 @@ public:
         return total;
     }
 
-    PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
-    Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
-    Result AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
-                                     u8 fill_pattern);
+    size_t GetFreeSize() {
+        size_t total = 0;
+        for (size_t i = 0; i < m_num_managers; i++) {
+            KScopedLightLock lk(m_pool_locks[static_cast<size_t>(m_managers[i].GetPool())]);
+            total += m_managers[i].GetFreeSize();
+        }
+        return total;
+    }
 
-    static constexpr size_t MaxManagerCount = 10;
+    size_t GetFreeSize(Pool pool) {
+        KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
+
+        constexpr Direction GetSizeDirection = Direction::FromFront;
+        size_t total = 0;
+        for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr;
+             manager = this->GetNextManager(manager, GetSizeDirection)) {
+            total += manager->GetFreeSize();
+        }
+        return total;
+    }
 
-    void Close(PAddr address, size_t num_pages);
-    void Close(const KPageGroup& pg);
+    void DumpFreeList(Pool pool) {
+        KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
 
-    void Open(PAddr address, size_t num_pages);
-    void Open(const KPageGroup& pg);
+        constexpr Direction DumpDirection = Direction::FromFront;
+        for (auto* manager = this->GetFirstManager(pool, DumpDirection); manager != nullptr;
+             manager = this->GetNextManager(manager, DumpDirection)) {
+            manager->DumpFreeList();
+        }
+    }
 
 public:
     static size_t CalculateManagementOverheadSize(size_t region_size) {
@@ -88,14 +172,13 @@ public:
     }
 
     static constexpr Pool GetPool(u32 option) {
-        return static_cast<Pool>((static_cast<u32>(option) & static_cast<u32>(Pool::Mask)) >>
+        return static_cast<Pool>((option & static_cast<u32>(Pool::Mask)) >>
                                  static_cast<u32>(Pool::Shift));
     }
 
     static constexpr Direction GetDirection(u32 option) {
-        return static_cast<Direction>(
-            (static_cast<u32>(option) & static_cast<u32>(Direction::Mask)) >>
-            static_cast<u32>(Direction::Shift));
+        return static_cast<Direction>((option & static_cast<u32>(Direction::Mask)) >>
+                                      static_cast<u32>(Direction::Shift));
     }
 
     static constexpr std::tuple<Pool, Direction> DecodeOption(u32 option) {
```
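`GetPool` and `GetDirection` unpack a single `u32` option word into its two fields. `Direction::Shift = 0` and the `0xF` mask are visible in this diff; the `Pool` field's shift is not shown here, so the value below is an assumption made only to give a runnable round-trip:

```cpp
#include <cstdint>
#include <cstdio>
#include <tuple>

// Standalone restatement of the option encoding; Pool layout is assumed.
enum class Pool : std::uint32_t { Application = 0, Applet, System, SystemNonSecure };
enum class Direction : std::uint32_t { FromFront = 0, FromBack = 1 };

constexpr std::uint32_t PoolShift = 4;      // assumed, not shown in this hunk
constexpr std::uint32_t DirectionShift = 0; // per the diff: Direction::Shift = 0
constexpr std::uint32_t FieldMask = 0xF;    // per the diff: Mask = (0xF << Shift)

constexpr std::uint32_t EncodeOption(Pool pool, Direction dir) {
    return (static_cast<std::uint32_t>(pool) << PoolShift) |
           (static_cast<std::uint32_t>(dir) << DirectionShift);
}

constexpr std::tuple<Pool, Direction> DecodeOption(std::uint32_t option) {
    return {static_cast<Pool>((option >> PoolShift) & FieldMask),
            static_cast<Direction>((option >> DirectionShift) & FieldMask)};
}

int main() {
    constexpr auto option = EncodeOption(Pool::System, Direction::FromBack);
    static_assert(std::get<0>(DecodeOption(option)) == Pool::System);
    static_assert(std::get<1>(DecodeOption(option)) == Direction::FromBack);
    std::printf("option = 0x%02x\n", static_cast<unsigned>(option));
}
```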
```diff
@@ -103,74 +186,88 @@ public:
     }
 
 private:
-    class Impl final {
+    class Impl {
     public:
-        YUZU_NON_COPYABLE(Impl);
-        YUZU_NON_MOVEABLE(Impl);
+        static size_t CalculateManagementOverheadSize(size_t region_size);
+
+        static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
+            return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
+                    Common::BitSize<u64>()) *
+                   sizeof(u64);
+        }
 
+    public:
         Impl() = default;
-        ~Impl() = default;
 
         size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end,
                           Pool p);
 
-        VAddr AllocateBlock(s32 index, bool random) {
-            return heap.AllocateBlock(index, random);
+        PAddr AllocateBlock(s32 index, bool random) {
+            return m_heap.AllocateBlock(index, random);
         }
-
-        void Free(VAddr addr, size_t num_pages) {
-            heap.Free(addr, num_pages);
+        PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
+            return m_heap.AllocateAligned(index, num_pages, align_pages);
+        }
+        void Free(PAddr addr, size_t num_pages) {
+            m_heap.Free(addr, num_pages);
         }
 
         void SetInitialUsedHeapSize(size_t reserved_size) {
-            heap.SetInitialUsedSize(reserved_size);
+            m_heap.SetInitialUsedSize(reserved_size);
         }
 
-        constexpr Pool GetPool() const {
-            return pool;
+        void InitializeOptimizedMemory() {
+            UNIMPLEMENTED();
         }
 
+        void TrackUnoptimizedAllocation(PAddr block, size_t num_pages);
+        void TrackOptimizedAllocation(PAddr block, size_t num_pages);
+
+        bool ProcessOptimizedAllocation(PAddr block, size_t num_pages, u8 fill_pattern);
+
+        constexpr Pool GetPool() const {
+            return m_pool;
+        }
         constexpr size_t GetSize() const {
-            return heap.GetSize();
+            return m_heap.GetSize();
+        }
+        constexpr PAddr GetEndAddress() const {
+            return m_heap.GetEndAddress();
         }
 
-        constexpr VAddr GetAddress() const {
-            return heap.GetAddress();
+        size_t GetFreeSize() const {
+            return m_heap.GetFreeSize();
         }
 
-        constexpr VAddr GetEndAddress() const {
-            return heap.GetEndAddress();
+        void DumpFreeList() const {
+            UNIMPLEMENTED();
         }
 
         constexpr size_t GetPageOffset(PAddr address) const {
-            return heap.GetPageOffset(address);
+            return m_heap.GetPageOffset(address);
         }
-
         constexpr size_t GetPageOffsetToEnd(PAddr address) const {
-            return heap.GetPageOffsetToEnd(address);
+            return m_heap.GetPageOffsetToEnd(address);
         }
 
         constexpr void SetNext(Impl* n) {
-            next = n;
+            m_next = n;
         }
-
         constexpr void SetPrev(Impl* n) {
-            prev = n;
+            m_prev = n;
         }
-
         constexpr Impl* GetNext() const {
-            return next;
+            return m_next;
        }
-
         constexpr Impl* GetPrev() const {
-            return prev;
+            return m_prev;
         }
 
         void OpenFirst(PAddr address, size_t num_pages) {
             size_t index = this->GetPageOffset(address);
             const size_t end = index + num_pages;
             while (index < end) {
-                const RefCount ref_count = (++page_reference_counts[index]);
+                const RefCount ref_count = (++m_page_reference_counts[index]);
                 ASSERT(ref_count == 1);
 
                 index++;
@@ -181,7 +278,7 @@ private:
             size_t index = this->GetPageOffset(address);
             const size_t end = index + num_pages;
             while (index < end) {
-                const RefCount ref_count = (++page_reference_counts[index]);
+                const RefCount ref_count = (++m_page_reference_counts[index]);
                 ASSERT(ref_count > 1);
 
                 index++;
@@ -195,8 +292,8 @@ private:
             size_t free_start = 0;
             size_t free_count = 0;
             while (index < end) {
-                ASSERT(page_reference_counts[index] > 0);
-                const RefCount ref_count = (--page_reference_counts[index]);
+                ASSERT(m_page_reference_counts[index] > 0);
+                const RefCount ref_count = (--m_page_reference_counts[index]);
 
                 // Keep track of how many zero refcounts we see in a row, to minimize calls to free.
                 if (ref_count == 0) {
@@ -208,7 +305,7 @@ private:
                     }
                 } else {
                     if (free_count > 0) {
-                        this->Free(heap.GetAddress() + free_start * PageSize, free_count);
+                        this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);
                         free_count = 0;
                     }
                 }
@@ -217,44 +314,36 @@ private:
             }
 
             if (free_count > 0) {
-                this->Free(heap.GetAddress() + free_start * PageSize, free_count);
+                this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);
             }
         }
 
-        static size_t CalculateManagementOverheadSize(size_t region_size);
-
-        static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
-            return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
-                    Common::BitSize<u64>()) *
-                   sizeof(u64);
-        }
-
     private:
         using RefCount = u16;
 
-        KPageHeap heap;
-        std::vector<RefCount> page_reference_counts;
-        VAddr management_region{};
-        Pool pool{};
-        Impl* next{};
-        Impl* prev{};
+        KPageHeap m_heap;
+        std::vector<RefCount> m_page_reference_counts;
+        VAddr m_management_region{};
+        Pool m_pool{};
+        Impl* m_next{};
+        Impl* m_prev{};
     };
 
 private:
-    Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) {
-        return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
+    Impl& GetManager(PAddr address) {
+        return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
     }
 
-    const Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) const {
-        return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
+    const Impl& GetManager(PAddr address) const {
+        return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
     }
 
-    constexpr Impl* GetFirstManager(Pool pool, Direction dir) const {
-        return dir == Direction::FromBack ? pool_managers_tail[static_cast<size_t>(pool)]
-                                          : pool_managers_head[static_cast<size_t>(pool)];
+    constexpr Impl* GetFirstManager(Pool pool, Direction dir) {
+        return dir == Direction::FromBack ? m_pool_managers_tail[static_cast<size_t>(pool)]
+                                          : m_pool_managers_head[static_cast<size_t>(pool)];
     }
 
-    constexpr Impl* GetNextManager(Impl* cur, Direction dir) const {
+    constexpr Impl* GetNextManager(Impl* cur, Direction dir) {
         if (dir == Direction::FromBack) {
             return cur->GetPrev();
         } else {
@@ -263,15 +352,21 @@ private:
     }
 
     Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir,
-                                 bool random);
+                                 bool unoptimized, bool random);
 
 private:
-    Core::System& system;
-    std::array<KLightLock, static_cast<size_t>(Pool::Count)> pool_locks;
-    std::array<Impl*, MaxManagerCount> pool_managers_head{};
-    std::array<Impl*, MaxManagerCount> pool_managers_tail{};
-    std::array<Impl, MaxManagerCount> managers;
-    size_t num_managers{};
+    template <typename T>
+    using PoolArray = std::array<T, static_cast<size_t>(Pool::Count)>;
+
+    Core::System& m_system;
+    const KMemoryLayout& m_memory_layout;
+    PoolArray<KLightLock> m_pool_locks;
+    std::array<Impl*, MaxManagerCount> m_pool_managers_head{};
+    std::array<Impl*, MaxManagerCount> m_pool_managers_tail{};
+    std::array<Impl, MaxManagerCount> m_managers;
+    size_t m_num_managers{};
+    PoolArray<u64> m_optimized_process_ids{};
+    PoolArray<bool> m_has_optimized_process{};
 };
 
 } // namespace Kernel
```
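`Impl::Close` (in the hunks above) batches consecutive pages whose refcount just hit zero into a single `Free` call instead of freeing page-by-page. A self-contained sketch of that coalescing loop:

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

// Stand-in for the heap's Free; prints the batched run it receives.
void FreeRun(std::size_t start, std::size_t count) {
    std::printf("Free(start=%zu, count=%zu)\n", start, count);
}

// Mirrors the free_start/free_count logic of Impl::Close on toy refcounts.
void CloseRange(std::vector<unsigned>& refs, std::size_t index, std::size_t end) {
    std::size_t free_start = 0;
    std::size_t free_count = 0;
    while (index < end) {
        if (--refs[index] == 0) {
            if (free_count == 0) {
                free_start = index; // start of a new zero-refcount run
            }
            ++free_count;
        } else if (free_count > 0) {
            FreeRun(free_start, free_count); // flush the finished run
            free_count = 0;
        }
        ++index;
    }
    if (free_count > 0) {
        FreeRun(free_start, free_count); // flush the trailing run
    }
}

int main() {
    std::vector<unsigned> refs{1, 1, 2, 1, 1};
    CloseRange(refs, 0, refs.size()); // prints two runs: [0,2) and [3,5)
}
```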
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index c513e790e..0f1bab067 100644

```diff
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -114,7 +114,7 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
 
     // Set other basic fields
     m_enable_aslr = enable_aslr;
-    m_enable_device_address_space_merge = false;
+    m_enable_device_address_space_merge = enable_das_merge;
     m_address_space_start = start;
     m_address_space_end = end;
     m_is_kernel = false;
@@ -219,10 +219,22 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
         }
     }
 
-    // Set heap members
+    // Set heap and fill members.
     m_current_heap_end = m_heap_region_start;
     m_max_heap_size = 0;
-    m_max_physical_memory_size = 0;
+    m_mapped_physical_memory_size = 0;
+    m_mapped_unsafe_physical_memory = 0;
+    m_mapped_insecure_memory = 0;
+    m_mapped_ipc_server_memory = 0;
+
+    m_heap_fill_value = 0;
+    m_ipc_fill_value = 0;
+    m_stack_fill_value = 0;
+
+    // Set allocation option.
+    m_allocate_option =
+        KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack
+                                                     : KMemoryManager::Direction::FromFront);
 
     // Ensure that we regions inside our address space
     auto IsInAddressSpace = [&](VAddr addr) {
```
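The table now derives its allocation option once, at initialization, instead of re-encoding pool and direction at every call site; later hunks in this diff pass the cached `m_allocate_option` straight to `AllocateForProcess`. A small illustration of that precomputation pattern, with an invented encoder (see the bit-layout note earlier):

```cpp
#include <cstdint>
#include <cstdio>

// Invented encoder standing in for KMemoryManager::EncodeOption.
constexpr std::uint32_t EncodeOption(std::uint32_t pool, bool from_back) {
    return (pool << 4) | (from_back ? 1u : 0u); // field layout assumed
}

struct PageTableConfig {
    std::uint32_t allocate_option;
};

PageTableConfig InitializeForProcess(std::uint32_t pool, bool from_back) {
    // Compute once; every later allocation call just reuses the cached value.
    return PageTableConfig{EncodeOption(pool, from_back)};
}

int main() {
    const auto cfg = InitializeForProcess(/*pool=*/0, /*from_back=*/true);
    std::printf("allocate_option=0x%02x\n", cfg.allocate_option);
}
```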
```diff
@@ -271,6 +283,16 @@ void KPageTable::Finalize() {
         m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size);
     });
 
+    // Release any insecure mapped memory.
+    if (m_mapped_insecure_memory) {
+        UNIMPLEMENTED();
+    }
+
+    // Release any ipc server memory.
+    if (m_mapped_ipc_server_memory) {
+        UNIMPLEMENTED();
+    }
+
     // Close the backing page table, as the destructor is not called for guest objects.
     m_page_table_impl.reset();
 }
@@ -690,9 +712,20 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& s
     R_SUCCEED();
 }
 
+void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) {
+    m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages);
+}
+
+void KPageTable::HACK_ClosePages(VAddr virt_addr, size_t num_pages) {
+    for (size_t index = 0; index < num_pages; ++index) {
+        const auto paddr = GetPhysicalAddr(virt_addr + (index * PageSize));
+        m_system.Kernel().MemoryManager().Close(paddr, 1);
+    }
+}
+
 Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
     // Lock the physical memory lock.
-    KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
+    KScopedLightLock phys_lk(m_map_physical_memory_lock);
 
     // Calculate the last address for convenience.
     const VAddr last_address = address + size - 1;
```
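`HACK_OpenPages` and `HACK_ClosePages` (the commit's own naming) manually balance page reference counts around `Operate`, and `HACK_ClosePages` walks one page at a time because contiguous virtual pages may map to discontiguous physical pages, so each page's physical address must be resolved individually. A toy demonstration of why the map and unmap paths must stay symmetric; all types here are invented stand-ins:

```cpp
#include <cstddef>
#include <cstdio>

// Every page mapped takes one reference; every page unmapped drops one.
struct RefCounter {
    int refs[4] = {};
    void OpenFirst(std::size_t page, std::size_t count) {
        for (std::size_t i = 0; i < count; ++i) {
            ++refs[page + i];
        }
    }
    void Close(std::size_t page, std::size_t count) {
        for (std::size_t i = 0; i < count; ++i) {
            --refs[page + i];
        }
    }
};

int main() {
    RefCounter mm;
    mm.OpenFirst(0, 4); // like HACK_OpenPages after a successful Operate(..., Map)
    mm.Close(0, 4);     // like HACK_ClosePages before Operate(..., Unmap)
    for (int r : mm.refs) {
        if (r != 0) {
            std::puts("leak: refcount imbalance");
            return 1;
        }
    }
    std::puts("balanced");
}
```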
| @@ -746,15 +779,19 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 746 | { | 779 | { |
| 747 | // Reserve the memory from the process resource limit. | 780 | // Reserve the memory from the process resource limit. |
| 748 | KScopedResourceReservation memory_reservation( | 781 | KScopedResourceReservation memory_reservation( |
| 749 | m_system.Kernel().CurrentProcess()->GetResourceLimit(), | 782 | m_resource_limit, LimitableResource::PhysicalMemory, size - mapped_size); |
| 750 | LimitableResource::PhysicalMemory, size - mapped_size); | ||
| 751 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 783 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 752 | 784 | ||
| 753 | // Allocate pages for the new memory. | 785 | // Allocate pages for the new memory. |
| 754 | KPageGroup pg; | 786 | KPageGroup pg; |
| 755 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( | 787 | R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess( |
| 756 | &pg, (size - mapped_size) / PageSize, | 788 | &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0)); |
| 757 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); | 789 | |
| 790 | // If we fail in the next bit (or retry), we need to cleanup the pages. | ||
| 791 | // auto pg_guard = SCOPE_GUARD { | ||
| 792 | // pg.OpenFirst(); | ||
| 793 | // pg.Close(); | ||
| 794 | //}; | ||
| 758 | 795 | ||
| 759 | // Map the memory. | 796 | // Map the memory. |
| 760 | { | 797 | { |
| @@ -814,15 +851,24 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 814 | 851 | ||
| 815 | // Create an update allocator. | 852 | // Create an update allocator. |
| 816 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | 853 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); |
| 817 | Result allocator_result{ResultSuccess}; | 854 | Result allocator_result; |
| 818 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 855 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 819 | m_memory_block_slab_manager, | 856 | m_memory_block_slab_manager, |
| 820 | num_allocator_blocks); | 857 | num_allocator_blocks); |
| 821 | R_TRY(allocator_result); | 858 | R_TRY(allocator_result); |
| 822 | 859 | ||
| 860 | // We're going to perform an update, so create a helper. | ||
| 861 | // KScopedPageTableUpdater updater(this); | ||
| 862 | |||
| 863 | // Prepare to iterate over the memory. | ||
| 864 | auto pg_it = pg.Nodes().begin(); | ||
| 865 | PAddr pg_phys_addr = pg_it->GetAddress(); | ||
| 866 | size_t pg_pages = pg_it->GetNumPages(); | ||
| 867 | |||
| 823 | // Reset the current tracking address, and make sure we clean up on failure. | 868 | // Reset the current tracking address, and make sure we clean up on failure. |
| 869 | // pg_guard.Cancel(); | ||
| 824 | cur_address = address; | 870 | cur_address = address; |
| 825 | auto unmap_guard = detail::ScopeExit([&] { | 871 | ON_RESULT_FAILURE { |
| 826 | if (cur_address > address) { | 872 | if (cur_address > address) { |
| 827 | const VAddr last_unmap_address = cur_address - 1; | 873 | const VAddr last_unmap_address = cur_address - 1; |
| 828 | 874 | ||
| @@ -845,6 +891,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 845 | last_unmap_address + 1 - cur_address) / | 891 | last_unmap_address + 1 - cur_address) / |
| 846 | PageSize; | 892 | PageSize; |
| 847 | 893 | ||
| 894 | // HACK: Manually close the pages. | ||
| 895 | HACK_ClosePages(cur_address, cur_pages); | ||
| 896 | |||
| 848 | // Unmap. | 897 | // Unmap. |
| 849 | ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, | 898 | ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, |
| 850 | OperationType::Unmap) | 899 | OperationType::Unmap) |
| @@ -861,12 +910,17 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 861 | ++it; | 910 | ++it; |
| 862 | } | 911 | } |
| 863 | } | 912 | } |
| 864 | }); | ||
| 865 | 913 | ||
| 866 | // Iterate over the memory. | 914 | // Release any remaining unmapped memory. |
| 867 | auto pg_it = pg.Nodes().begin(); | 915 | m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages); |
| 868 | PAddr pg_phys_addr = pg_it->GetAddress(); | 916 | m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages); |
| 869 | size_t pg_pages = pg_it->GetNumPages(); | 917 | for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) { |
| 918 | m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(), | ||
| 919 | pg_it->GetNumPages()); | ||
| 920 | m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(), | ||
| 921 | pg_it->GetNumPages()); | ||
| 922 | } | ||
| 923 | }; | ||
| 870 | 924 | ||
| 871 | auto it = m_memory_block_manager.FindIterator(cur_address); | 925 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| 872 | while (true) { | 926 | while (true) { |
| @@ -901,6 +955,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 901 | R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite, | 955 | R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite, |
| 902 | OperationType::Map, pg_phys_addr)); | 956 | OperationType::Map, pg_phys_addr)); |
| 903 | 957 | ||
| 958 | // HACK: Manually open the pages. | ||
| 959 | HACK_OpenPages(pg_phys_addr, cur_pages); | ||
| 960 | |||
| 904 | // Advance. | 961 | // Advance. |
| 905 | cur_address += cur_pages * PageSize; | 962 | cur_address += cur_pages * PageSize; |
| 906 | map_pages -= cur_pages; | 963 | map_pages -= cur_pages; |
| @@ -932,9 +989,6 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 932 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, | 989 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, |
| 933 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None); | 990 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None); |
| 934 | 991 | ||
| 935 | // Cancel our guard. | ||
| 936 | unmap_guard.Cancel(); | ||
| 937 | |||
| 938 | R_SUCCEED(); | 992 | R_SUCCEED(); |
| 939 | } | 993 | } |
| 940 | } | 994 | } |
| @@ -943,7 +997,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 943 | 997 | ||
| 944 | Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | 998 | Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { |
| 945 | // Lock the physical memory lock. | 999 | // Lock the physical memory lock. |
| 946 | KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); | 1000 | KScopedLightLock phys_lk(m_map_physical_memory_lock); |
| 947 | 1001 | ||
| 948 | // Lock the table. | 1002 | // Lock the table. |
| 949 | KScopedLightLock lk(m_general_lock); | 1003 | KScopedLightLock lk(m_general_lock); |
| @@ -952,8 +1006,11 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 952 | const VAddr last_address = address + size - 1; | 1006 | const VAddr last_address = address + size - 1; |
| 953 | 1007 | ||
| 954 | // Define iteration variables. | 1008 | // Define iteration variables. |
| 955 | VAddr cur_address = 0; | 1009 | VAddr map_start_address = 0; |
| 956 | size_t mapped_size = 0; | 1010 | VAddr map_last_address = 0; |
| 1011 | |||
| 1012 | VAddr cur_address; | ||
| 1013 | size_t mapped_size; | ||
| 957 | size_t num_allocator_blocks = 0; | 1014 | size_t num_allocator_blocks = 0; |
| 958 | 1015 | ||
| 959 | // Check if the memory is mapped. | 1016 | // Check if the memory is mapped. |
| @@ -979,27 +1036,27 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 979 | if (is_normal) { | 1036 | if (is_normal) { |
| 980 | R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory); | 1037 | R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory); |
| 981 | 1038 | ||
| 1039 | if (map_start_address == 0) { | ||
| 1040 | map_start_address = cur_address; | ||
| 1041 | } | ||
| 1042 | map_last_address = | ||
| 1043 | (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address; | ||
| 1044 | |||
| 982 | if (info.GetAddress() < address) { | 1045 | if (info.GetAddress() < address) { |
| 983 | ++num_allocator_blocks; | 1046 | ++num_allocator_blocks; |
| 984 | } | 1047 | } |
| 985 | if (last_address < info.GetLastAddress()) { | 1048 | if (last_address < info.GetLastAddress()) { |
| 986 | ++num_allocator_blocks; | 1049 | ++num_allocator_blocks; |
| 987 | } | 1050 | } |
| 1051 | |||
| 1052 | mapped_size += (map_last_address + 1 - cur_address); | ||
| 988 | } | 1053 | } |
| 989 | 1054 | ||
| 990 | // Check if we're done. | 1055 | // Check if we're done. |
| 991 | if (last_address <= info.GetLastAddress()) { | 1056 | if (last_address <= info.GetLastAddress()) { |
| 992 | if (is_normal) { | ||
| 993 | mapped_size += (last_address + 1 - cur_address); | ||
| 994 | } | ||
| 995 | break; | 1057 | break; |
| 996 | } | 1058 | } |
| 997 | 1059 | ||
| 998 | // Track the memory if it's mapped. | ||
| 999 | if (is_normal) { | ||
| 1000 | mapped_size += VAddr(info.GetEndAddress()) - cur_address; | ||
| 1001 | } | ||
| 1002 | |||
| 1003 | // Advance. | 1060 | // Advance. |
| 1004 | cur_address = info.GetEndAddress(); | 1061 | cur_address = info.GetEndAddress(); |
| 1005 | ++it; | 1062 | ++it; |
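
The rewritten scan accumulates mapped_size per Normal block as it goes, clamping each block's last address to the request's last address, instead of special-casing the final block as the old code did. The arithmetic is ordinary inclusive-interval bookkeeping; a small self-checking model of the per-block contribution:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    using VAddr = std::uint64_t;

    // Bytes a block ending at block_last contributes to a request ending at
    // request_last, with the scan currently positioned at cur_address.
    std::uint64_t MappedBytes(VAddr cur_address, VAddr block_last, VAddr request_last) {
        const VAddr clamped_last = std::min(block_last, request_last);
        return clamped_last + 1 - cur_address;
    }

    int main() {
        // Request ends inside the block: only the overlap counts.
        assert(MappedBytes(0x1000, 0x4FFF, 0x2FFF) == 0x2000);
        // Block ends before the request: the block's whole tail counts.
        assert(MappedBytes(0x1000, 0x1FFF, 0x2FFF) == 0x1000);
    }
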
| @@ -1009,125 +1066,22 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 1009 | R_SUCCEED_IF(mapped_size == 0); | 1066 | R_SUCCEED_IF(mapped_size == 0); |
| 1010 | } | 1067 | } |
| 1011 | 1068 | ||
| 1012 | // Make a page group for the unmap region. | ||
| 1013 | KPageGroup pg; | ||
| 1014 | { | ||
| 1015 | auto& impl = this->PageTableImpl(); | ||
| 1016 | |||
| 1017 | // Begin traversal. | ||
| 1018 | Common::PageTable::TraversalContext context; | ||
| 1019 | Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0}; | ||
| 1020 | bool cur_valid = false; | ||
| 1021 | Common::PageTable::TraversalEntry next_entry; | ||
| 1022 | bool next_valid = false; | ||
| 1023 | size_t tot_size = 0; | ||
| 1024 | |||
| 1025 | cur_address = address; | ||
| 1026 | next_valid = impl.BeginTraversal(next_entry, context, cur_address); | ||
| 1027 | next_entry.block_size = | ||
| 1028 | (next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1))); | ||
| 1029 | |||
| 1030 | // Iterate, building the group. | ||
| 1031 | while (true) { | ||
| 1032 | if ((!next_valid && !cur_valid) || | ||
| 1033 | (next_valid && cur_valid && | ||
| 1034 | next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) { | ||
| 1035 | cur_entry.block_size += next_entry.block_size; | ||
| 1036 | } else { | ||
| 1037 | if (cur_valid) { | ||
| 1038 | // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr)); | ||
| 1039 | R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize)); | ||
| 1040 | } | ||
| 1041 | |||
| 1042 | // Update tracking variables. | ||
| 1043 | tot_size += cur_entry.block_size; | ||
| 1044 | cur_entry = next_entry; | ||
| 1045 | cur_valid = next_valid; | ||
| 1046 | } | ||
| 1047 | |||
| 1048 | if (cur_entry.block_size + tot_size >= size) { | ||
| 1049 | break; | ||
| 1050 | } | ||
| 1051 | |||
| 1052 | next_valid = impl.ContinueTraversal(next_entry, context); | ||
| 1053 | } | ||
| 1054 | |||
| 1055 | // Add the last block. | ||
| 1056 | if (cur_valid) { | ||
| 1057 | // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr)); | ||
| 1058 | R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize)); | ||
| 1059 | } | ||
| 1060 | } | ||
| 1061 | ASSERT(pg.GetNumPages() == mapped_size / PageSize); | ||
| 1062 | |||
| 1063 | // Create an update allocator. | 1069 | // Create an update allocator. |
| 1064 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | 1070 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); |
| 1065 | Result allocator_result{ResultSuccess}; | 1071 | Result allocator_result; |
| 1066 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1072 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1067 | m_memory_block_slab_manager, num_allocator_blocks); | 1073 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1068 | R_TRY(allocator_result); | 1074 | R_TRY(allocator_result); |
| 1069 | 1075 | ||
| 1070 | // Reset the current tracking address, and make sure we clean up on failure. | 1076 | // We're going to perform an update, so create a helper. |
| 1071 | cur_address = address; | 1077 | // KScopedPageTableUpdater updater(this); |
| 1072 | auto remap_guard = detail::ScopeExit([&] { | ||
| 1073 | if (cur_address > address) { | ||
| 1074 | const VAddr last_map_address = cur_address - 1; | ||
| 1075 | cur_address = address; | ||
| 1076 | |||
| 1077 | // Iterate over the memory we unmapped. | ||
| 1078 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 1079 | auto pg_it = pg.Nodes().begin(); | ||
| 1080 | PAddr pg_phys_addr = pg_it->GetAddress(); | ||
| 1081 | size_t pg_pages = pg_it->GetNumPages(); | ||
| 1082 | |||
| 1083 | while (true) { | ||
| 1084 | // Get the memory info for the pages we unmapped, convert to property. | ||
| 1085 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1086 | |||
| 1087 | // If the memory is normal, we unmapped it and need to re-map it. | ||
| 1088 | if (info.GetState() == KMemoryState::Normal) { | ||
| 1089 | // Determine the range to map. | ||
| 1090 | size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address, | ||
| 1091 | last_map_address + 1 - cur_address) / | ||
| 1092 | PageSize; | ||
| 1093 | |||
| 1094 | // While we have pages to map, map them. | ||
| 1095 | while (map_pages > 0) { | ||
| 1096 | // Check if we're at the end of the physical block. | ||
| 1097 | if (pg_pages == 0) { | ||
| 1098 | // Ensure there are more pages to map. | ||
| 1099 | ASSERT(pg_it != pg.Nodes().end()); | ||
| 1100 | |||
| 1101 | // Advance our physical block. | ||
| 1102 | ++pg_it; | ||
| 1103 | pg_phys_addr = pg_it->GetAddress(); | ||
| 1104 | pg_pages = pg_it->GetNumPages(); | ||
| 1105 | } | ||
| 1106 | |||
| 1107 | // Map whatever we can. | ||
| 1108 | const size_t cur_pages = std::min(pg_pages, map_pages); | ||
| 1109 | ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(), | ||
| 1110 | OperationType::Map, pg_phys_addr) == ResultSuccess); | ||
| 1111 | |||
| 1112 | // Advance. | ||
| 1113 | cur_address += cur_pages * PageSize; | ||
| 1114 | map_pages -= cur_pages; | ||
| 1115 | |||
| 1116 | pg_phys_addr += cur_pages * PageSize; | ||
| 1117 | pg_pages -= cur_pages; | ||
| 1118 | } | ||
| 1119 | } | ||
| 1120 | 1078 | ||
| 1121 | // Check if we're done. | 1079 | // Separate the mapping. |
| 1122 | if (last_map_address <= info.GetLastAddress()) { | 1080 | R_TRY(Operate(map_start_address, (map_last_address + 1 - map_start_address) / PageSize, |
| 1123 | break; | 1081 | KMemoryPermission::None, OperationType::Separate)); |
| 1124 | } | ||
| 1125 | 1082 | ||
| 1126 | // Advance. | 1083 | // Reset the current tracking address, and make sure we clean up on failure. |
| 1127 | ++it; | 1084 | cur_address = address; |
| 1128 | } | ||
| 1129 | } | ||
| 1130 | }); | ||
| 1131 | 1085 | ||
| 1132 | // Iterate over the memory, unmapping as we go. | 1086 | // Iterate over the memory, unmapping as we go. |
| 1133 | auto it = m_memory_block_manager.FindIterator(cur_address); | 1087 | auto it = m_memory_block_manager.FindIterator(cur_address); |
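
The deleted block rebuilt a KPageGroup by traversing the page table and coalescing physically contiguous entries; the refreshed code sidesteps that by issuing a Separate operation and closing references per range instead. The coalescing idea itself is simple, and a standalone sketch of it looks like this (Extent and Coalesce are illustrative names):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Extent {
        std::uint64_t phys_addr;
        std::uint64_t size;
    };

    // Walk (phys_addr, size) entries in virtual order and merge runs that
    // are physically contiguous, which is what kept the page group small.
    std::vector<Extent> Coalesce(const std::vector<Extent>& entries) {
        std::vector<Extent> out;
        for (const Extent& e : entries) {
            if (!out.empty() && out.back().phys_addr + out.back().size == e.phys_addr) {
                out.back().size += e.size; // physically adjacent: extend the run
            } else {
                out.push_back(e);
            }
        }
        return out;
    }

    int main() {
        const std::vector<Extent> in{{0x1000, 0x1000}, {0x2000, 0x1000}, {0x8000, 0x1000}};
        const auto merged = Coalesce(in);
        assert(merged.size() == 2);  // first two entries fused into one run
        assert(merged[0].size == 0x2000);
    }
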
| @@ -1145,8 +1099,12 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 1145 | last_address + 1 - cur_address) / | 1099 | last_address + 1 - cur_address) / |
| 1146 | PageSize; | 1100 | PageSize; |
| 1147 | 1101 | ||
| 1102 | // HACK: Manually close the pages. | ||
| 1103 | HACK_ClosePages(cur_address, cur_pages); | ||
| 1104 | |||
| 1148 | // Unmap. | 1105 | // Unmap. |
| 1149 | R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)); | 1106 | ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap) |
| 1107 | .IsSuccess()); | ||
| 1150 | } | 1108 | } |
| 1151 | 1109 | ||
| 1152 | // Check if we're done. | 1110 | // Check if we're done. |
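
Note the error-handling change in this hunk: the unmap, which operates on pages the function has already validated and counted, now asserts success rather than propagating a Result via R_TRY. A toy model of the two styles, with Result as a simplified stand-in for the kernel's type:

    #include <cassert>

    struct Result {
        int code;
        bool IsSuccess() const { return code == 0; }
    };

    constexpr Result ResultSuccess{0};

    // Propagating style: a failure early-returns to the caller.
    #define R_TRY(expr)                       \
        do {                                  \
            const Result r_ = (expr);         \
            if (!r_.IsSuccess()) return r_;   \
        } while (0)

    Result Unmap() { return ResultSuccess; }

    Result Propagating() {
        R_TRY(Unmap());
        return ResultSuccess;
    }

    void Asserting() {
        // Asserting style: failure here means internal state is broken.
        assert(Unmap().IsSuccess());
    }

    int main() {
        (void)Propagating();
        Asserting();
    }
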
| @@ -1161,8 +1119,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 1161 | 1119 | ||
| 1162 | // Release the memory resource. | 1120 | // Release the memory resource. |
| 1163 | m_mapped_physical_memory_size -= mapped_size; | 1121 | m_mapped_physical_memory_size -= mapped_size; |
| 1164 | auto process{m_system.Kernel().CurrentProcess()}; | 1122 | m_resource_limit->Release(LimitableResource::PhysicalMemory, mapped_size); |
| 1165 | process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); | ||
| 1166 | 1123 | ||
| 1167 | // Update memory blocks. | 1124 | // Update memory blocks. |
| 1168 | m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, | 1125 | m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, |
| @@ -1170,14 +1127,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 1170 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | 1127 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, |
| 1171 | KMemoryBlockDisableMergeAttribute::None); | 1128 | KMemoryBlockDisableMergeAttribute::None); |
| 1172 | 1129 | ||
| 1173 | // TODO(bunnei): This is a workaround until the next set of changes, where we add reference | ||
| 1174 | // counting for mapped pages. Until then, we must manually close the reference to the page | ||
| 1175 | // group. | ||
| 1176 | m_system.Kernel().MemoryManager().Close(pg); | ||
| 1177 | |||
| 1178 | // We succeeded. | 1130 | // We succeeded. |
| 1179 | remap_guard.Cancel(); | ||
| 1180 | |||
| 1181 | R_SUCCEED(); | 1131 | R_SUCCEED(); |
| 1182 | } | 1132 | } |
| 1183 | 1133 | ||
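
Several call sites in this commit switch from m_system.Kernel().CurrentProcess()->GetResourceLimit() to a cached m_resource_limit member, so releases charge the page table's owner directly instead of whichever process happens to be current. A sketch of that refactor's shape, with simplified stand-in types:

    #include <cassert>
    #include <cstddef>

    class ResourceLimit {
    public:
        void Reserve(std::size_t bytes) { m_used += bytes; }
        void Release(std::size_t bytes) { m_used -= bytes; }
        std::size_t Used() const { return m_used; }

    private:
        std::size_t m_used = 0;
    };

    class PageTable {
    public:
        explicit PageTable(ResourceLimit* limit) : m_resource_limit{limit} {}

        // No CurrentProcess() lookup: the cached pointer is always the
        // owning process's limit, even if another process is running.
        void OnPhysicalMemoryUnmapped(std::size_t bytes) {
            m_resource_limit->Release(bytes);
        }

    private:
        ResourceLimit* m_resource_limit;
    };

    int main() {
        ResourceLimit limit;
        limit.Reserve(0x4000);
        PageTable table{&limit};
        table.OnPhysicalMemoryUnmapped(0x4000);
        assert(limit.Used() == 0);
    }
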
| @@ -1753,8 +1703,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) { | |||
| 1753 | OperationType::Unmap)); | 1703 | OperationType::Unmap)); |
| 1754 | 1704 | ||
| 1755 | // Release the memory from the resource limit. | 1705 | // Release the memory from the resource limit. |
| 1756 | m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release( | 1706 | m_resource_limit->Release(LimitableResource::PhysicalMemory, num_pages * PageSize); |
| 1757 | LimitableResource::PhysicalMemory, num_pages * PageSize); | ||
| 1758 | 1707 | ||
| 1759 | // Apply the memory block update. | 1708 | // Apply the memory block update. |
| 1760 | m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, | 1709 | m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, |
| @@ -1784,8 +1733,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) { | |||
| 1784 | 1733 | ||
| 1785 | // Reserve memory for the heap extension. | 1734 | // Reserve memory for the heap extension. |
| 1786 | KScopedResourceReservation memory_reservation( | 1735 | KScopedResourceReservation memory_reservation( |
| 1787 | m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, | 1736 | m_resource_limit, LimitableResource::PhysicalMemory, allocation_size); |
| 1788 | allocation_size); | ||
| 1789 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 1737 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 1790 | 1738 | ||
| 1791 | // Allocate pages for the heap extension. | 1739 | // Allocate pages for the heap extension. |
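
KScopedResourceReservation follows the reserve-then-commit idiom: the memory is reserved up front, committed only once the allocation sticks, and returned automatically on any failure path. A minimal sketch of that idiom under those assumptions (Limit and ScopedReservation are illustrative, not the kernel's classes):

    #include <cassert>
    #include <cstddef>

    class Limit {
    public:
        bool Reserve(std::size_t n) {
            if (m_used + n > m_max) return false;
            m_used += n;
            return true;
        }
        void Release(std::size_t n) { m_used -= n; }
        std::size_t Used() const { return m_used; }

    private:
        std::size_t m_used = 0;
        std::size_t m_max = 0x10000;
    };

    class ScopedReservation {
    public:
        ScopedReservation(Limit& limit, std::size_t n)
            : m_limit{limit}, m_size{n}, m_succeeded{limit.Reserve(n)} {}
        ~ScopedReservation() {
            // Failure path: reservation never committed, so give it back.
            if (m_succeeded && !m_committed) m_limit.Release(m_size);
        }
        bool Succeeded() const { return m_succeeded; }
        void Commit() { m_committed = true; }

    private:
        Limit& m_limit;
        std::size_t m_size;
        bool m_succeeded;
        bool m_committed = false;
    };

    int main() {
        Limit limit;
        {
            ScopedReservation r{limit, 0x2000};
            assert(r.Succeeded());
            // No Commit(): simulate a failed heap extension.
        }
        assert(limit.Used() == 0); // destructor returned the reservation
    }
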
| @@ -1873,7 +1821,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_ | |||
| 1873 | R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); | 1821 | R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); |
| 1874 | } else { | 1822 | } else { |
| 1875 | KPageGroup page_group; | 1823 | KPageGroup page_group; |
| 1876 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( | 1824 | R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess( |
| 1877 | &page_group, needed_num_pages, | 1825 | &page_group, needed_num_pages, |
| 1878 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); | 1826 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); |
| 1879 | R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); | 1827 | R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); |
| @@ -1887,8 +1835,9 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_ | |||
| 1887 | return addr; | 1835 | return addr; |
| 1888 | } | 1836 | } |
| 1889 | 1837 | ||
| 1890 | Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, | 1838 | Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size, |
| 1891 | bool is_aligned) { | 1839 | KMemoryPermission perm, bool is_aligned, |
| 1840 | bool check_heap) { | ||
| 1892 | // Lightly validate the range before doing anything else. | 1841 | // Lightly validate the range before doing anything else. |
| 1893 | const size_t num_pages = size / PageSize; | 1842 | const size_t num_pages = size / PageSize; |
| 1894 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | 1843 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); |
| @@ -1898,15 +1847,18 @@ Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMem | |||
| 1898 | 1847 | ||
| 1899 | // Check the memory state. | 1848 | // Check the memory state. |
| 1900 | const auto test_state = | 1849 | const auto test_state = |
| 1901 | (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap); | 1850 | (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) | |
| 1851 | (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None); | ||
| 1902 | size_t num_allocator_blocks; | 1852 | size_t num_allocator_blocks; |
| 1903 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state, | 1853 | KMemoryState old_state; |
| 1854 | R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr, | ||
| 1855 | std::addressof(num_allocator_blocks), address, size, test_state, | ||
| 1904 | test_state, perm, perm, | 1856 | test_state, perm, perm, |
| 1905 | KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, | 1857 | KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, |
| 1906 | KMemoryAttribute::None, KMemoryAttribute::DeviceShared)); | 1858 | KMemoryAttribute::None, KMemoryAttribute::DeviceShared)); |
| 1907 | 1859 | ||
| 1908 | // Create an update allocator. | 1860 | // Create an update allocator. |
| 1909 | Result allocator_result{ResultSuccess}; | 1861 | Result allocator_result; |
| 1910 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1862 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1911 | m_memory_block_slab_manager, num_allocator_blocks); | 1863 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1912 | R_TRY(allocator_result); | 1864 | R_TRY(allocator_result); |
| @@ -1915,10 +1867,13 @@ Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMem | |||
| 1915 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, | 1867 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, |
| 1916 | &KMemoryBlock::ShareToDevice, KMemoryPermission::None); | 1868 | &KMemoryBlock::ShareToDevice, KMemoryPermission::None); |
| 1917 | 1869 | ||
| 1870 | // Set whether the locked memory was io. | ||
| 1871 | *out_is_io = old_state == KMemoryState::Io; | ||
| 1872 | |||
| 1918 | R_SUCCEED(); | 1873 | R_SUCCEED(); |
| 1919 | } | 1874 | } |
| 1920 | 1875 | ||
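
LockForMapDeviceAddressSpace now also reports whether the locked range was I/O memory: the extended CheckMemoryState call surfaces the range's old state, and the function translates that into the out_is_io flag its nvmap caller consumes. A reduced model of the out-parameter flow (the enum and checker below are simplified stand-ins):

    #include <cassert>

    enum class MemState { Normal, Io, Static };

    // Hypothetical checker: reports the range's current state via out-param.
    bool CheckState(MemState* out_state, MemState actual) {
        *out_state = actual;
        return true;
    }

    bool LockForDeviceMap(bool* out_is_io, MemState actual) {
        MemState old_state{};
        if (!CheckState(&old_state, actual)) {
            return false;
        }
        *out_is_io = (old_state == MemState::Io); // surfaced to the caller
        return true;
    }

    int main() {
        bool is_io = false;
        assert(LockForDeviceMap(&is_io, MemState::Io) && is_io);
        assert(LockForDeviceMap(&is_io, MemState::Normal) && !is_io);
    }
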
| 1921 | Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) { | 1876 | Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap) { |
| 1922 | // Lightly validate the range before doing anything else. | 1877 | // Lightly validate the range before doing anything else. |
| 1923 | const size_t num_pages = size / PageSize; | 1878 | const size_t num_pages = size / PageSize; |
| 1924 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | 1879 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); |
| @@ -1927,16 +1882,16 @@ Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) { | |||
| 1927 | KScopedLightLock lk(m_general_lock); | 1882 | KScopedLightLock lk(m_general_lock); |
| 1928 | 1883 | ||
| 1929 | // Check the memory state. | 1884 | // Check the memory state. |
| 1885 | const auto test_state = KMemoryState::FlagCanDeviceMap | | ||
| 1886 | (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None); | ||
| 1930 | size_t num_allocator_blocks; | 1887 | size_t num_allocator_blocks; |
| 1931 | R_TRY(this->CheckMemoryStateContiguous( | 1888 | R_TRY(this->CheckMemoryStateContiguous( |
| 1932 | std::addressof(num_allocator_blocks), address, size, | 1889 | std::addressof(num_allocator_blocks), address, size, test_state, test_state, |
| 1933 | KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, | ||
| 1934 | KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, | ||
| 1935 | KMemoryPermission::None, KMemoryPermission::None, | 1890 | KMemoryPermission::None, KMemoryPermission::None, |
| 1936 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | 1891 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); |
| 1937 | 1892 | ||
| 1938 | // Create an update allocator. | 1893 | // Create an update allocator. |
| 1939 | Result allocator_result{ResultSuccess}; | 1894 | Result allocator_result; |
| 1940 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1895 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1941 | m_memory_block_slab_manager, num_allocator_blocks); | 1896 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1942 | R_TRY(allocator_result); | 1897 | R_TRY(allocator_result); |
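
Both device-lock paths now build their state mask the same way: the base flags are ORed with FlagReferenceCounted only when check_heap is set, where the old code in LockForUnmapDeviceAddressSpace demanded that flag unconditionally. The composition pattern, reduced to a self-checking sketch with stand-in flag values:

    #include <cstdint>

    enum class State : std::uint32_t {
        None = 0,
        FlagCanDeviceMap = 1u << 0,
        FlagReferenceCounted = 1u << 1,
    };

    constexpr State operator|(State a, State b) {
        return static_cast<State>(static_cast<std::uint32_t>(a) |
                                  static_cast<std::uint32_t>(b));
    }

    // The heap-backed check is opted into per caller, not hardwired.
    constexpr State TestState(bool check_heap) {
        return State::FlagCanDeviceMap |
               (check_heap ? State::FlagReferenceCounted : State::None);
    }

    int main() {
        static_assert(TestState(false) == State::FlagCanDeviceMap);
        static_assert(TestState(true) ==
                      (State::FlagCanDeviceMap | State::FlagReferenceCounted));
    }
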
| @@ -2070,6 +2025,10 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, | |||
| 2070 | m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); | 2025 | m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); |
| 2071 | break; | 2026 | break; |
| 2072 | } | 2027 | } |
| 2028 | case OperationType::Separate: { | ||
| 2029 | // HACK: Unimplemented. | ||
| 2030 | break; | ||
| 2031 | } | ||
| 2073 | case OperationType::ChangePermissions: | 2032 | case OperationType::ChangePermissions: |
| 2074 | case OperationType::ChangePermissionsAndRefresh: | 2033 | case OperationType::ChangePermissionsAndRefresh: |
| 2075 | break; | 2034 | break; |
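
The new Separate case is added as an explicit no-op; the in-source comment marks it unimplemented. Conceptually, such an operation splits one contiguous mapping record so a sub-range gets its own entry and can later be manipulated independently. A speculative sketch of that splitting, with illustrative types rather than the kernel's:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Mapping {
        std::uint64_t start;
        std::uint64_t size;
    };

    // Carve [start, start + size) out of a single covering mapping,
    // producing up to three records: left remainder, carved range, right.
    std::vector<Mapping> Separate(const Mapping& m, std::uint64_t start,
                                  std::uint64_t size) {
        std::vector<Mapping> out;
        if (start > m.start) out.push_back({m.start, start - m.start});
        out.push_back({start, size});
        const std::uint64_t end = start + size;
        const std::uint64_t m_end = m.start + m.size;
        if (end < m_end) out.push_back({end, m_end - end});
        return out;
    }

    int main() {
        const auto parts = Separate({0x0000, 0x10000}, 0x4000, 0x2000);
        assert(parts.size() == 3);
        assert(parts[1].start == 0x4000 && parts[1].size == 0x2000);
    }
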
| @@ -2105,6 +2064,7 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const { | |||
| 2105 | case KMemoryState::GeneratedCode: | 2064 | case KMemoryState::GeneratedCode: |
| 2106 | case KMemoryState::CodeOut: | 2065 | case KMemoryState::CodeOut: |
| 2107 | case KMemoryState::Coverage: | 2066 | case KMemoryState::Coverage: |
| 2067 | case KMemoryState::Insecure: | ||
| 2108 | return m_alias_code_region_start; | 2068 | return m_alias_code_region_start; |
| 2109 | case KMemoryState::Code: | 2069 | case KMemoryState::Code: |
| 2110 | case KMemoryState::CodeData: | 2070 | case KMemoryState::CodeData: |
| @@ -2140,6 +2100,7 @@ size_t KPageTable::GetRegionSize(KMemoryState state) const { | |||
| 2140 | case KMemoryState::GeneratedCode: | 2100 | case KMemoryState::GeneratedCode: |
| 2141 | case KMemoryState::CodeOut: | 2101 | case KMemoryState::CodeOut: |
| 2142 | case KMemoryState::Coverage: | 2102 | case KMemoryState::Coverage: |
| 2103 | case KMemoryState::Insecure: | ||
| 2143 | return m_alias_code_region_end - m_alias_code_region_start; | 2104 | return m_alias_code_region_end - m_alias_code_region_start; |
| 2144 | case KMemoryState::Code: | 2105 | case KMemoryState::Code: |
| 2145 | case KMemoryState::CodeData: | 2106 | case KMemoryState::CodeData: |
| @@ -2181,6 +2142,7 @@ bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const { | |||
| 2181 | case KMemoryState::GeneratedCode: | 2142 | case KMemoryState::GeneratedCode: |
| 2182 | case KMemoryState::CodeOut: | 2143 | case KMemoryState::CodeOut: |
| 2183 | case KMemoryState::Coverage: | 2144 | case KMemoryState::Coverage: |
| 2145 | case KMemoryState::Insecure: | ||
| 2184 | return is_in_region && !is_in_heap && !is_in_alias; | 2146 | return is_in_region && !is_in_heap && !is_in_alias; |
| 2185 | case KMemoryState::Normal: | 2147 | case KMemoryState::Normal: |
| 2186 | ASSERT(is_in_heap); | 2148 | ASSERT(is_in_heap); |
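
The new Insecure memory state is wired into three switches, GetRegionAddress, GetRegionSize, and CanContain, each time alongside the states that already resolve to the alias-code region, so an Insecure mapping is placed and validated exactly like CodeOut or Coverage. The shared routing pattern, reduced to one query with simplified stand-ins:

    #include <cassert>
    #include <cstdint>

    enum class MemState { CodeOut, Coverage, Insecure, Code };

    struct Regions {
        std::uint64_t alias_code_start = 0x0800'0000;
        std::uint64_t code_start = 0x0010'0000;

        std::uint64_t RegionAddress(MemState s) const {
            switch (s) {
            case MemState::CodeOut:
            case MemState::Coverage:
            case MemState::Insecure: // new state, same routing as alias code
                return alias_code_start;
            case MemState::Code:
                return code_start;
            }
            return 0;
        }
    };

    int main() {
        Regions r;
        assert(r.RegionAddress(MemState::Insecure) ==
               r.RegionAddress(MemState::Coverage));
    }
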
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index 44388655d..fa29db758 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp | |||
| @@ -126,10 +126,12 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) | |||
| 126 | LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); | 126 | LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); |
| 127 | return result; | 127 | return result; |
| 128 | } | 128 | } |
| 129 | bool is_out_io{}; | ||
| 129 | ASSERT(system.CurrentProcess() | 130 | ASSERT(system.CurrentProcess() |
| 130 | ->PageTable() | 131 | ->PageTable() |
| 131 | .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size, | 132 | .LockForMapDeviceAddressSpace(&is_out_io, handle_description->address, |
| 132 | Kernel::KMemoryPermission::None, true) | 133 | handle_description->size, |
| 134 | Kernel::KMemoryPermission::None, true, false) | ||
| 133 | .IsSuccess()); | 135 | .IsSuccess()); |
| 134 | std::memcpy(output.data(), ¶ms, sizeof(params)); | 136 | std::memcpy(output.data(), ¶ms, sizeof(params)); |
| 135 | return result; | 137 | return result; |