path: root/src/core/hle/kernel/kernel.cpp
author bunnei 2022-03-02 17:55:51 -0800
committer GitHub 2022-03-02 17:55:51 -0800
commit 3ab82e758285108e12ff5ebcadb419a0d0c4e6e7 (patch)
tree 96931961bb24ecbe660f7709966ccc7fa16c3ed3 /src/core/hle/kernel/kernel.cpp
parent Merge pull request #7959 from merryhime/cmpxchg (diff)
parent hle: kernel: Re-create memory layout at initialization. (diff)
Merge pull request #7956 from bunnei/improve-mem-manager
Kernel Memory Updates (Part 4): Revamp KMemoryManager & other fixes
Diffstat (limited to 'src/core/hle/kernel/kernel.cpp')
-rw-r--r-- src/core/hle/kernel/kernel.cpp | 113
1 file changed, 57 insertions(+), 56 deletions(-)
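Note on the shape of this change: before this merge, Initialize() built a KMemoryLayout on the stack and threaded it through every helper by reference; afterwards the layout is a heap-allocated member (std::unique_ptr<KMemoryLayout>) that DeriveInitialMemoryLayout() re-creates on each initialization and that the other helpers reach directly. A minimal sketch of that ownership pattern, with stand-in types (none of these names are yuzu code):

#include <cassert>
#include <memory>

struct MemoryLayoutSketch {}; // stands in for KMemoryLayout

struct ImplSketch {
    std::unique_ptr<MemoryLayoutSketch> memory_layout;

    void DeriveInitialMemoryLayout() {
        // Re-created on every initialization, so a re-initialized kernel
        // never sees stale regions from a previous session.
        memory_layout = std::make_unique<MemoryLayoutSketch>();
    }

    void InitializeMemoryLayout() {
        // Must run after DeriveInitialMemoryLayout(), as in the first hunk below.
        assert(memory_layout != nullptr);
        // ... consume *memory_layout ...
    }
};

int main() {
    ImplSketch impl;
    impl.DeriveInitialMemoryLayout();
    impl.InitializeMemoryLayout();
}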
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 797f47021..71bd466cf 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -70,13 +70,12 @@ struct KernelCore::Impl {
 
         // Derive the initial memory layout from the emulated board
         Init::InitializeSlabResourceCounts(kernel);
-        KMemoryLayout memory_layout;
-        DeriveInitialMemoryLayout(memory_layout);
-        Init::InitializeSlabHeaps(system, memory_layout);
+        DeriveInitialMemoryLayout();
+        Init::InitializeSlabHeaps(system, *memory_layout);
 
         // Initialize kernel memory and resources.
-        InitializeSystemResourceLimit(kernel, system.CoreTiming(), memory_layout);
-        InitializeMemoryLayout(memory_layout);
+        InitializeSystemResourceLimit(kernel, system.CoreTiming());
+        InitializeMemoryLayout();
         InitializePageSlab();
         InitializeSchedulers();
         InitializeSuspendThreads();
@@ -219,12 +218,11 @@ struct KernelCore::Impl {
 
     // Creates the default system resource limit
     void InitializeSystemResourceLimit(KernelCore& kernel,
-                                       const Core::Timing::CoreTiming& core_timing,
-                                       const KMemoryLayout& memory_layout) {
+                                       const Core::Timing::CoreTiming& core_timing) {
         system_resource_limit = KResourceLimit::Create(system.Kernel());
         system_resource_limit->Initialize(&core_timing);
 
-        const auto [total_size, kernel_size] = memory_layout.GetTotalAndKernelMemorySizes();
+        const auto [total_size, kernel_size] = memory_layout->GetTotalAndKernelMemorySizes();
 
         // If setting the default system values fails, then something seriously wrong has occurred.
         ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, total_size)
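The hunk above keeps the same structured-binding consumption and only sources the sizes from the member via ->. A self-contained sketch of that consumption pattern; GetTotalAndKernelMemorySizes() below is a stand-in with made-up sizes, not the real kernel API:

#include <cassert>
#include <cstdint>
#include <utility>

std::pair<std::uint64_t, std::uint64_t> GetTotalAndKernelMemorySizes() {
    return {4ull << 30, 512ull << 20}; // hypothetical: 4 GiB total, 512 MiB kernel
}

int main() {
    // Same structured-binding shape as the changed line; the kernel ASSERTs
    // around the limit setup because failing here is unrecoverable.
    const auto [total_size, kernel_size] = GetTotalAndKernelMemorySizes();
    assert(kernel_size <= total_size);
}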
@@ -353,16 +351,18 @@ struct KernelCore::Impl {
         return schedulers[thread_id]->GetCurrentThread();
     }
 
-    void DeriveInitialMemoryLayout(KMemoryLayout& memory_layout) {
+    void DeriveInitialMemoryLayout() {
+        memory_layout = std::make_unique<KMemoryLayout>();
+
         // Insert the root region for the virtual memory tree, from which all other regions will
         // derive.
-        memory_layout.GetVirtualMemoryRegionTree().InsertDirectly(
+        memory_layout->GetVirtualMemoryRegionTree().InsertDirectly(
             KernelVirtualAddressSpaceBase,
             KernelVirtualAddressSpaceBase + KernelVirtualAddressSpaceSize - 1);
 
         // Insert the root region for the physical memory tree, from which all other regions will
         // derive.
-        memory_layout.GetPhysicalMemoryRegionTree().InsertDirectly(
+        memory_layout->GetPhysicalMemoryRegionTree().InsertDirectly(
             KernelPhysicalAddressSpaceBase,
             KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1);
 
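The root inserts above end at base + size - 1, which suggests the region trees store inclusive [first, last] ranges. A toy stand-in tree (std::map, hypothetical base and size) showing why the inclusive end is the natural encoding:

#include <cassert>
#include <cstdint>
#include <map>

// Toy inclusive-range store; the real KMemoryRegionTree is an interval tree.
struct RegionTreeSketch {
    std::map<std::uint64_t, std::uint64_t> regions; // first -> last, inclusive
    void InsertDirectly(std::uint64_t first, std::uint64_t last) {
        assert(first <= last);
        regions[first] = last;
    }
};

int main() {
    constexpr std::uint64_t base = 0xFFFF'0000'0000'0000ull; // hypothetical base
    constexpr std::uint64_t size = 1ull << 39;               // hypothetical size
    RegionTreeSketch tree;
    // The last byte of an N-byte region at base is base + N - 1; using the
    // inclusive end also sidesteps overflow when a region touches the very
    // top of the address space.
    tree.InsertDirectly(base, base + size - 1);
}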
@@ -379,7 +379,7 @@ struct KernelCore::Impl {
         if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) {
             kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start;
         }
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
             kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel));
 
385 // Setup the code region. 385 // Setup the code region.
@@ -388,11 +388,11 @@ struct KernelCore::Impl {
             Common::AlignDown(code_start_virt_addr, CodeRegionAlign);
         constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign);
         constexpr size_t code_region_size = code_region_end - code_region_start;
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
             code_region_start, code_region_size, KMemoryRegionType_KernelCode));
 
         // Setup board-specific device physical regions.
-        Init::SetupDevicePhysicalMemoryRegions(memory_layout);
+        Init::SetupDevicePhysicalMemoryRegions(*memory_layout);
 
         // Determine the amount of space needed for the misc region.
         size_t misc_region_needed_size;
@@ -401,7 +401,7 @@ struct KernelCore::Impl {
         misc_region_needed_size = Core::Hardware::NUM_CPU_CORES * (3 * (PageSize + PageSize));
 
         // Account for each auto-map device.
-        for (const auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+        for (const auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
             if (region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
                 // Check that the region is valid.
                 ASSERT(region.GetEndAddress() != 0);
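For scale, the fixed part of the misc-region estimate above works out as follows; the core count and page size are assumptions taken from typical Switch parameters, not from this diff:

#include <cstddef>
#include <iostream>

int main() {
    constexpr std::size_t NUM_CPU_CORES = 4; // assumption: Switch core count
    constexpr std::size_t PageSize = 0x1000; // assumption: 4 KiB pages
    // Same expression as the context line above:
    constexpr std::size_t misc_base = NUM_CPU_CORES * (3 * (PageSize + PageSize));
    std::cout << misc_base / 1024 << " KiB\n"; // prints 96 KiB, before device mappings
}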
@@ -426,22 +426,22 @@ struct KernelCore::Impl {
 
         // Setup the misc region.
         const VAddr misc_region_start =
-            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+            memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                 misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel);
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
             misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc));
 
         // Setup the stack region.
         constexpr size_t StackRegionSize = 14_MiB;
         constexpr size_t StackRegionAlign = KernelAslrAlignment;
         const VAddr stack_region_start =
-            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+            memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                 StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel);
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
             stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack));
 
         // Determine the size of the resource region.
-        const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit();
+        const size_t resource_region_size = memory_layout->GetResourceRegionSizeForInit();
 
         // Determine the size of the slab region.
         const size_t slab_region_size =
@@ -458,23 +458,23 @@ struct KernelCore::Impl {
             Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) -
             Common::AlignDown(code_end_phys_addr, SlabRegionAlign);
         const VAddr slab_region_start =
-            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+            memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                 slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) +
             (code_end_phys_addr % SlabRegionAlign);
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
             slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab));
 
         // Setup the temp region.
         constexpr size_t TempRegionSize = 128_MiB;
         constexpr size_t TempRegionAlign = KernelAslrAlignment;
         const VAddr temp_region_start =
-            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+            memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                 TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel);
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize,
-                                                                 KMemoryRegionType_KernelTemp));
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize,
+                                                                  KMemoryRegionType_KernelTemp));
 
         // Automatically map in devices that have auto-map attributes.
-        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+        for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
             // We only care about kernel regions.
             if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) {
                 continue;
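The misc/stack/temp hunks all call GetRandomAlignedRegion, i.e. ASLR-style placement of a fixed-size region at a random aligned offset. The helper below is a simplified stand-in for that idea (it ignores the region tree and assumes free_base is already aligned and the region fits); only the alignment arithmetic carries over:

#include <cstdint>
#include <cstdio>
#include <random>

std::uint64_t RandomAlignedStart(std::uint64_t free_base, std::uint64_t free_size,
                                 std::uint64_t region_size, std::uint64_t align) {
    std::mt19937_64 rng{std::random_device{}()};
    // Number of aligned starting positions at which the region still fits.
    const std::uint64_t slots = (free_size - region_size) / align + 1;
    return free_base + (rng() % slots) * align; // aligned start; region fits by construction
}

int main() {
    // Hypothetical numbers: place a 14 MiB stack region in a 1 GiB window
    // with 2 MiB alignment.
    const std::uint64_t start =
        RandomAlignedStart(0x40000000ull, 0x40000000ull, 14ull << 20, 2ull << 20);
    std::printf("stack_region_start = %#llx\n", static_cast<unsigned long long>(start));
}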
@@ -501,21 +501,21 @@ struct KernelCore::Impl {
             const size_t map_size =
                 Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr;
             const VAddr map_virt_addr =
-                memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
+                memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
                     map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize);
-            ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+            ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
                 map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice));
             region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr);
         }
 
-        Init::SetupDramPhysicalMemoryRegions(memory_layout);
+        Init::SetupDramPhysicalMemoryRegions(*memory_layout);
 
         // Insert a physical region for the kernel code region.
-        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
             code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode));
 
         // Insert a physical region for the kernel slab region.
-        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
             slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab));
 
         // Determine size available for kernel page table heaps, requiring > 8 MB.
@@ -524,12 +524,12 @@ struct KernelCore::Impl {
         ASSERT(page_table_heap_size / 4_MiB > 2);
 
         // Insert a physical region for the kernel page table heap region
-        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
             slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap));
 
         // All DRAM regions that we haven't tagged by this point will be mapped under the linear
         // mapping. Tag them.
-        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+        for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
             if (region.GetType() == KMemoryRegionType_Dram) {
                 // Check that the region is valid.
                 ASSERT(region.GetEndAddress() != 0);
@@ -541,7 +541,7 @@ struct KernelCore::Impl {
 
         // Get the linear region extents.
         const auto linear_extents =
-            memory_layout.GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            memory_layout->GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
                 KMemoryRegionAttr_LinearMapped);
         ASSERT(linear_extents.GetEndAddress() != 0);
 
@@ -553,7 +553,7 @@ struct KernelCore::Impl {
             Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) -
             aligned_linear_phys_start;
         const VAddr linear_region_start =
-            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
+            memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
                 linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign);
 
         const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start;
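linear_region_phys_to_virt_diff above pins the whole linear mapping to a single constant offset, so translation in either direction is one addition or subtraction. A tiny worked instance with made-up addresses:

#include <cstdint>

int main() {
    constexpr std::uint64_t aligned_linear_phys_start = 0x8000'0000ull;     // made up
    constexpr std::uint64_t linear_region_start = 0xFFFF'8000'0000'0000ull; // made up
    constexpr std::uint64_t diff = linear_region_start - aligned_linear_phys_start;

    constexpr std::uint64_t phys = 0x8123'4000ull; // some linear-mapped physical address
    constexpr std::uint64_t virt = phys + diff;    // phys -> virt, as in SetPairAddress
    static_assert(virt - diff == phys);            // virt -> phys round-trips
}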
@@ -562,7 +562,7 @@ struct KernelCore::Impl {
         {
             PAddr cur_phys_addr = 0;
             u64 cur_size = 0;
-            for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+            for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
                 if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
                     continue;
                 }
@@ -581,55 +581,49 @@ struct KernelCore::Impl {
 
                 const VAddr region_virt_addr =
                     region.GetAddress() + linear_region_phys_to_virt_diff;
-                ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+                ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
                     region_virt_addr, region.GetSize(),
                     GetTypeForVirtualLinearMapping(region.GetType())));
                 region.SetPairAddress(region_virt_addr);
 
                 KMemoryRegion* virt_region =
-                    memory_layout.GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
+                    memory_layout->GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
                 ASSERT(virt_region != nullptr);
                 virt_region->SetPairAddress(region.GetAddress());
             }
         }
 
         // Insert regions for the initial page table region.
-        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
             resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt));
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
             resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize,
             KMemoryRegionType_VirtualDramKernelInitPt));
 
         // All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to
         // some pool partition. Tag them.
-        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+        for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
             if (region.GetType() == (KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped)) {
                 region.SetType(KMemoryRegionType_DramPoolPartition);
             }
         }
 
         // Setup all other memory regions needed to arrange the pool partitions.
-        Init::SetupPoolPartitionMemoryRegions(memory_layout);
+        Init::SetupPoolPartitionMemoryRegions(*memory_layout);
 
         // Cache all linear regions in their own trees for faster access, later.
-        memory_layout.InitializeLinearMemoryRegionTrees(aligned_linear_phys_start,
-                                                        linear_region_start);
+        memory_layout->InitializeLinearMemoryRegionTrees(aligned_linear_phys_start,
+                                                         linear_region_start);
     }
 
-    void InitializeMemoryLayout(const KMemoryLayout& memory_layout) {
-        const auto system_pool = memory_layout.GetKernelSystemPoolRegionPhysicalExtents();
-        const auto applet_pool = memory_layout.GetKernelAppletPoolRegionPhysicalExtents();
-        const auto application_pool = memory_layout.GetKernelApplicationPoolRegionPhysicalExtents();
+    void InitializeMemoryLayout() {
+        const auto system_pool = memory_layout->GetKernelSystemPoolRegionPhysicalExtents();
 
-        // Initialize memory managers
+        // Initialize the memory manager.
         memory_manager = std::make_unique<KMemoryManager>(system);
-        memory_manager->InitializeManager(KMemoryManager::Pool::Application,
-                                          application_pool.GetAddress(),
-                                          application_pool.GetEndAddress());
-        memory_manager->InitializeManager(KMemoryManager::Pool::Applet, applet_pool.GetAddress(),
-                                          applet_pool.GetEndAddress());
-        memory_manager->InitializeManager(KMemoryManager::Pool::System, system_pool.GetAddress(),
-                                          system_pool.GetEndAddress());
+        const auto& management_region = memory_layout->GetPoolManagementRegion();
+        ASSERT(management_region.GetEndAddress() != 0);
+        memory_manager->Initialize(management_region.GetAddress(), management_region.GetSize());
 
         // Setup memory regions for emulated processes
         // TODO(bunnei): These should not be hardcoded regions initialized within the kernel
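The InitializeMemoryLayout hunk above is the headline KMemoryManager change of this merge: three per-pool InitializeManager calls collapse into one Initialize over a pool-management region, with the pool carving presumably moving inside KMemoryManager itself. A sketch of the new call shape, with stand-in types (ManagementRegion and MemoryManagerSketch are illustrative, not the real API):

#include <cassert>
#include <cstdint>
#include <memory>

struct ManagementRegion { // stands in for memory_layout->GetPoolManagementRegion()
    std::uint64_t address = 0x1000;  // hypothetical
    std::uint64_t size = 0x100000;   // hypothetical
};

class MemoryManagerSketch {
public:
    void Initialize(std::uint64_t management_address, std::uint64_t management_size) {
        // The real KMemoryManager would build its per-pool managers from this
        // single metadata region rather than taking one call per pool.
        management_address_ = management_address;
        management_size_ = management_size;
    }

private:
    std::uint64_t management_address_ = 0;
    std::uint64_t management_size_ = 0;
};

int main() {
    ManagementRegion region;
    assert(region.address + region.size != 0); // mirrors the GetEndAddress() != 0 check
    auto manager = std::make_unique<MemoryManagerSketch>();
    manager->Initialize(region.address, region.size);
}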
@@ -770,6 +764,9 @@ struct KernelCore::Impl {
     Kernel::KSharedMemory* irs_shared_mem{};
     Kernel::KSharedMemory* time_shared_mem{};
 
+    // Memory layout
+    std::unique_ptr<KMemoryLayout> memory_layout;
+
     // Threads used for services
     std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
     Common::ThreadWorker service_threads_manager;
@@ -1135,6 +1132,10 @@ const KWorkerTaskManager& KernelCore::WorkerTaskManager() const {
     return impl->worker_task_manager;
 }
 
+const KMemoryLayout& KernelCore::MemoryLayout() const {
+    return *impl->memory_layout;
+}
+
 bool KernelCore::IsPhantomModeForSingleCore() const {
     return impl->IsPhantomModeForSingleCore();
 }
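The new accessor in the final hunk follows the usual pimpl pattern: KernelCore::MemoryLayout() borrows a const reference by dereferencing the owning unique_ptr, which is only valid once initialization has run. A generic sketch of that pattern with stand-in types:

#include <memory>

struct LayoutSketch {}; // stands in for KMemoryLayout

class KernelSketch { // stands in for KernelCore and its Impl
public:
    KernelSketch() : memory_layout{std::make_unique<LayoutSketch>()} {}

    // Mirrors the new KernelCore::MemoryLayout(): callers borrow a const
    // reference; dereferencing a null layout (before initialization) would be UB.
    const LayoutSketch& MemoryLayout() const { return *memory_layout; }

private:
    std::unique_ptr<LayoutSketch> memory_layout;
};

int main() {
    KernelSketch kernel;
    const LayoutSketch& layout = kernel.MemoryLayout();
    (void)layout;
}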