path: root/src/core/hle/kernel/kernel.cpp
Diffstat (limited to 'src/core/hle/kernel/kernel.cpp')
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 322
1 file changed, 296 insertions, 26 deletions
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index a1520e147..8fd990577 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -1,4 +1,4 @@
-// Copyright 2014 Citra Emulator Project
+// Copyright 2021 yuzu Emulator Project
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
@@ -12,6 +12,7 @@
 #include <utility>
 
 #include "common/assert.h"
+#include "common/common_sizes.h"
 #include "common/logging/log.h"
 #include "common/microprofile.h"
 #include "common/thread.h"
@@ -268,45 +269,314 @@ struct KernelCore::Impl {
         return schedulers[thread_id]->GetCurrentThread();
     }
 
+    void DeriveInitialMemoryLayout(KMemoryLayout& memory_layout) {
+        // Insert the root region for the virtual memory tree, from which all other regions will
+        // derive.
+        memory_layout.GetVirtualMemoryRegionTree().InsertDirectly(
+            KernelVirtualAddressSpaceBase,
+            KernelVirtualAddressSpaceBase + KernelVirtualAddressSpaceSize - 1);
+
+        // Insert the root region for the physical memory tree, from which all other regions will
+        // derive.
+        memory_layout.GetPhysicalMemoryRegionTree().InsertDirectly(
+            KernelPhysicalAddressSpaceBase,
+            KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1);
+
+        // Save start and end for ease of use.
+        const VAddr code_start_virt_addr = KernelVirtualAddressCodeBase;
+        const VAddr code_end_virt_addr = KernelVirtualAddressCodeEnd;
+
+        // Setup the containing kernel region.
+        constexpr size_t KernelRegionSize = Common::Size_1_GB;
+        constexpr size_t KernelRegionAlign = Common::Size_1_GB;
+        constexpr VAddr kernel_region_start =
+            Common::AlignDown(code_start_virt_addr, KernelRegionAlign);
+        size_t kernel_region_size = KernelRegionSize;
+        if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) {
+            kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start;
+        }
+        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+            kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel));
+
+        // Setup the code region.
+        constexpr size_t CodeRegionAlign = PageSize;
+        constexpr VAddr code_region_start =
+            Common::AlignDown(code_start_virt_addr, CodeRegionAlign);
+        constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign);
+        constexpr size_t code_region_size = code_region_end - code_region_start;
+        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+            code_region_start, code_region_size, KMemoryRegionType_KernelCode));
+
+        // Setup board-specific device physical regions.
+        Init::SetupDevicePhysicalMemoryRegions(memory_layout);
+
+        // Determine the amount of space needed for the misc region.
+        size_t misc_region_needed_size;
+        {
+            // Each core has a one page stack for all three stack types (Main, Idle, Exception).
+            misc_region_needed_size = Core::Hardware::NUM_CPU_CORES * (3 * (PageSize + PageSize));
+
+            // Account for each auto-map device.
+            for (const auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+                if (region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
+                    // Check that the region is valid.
+                    ASSERT(region.GetEndAddress() != 0);
+
+                    // Account for the region.
+                    misc_region_needed_size +=
+                        PageSize + (Common::AlignUp(region.GetLastAddress(), PageSize) -
+                                    Common::AlignDown(region.GetAddress(), PageSize));
+                }
+            }
+
+            // Multiply the needed size by three, to account for the need for guard space.
+            misc_region_needed_size *= 3;
+        }
+
+        // Decide on the actual size for the misc region.
+        constexpr size_t MiscRegionAlign = KernelAslrAlignment;
+        constexpr size_t MiscRegionMinimumSize = Common::Size_32_MB;
+        const size_t misc_region_size = Common::AlignUp(
+            std::max(misc_region_needed_size, MiscRegionMinimumSize), MiscRegionAlign);
+        ASSERT(misc_region_size > 0);
+
+        // Setup the misc region.
+        const VAddr misc_region_start =
+            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+                misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel);
+        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+            misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc));
+
+        // Setup the stack region.
+        constexpr size_t StackRegionSize = Common::Size_14_MB;
+        constexpr size_t StackRegionAlign = KernelAslrAlignment;
+        const VAddr stack_region_start =
+            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+                StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel);
+        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+            stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack));
+
+        // Determine the size of the resource region.
+        const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit();
+
+        // Determine the size of the slab region.
+        const size_t slab_region_size = Common::AlignUp(KernelSlabHeapSize, PageSize);
+        ASSERT(slab_region_size <= resource_region_size);
+
+        // Setup the slab region.
+        const PAddr code_start_phys_addr = KernelPhysicalAddressCodeBase;
+        const PAddr code_end_phys_addr = code_start_phys_addr + code_region_size;
+        const PAddr slab_start_phys_addr = code_end_phys_addr;
+        const PAddr slab_end_phys_addr = slab_start_phys_addr + slab_region_size;
+        constexpr size_t SlabRegionAlign = KernelAslrAlignment;
+        const size_t slab_region_needed_size =
+            Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) -
+            Common::AlignDown(code_end_phys_addr, SlabRegionAlign);
+        const VAddr slab_region_start =
+            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+                slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) +
+            (code_end_phys_addr % SlabRegionAlign);
+        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+            slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab));
+
+        // Setup the temp region.
+        constexpr size_t TempRegionSize = Common::Size_128_MB;
+        constexpr size_t TempRegionAlign = KernelAslrAlignment;
+        const VAddr temp_region_start =
+            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+                TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel);
+        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize,
+                                                                 KMemoryRegionType_KernelTemp));
+
+        // Automatically map in devices that have auto-map attributes.
+        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+            // We only care about kernel regions.
+            if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) {
+                continue;
+            }
+
+            // Check whether we should map the region.
+            if (!region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
+                continue;
+            }
+
+            // If this region has already been mapped, no need to consider it.
+            if (region.HasTypeAttribute(KMemoryRegionAttr_DidKernelMap)) {
+                continue;
+            }
+
+            // Check that the region is valid.
+            ASSERT(region.GetEndAddress() != 0);
+
+            // Set the attribute to note we've mapped this region.
+            region.SetTypeAttribute(KMemoryRegionAttr_DidKernelMap);
+
+            // Create a virtual pair region and insert it into the tree.
+            const PAddr map_phys_addr = Common::AlignDown(region.GetAddress(), PageSize);
+            const size_t map_size =
+                Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr;
+            const VAddr map_virt_addr =
+                memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
+                    map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize);
+            ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+                map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice));
+            region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr);
+        }
+
+        Init::SetupDramPhysicalMemoryRegions(memory_layout);
+
+        // Insert a physical region for the kernel code region.
+        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+            code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode));
+
+        // Insert a physical region for the kernel slab region.
+        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+            slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab));
+
+        // Determine size available for kernel page table heaps, requiring > 8 MB.
+        const PAddr resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
+        const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr;
+        ASSERT(page_table_heap_size / Common::Size_4_MB > 2);
+
+        // Insert a physical region for the kernel page table heap region
+        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+            slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap));
+
+        // All DRAM regions that we haven't tagged by this point will be mapped under the linear
+        // mapping. Tag them.
+        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+            if (region.GetType() == KMemoryRegionType_Dram) {
+                // Check that the region is valid.
+                ASSERT(region.GetEndAddress() != 0);
+
+                // Set the linear map attribute.
+                region.SetTypeAttribute(KMemoryRegionAttr_LinearMapped);
+            }
+        }
+
+        // Get the linear region extents.
+        const auto linear_extents =
+            memory_layout.GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+                KMemoryRegionAttr_LinearMapped);
+        ASSERT(linear_extents.GetEndAddress() != 0);
+
+        // Setup the linear mapping region.
+        constexpr size_t LinearRegionAlign = Common::Size_1_GB;
+        const PAddr aligned_linear_phys_start =
+            Common::AlignDown(linear_extents.GetAddress(), LinearRegionAlign);
+        const size_t linear_region_size =
+            Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) -
+            aligned_linear_phys_start;
+        const VAddr linear_region_start =
+            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
+                linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign);
+
+        const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start;
+
+        // Map and create regions for all the linearly-mapped data.
+        {
+            PAddr cur_phys_addr = 0;
+            u64 cur_size = 0;
+            for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+                if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
+                    continue;
+                }
+
+                ASSERT(region.GetEndAddress() != 0);
+
+                if (cur_size == 0) {
+                    cur_phys_addr = region.GetAddress();
+                    cur_size = region.GetSize();
+                } else if (cur_phys_addr + cur_size == region.GetAddress()) {
+                    cur_size += region.GetSize();
+                } else {
+                    cur_phys_addr = region.GetAddress();
+                    cur_size = region.GetSize();
+                }
+
+                const VAddr region_virt_addr =
+                    region.GetAddress() + linear_region_phys_to_virt_diff;
+                ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+                    region_virt_addr, region.GetSize(),
+                    GetTypeForVirtualLinearMapping(region.GetType())));
+                region.SetPairAddress(region_virt_addr);
+
+                KMemoryRegion* virt_region =
+                    memory_layout.GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
+                ASSERT(virt_region != nullptr);
+                virt_region->SetPairAddress(region.GetAddress());
+            }
+        }
+
+        // Insert regions for the initial page table region.
+        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+            resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt));
+        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+            resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize,
+            KMemoryRegionType_VirtualDramKernelInitPt));
+
+        // All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to
+        // some pool partition. Tag them.
+        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+            if (region.GetType() == (KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped)) {
+                region.SetType(KMemoryRegionType_DramPoolPartition);
+            }
+        }
+
+        // Setup all other memory regions needed to arrange the pool partitions.
+        Init::SetupPoolPartitionMemoryRegions(memory_layout);
+
+        // Cache all linear regions in their own trees for faster access, later.
+        memory_layout.InitializeLinearMemoryRegionTrees(aligned_linear_phys_start,
+                                                        linear_region_start);
+    }
+
     void InitializeMemoryLayout() {
-        // Initialize memory layout
-        constexpr KMemoryLayout layout{KMemoryLayout::GetDefaultLayout()};
+        // Derive the initial memory layout from the emulated board
+        KMemoryLayout memory_layout;
+        DeriveInitialMemoryLayout(memory_layout);
+
+        const auto system_pool = memory_layout.GetKernelSystemPoolRegionPhysicalExtents();
+        const auto applet_pool = memory_layout.GetKernelAppletPoolRegionPhysicalExtents();
+        const auto application_pool = memory_layout.GetKernelApplicationPoolRegionPhysicalExtents();
+
+        // Initialize memory managers
+        memory_manager = std::make_unique<KMemoryManager>();
+        memory_manager->InitializeManager(KMemoryManager::Pool::Application,
+                                          application_pool.GetAddress(),
+                                          application_pool.GetEndAddress());
+        memory_manager->InitializeManager(KMemoryManager::Pool::Applet, applet_pool.GetAddress(),
+                                          applet_pool.GetEndAddress());
+        memory_manager->InitializeManager(KMemoryManager::Pool::System, system_pool.GetAddress(),
+                                          system_pool.GetEndAddress());
+
+        // Setup memory regions for emulated processes
+        // TODO(bunnei): These should not be hardcoded regions initialized within the kernel
         constexpr std::size_t hid_size{0x40000};
         constexpr std::size_t font_size{0x1100000};
         constexpr std::size_t irs_size{0x8000};
         constexpr std::size_t time_size{0x1000};
-        constexpr PAddr hid_addr{layout.System().StartAddress()};
-        constexpr PAddr font_pa{layout.System().StartAddress() + hid_size};
-        constexpr PAddr irs_addr{layout.System().StartAddress() + hid_size + font_size};
-        constexpr PAddr time_addr{layout.System().StartAddress() + hid_size + font_size + irs_size};
 
-        // Initialize memory manager
-        memory_manager = std::make_unique<KMemoryManager>();
-        memory_manager->InitializeManager(KMemoryManager::Pool::Application,
-                                          layout.Application().StartAddress(),
-                                          layout.Application().EndAddress());
-        memory_manager->InitializeManager(KMemoryManager::Pool::Applet,
-                                          layout.Applet().StartAddress(),
-                                          layout.Applet().EndAddress());
-        memory_manager->InitializeManager(KMemoryManager::Pool::System,
-                                          layout.System().StartAddress(),
-                                          layout.System().EndAddress());
+        const PAddr hid_phys_addr{system_pool.GetAddress()};
+        const PAddr font_phys_addr{system_pool.GetAddress() + hid_size};
+        const PAddr irs_phys_addr{system_pool.GetAddress() + hid_size + font_size};
+        const PAddr time_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size};
 
         hid_shared_mem = Kernel::KSharedMemory::Create(
-            system.Kernel(), system.DeviceMemory(), nullptr, {hid_addr, hid_size / PageSize},
-            KMemoryPermission::None, KMemoryPermission::Read, hid_addr, hid_size,
+            system.Kernel(), system.DeviceMemory(), nullptr, {hid_phys_addr, hid_size / PageSize},
+            KMemoryPermission::None, KMemoryPermission::Read, hid_phys_addr, hid_size,
             "HID:SharedMemory");
         font_shared_mem = Kernel::KSharedMemory::Create(
-            system.Kernel(), system.DeviceMemory(), nullptr, {font_pa, font_size / PageSize},
-            KMemoryPermission::None, KMemoryPermission::Read, font_pa, font_size,
+            system.Kernel(), system.DeviceMemory(), nullptr, {font_phys_addr, font_size / PageSize},
+            KMemoryPermission::None, KMemoryPermission::Read, font_phys_addr, font_size,
             "Font:SharedMemory");
         irs_shared_mem = Kernel::KSharedMemory::Create(
-            system.Kernel(), system.DeviceMemory(), nullptr, {irs_addr, irs_size / PageSize},
-            KMemoryPermission::None, KMemoryPermission::Read, irs_addr, irs_size,
+            system.Kernel(), system.DeviceMemory(), nullptr, {irs_phys_addr, irs_size / PageSize},
+            KMemoryPermission::None, KMemoryPermission::Read, irs_phys_addr, irs_size,
             "IRS:SharedMemory");
         time_shared_mem = Kernel::KSharedMemory::Create(
-            system.Kernel(), system.DeviceMemory(), nullptr, {time_addr, time_size / PageSize},
-            KMemoryPermission::None, KMemoryPermission::Read, time_addr, time_size,
+            system.Kernel(), system.DeviceMemory(), nullptr, {time_phys_addr, time_size / PageSize},
+            KMemoryPermission::None, KMemoryPermission::Read, time_phys_addr, time_size,
             "Time:SharedMemory");
 
         // Allocate slab heaps