| author | 2021-04-10 00:02:52 -0700 |
|---|---|
| committer | 2021-04-10 00:02:52 -0700 |
| commit | b04877dd952d7da11647f16626952d7325e4e900 (patch) |
| tree | d097ebea3b24a2c815015c17e6c5fd89c69ce045 |
| parent | Merge pull request #6171 from german77/services (diff) |
| parent | hle: kernel: Breakup InitializeMemoryLayout. (diff) |
Merge pull request #6099 from bunnei/derive-mem
Kernel Rework: Derive memory regions from board layout.
26 files changed, 2139 insertions, 173 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 66931ac97..9f8dafa3b 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -110,6 +110,7 @@ add_library(common STATIC
| 110 | cityhash.h | 110 | cityhash.h |
| 111 | common_funcs.h | 111 | common_funcs.h |
| 112 | common_paths.h | 112 | common_paths.h |
| 113 | common_sizes.h | ||
| 113 | common_types.h | 114 | common_types.h |
| 114 | concepts.h | 115 | concepts.h |
| 115 | div_ceil.h | 116 | div_ceil.h |
diff --git a/src/common/common_sizes.h b/src/common/common_sizes.h
new file mode 100644
index 000000000..7e9fd968b
--- /dev/null
+++ b/src/common/common_sizes.h
@@ -0,0 +1,43 @@
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <limits> | ||
| 8 | |||
| 9 | #include "common/common_types.h" | ||
| 10 | |||
| 11 | namespace Common { | ||
| 12 | |||
| 13 | enum : u64 { | ||
| 14 | Size_1_KB = 0x400ULL, | ||
| 15 | Size_64_KB = 64ULL * Size_1_KB, | ||
| 16 | Size_128_KB = 128ULL * Size_1_KB, | ||
| 17 | Size_1_MB = 0x100000ULL, | ||
| 18 | Size_2_MB = 2ULL * Size_1_MB, | ||
| 19 | Size_4_MB = 4ULL * Size_1_MB, | ||
| 20 | Size_5_MB = 5ULL * Size_1_MB, | ||
| 21 | Size_14_MB = 14ULL * Size_1_MB, | ||
| 22 | Size_32_MB = 32ULL * Size_1_MB, | ||
| 23 | Size_33_MB = 33ULL * Size_1_MB, | ||
| 24 | Size_128_MB = 128ULL * Size_1_MB, | ||
| 25 | Size_448_MB = 448ULL * Size_1_MB, | ||
| 26 | Size_507_MB = 507ULL * Size_1_MB, | ||
| 27 | Size_562_MB = 562ULL * Size_1_MB, | ||
| 28 | Size_1554_MB = 1554ULL * Size_1_MB, | ||
| 29 | Size_2048_MB = 2048ULL * Size_1_MB, | ||
| 30 | Size_2193_MB = 2193ULL * Size_1_MB, | ||
| 31 | Size_3285_MB = 3285ULL * Size_1_MB, | ||
| 32 | Size_4916_MB = 4916ULL * Size_1_MB, | ||
| 33 | Size_1_GB = 0x40000000ULL, | ||
| 34 | Size_2_GB = 2ULL * Size_1_GB, | ||
| 35 | Size_4_GB = 4ULL * Size_1_GB, | ||
| 36 | Size_6_GB = 6ULL * Size_1_GB, | ||
| 37 | Size_8_GB = 8ULL * Size_1_GB, | ||
| 38 | Size_64_GB = 64ULL * Size_1_GB, | ||
| 39 | Size_512_GB = 512ULL * Size_1_GB, | ||
| 40 | Size_Invalid = std::numeric_limits<u64>::max(), | ||
| 41 | }; | ||
| 42 | |||
| 43 | } // namespace Common | ||
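These constants are plain `u64` enumerators, so they compose with ordinary compile-time arithmetic. A minimal sketch (not part of the change) checking a few relationships implied by the definitions above:

```cpp
#include <limits>

#include "common/common_sizes.h"
#include "common/common_types.h"

static_assert(Common::Size_1_GB == 1024ULL * Common::Size_1_MB);
static_assert(Common::Size_4_GB == 0x100000000ULL);
// Size_Invalid doubles as a sentinel for entries without a fixed address,
// e.g. in the KAddressSpaceInfo table later in this change.
static_assert(Common::Size_Invalid == std::numeric_limits<u64>::max());
```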
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 17f251c37..0c1f5b0c8 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -141,6 +141,9 @@ add_library(core STATIC
| 141 | hardware_interrupt_manager.h | 141 | hardware_interrupt_manager.h |
| 142 | hle/ipc.h | 142 | hle/ipc.h |
| 143 | hle/ipc_helpers.h | 143 | hle/ipc_helpers.h |
| 144 | hle/kernel/board/nintendo/nx/k_system_control.cpp | ||
| 145 | hle/kernel/board/nintendo/nx/k_system_control.h | ||
| 146 | hle/kernel/board/nintendo/nx/secure_monitor.h | ||
| 144 | hle/kernel/client_port.cpp | 147 | hle/kernel/client_port.cpp |
| 145 | hle/kernel/client_port.h | 148 | hle/kernel/client_port.h |
| 146 | hle/kernel/client_session.cpp | 149 | hle/kernel/client_session.cpp |
@@ -169,9 +172,13 @@ add_library(core STATIC
| 169 | hle/kernel/k_memory_block.h | 172 | hle/kernel/k_memory_block.h |
| 170 | hle/kernel/k_memory_block_manager.cpp | 173 | hle/kernel/k_memory_block_manager.cpp |
| 171 | hle/kernel/k_memory_block_manager.h | 174 | hle/kernel/k_memory_block_manager.h |
| 175 | hle/kernel/k_memory_layout.cpp | ||
| 176 | hle/kernel/k_memory_layout.board.nintendo_nx.cpp | ||
| 172 | hle/kernel/k_memory_layout.h | 177 | hle/kernel/k_memory_layout.h |
| 173 | hle/kernel/k_memory_manager.cpp | 178 | hle/kernel/k_memory_manager.cpp |
| 174 | hle/kernel/k_memory_manager.h | 179 | hle/kernel/k_memory_manager.h |
| 180 | hle/kernel/k_memory_region.h | ||
| 181 | hle/kernel/k_memory_region_type.h | ||
| 175 | hle/kernel/k_page_bitmap.h | 182 | hle/kernel/k_page_bitmap.h |
| 176 | hle/kernel/k_page_heap.cpp | 183 | hle/kernel/k_page_heap.cpp |
| 177 | hle/kernel/k_page_heap.h | 184 | hle/kernel/k_page_heap.h |
@@ -196,11 +203,11 @@ add_library(core STATIC
| 196 | hle/kernel/k_spin_lock.h | 203 | hle/kernel/k_spin_lock.h |
| 197 | hle/kernel/k_synchronization_object.cpp | 204 | hle/kernel/k_synchronization_object.cpp |
| 198 | hle/kernel/k_synchronization_object.h | 205 | hle/kernel/k_synchronization_object.h |
| 199 | hle/kernel/k_system_control.cpp | ||
| 200 | hle/kernel/k_system_control.h | 206 | hle/kernel/k_system_control.h |
| 201 | hle/kernel/k_thread.cpp | 207 | hle/kernel/k_thread.cpp |
| 202 | hle/kernel/k_thread.h | 208 | hle/kernel/k_thread.h |
| 203 | hle/kernel/k_thread_queue.h | 209 | hle/kernel/k_thread_queue.h |
| 210 | hle/kernel/k_trace.h | ||
| 204 | hle/kernel/k_writable_event.cpp | 211 | hle/kernel/k_writable_event.cpp |
| 205 | hle/kernel/k_writable_event.h | 212 | hle/kernel/k_writable_event.h |
| 206 | hle/kernel/kernel.cpp | 213 | hle/kernel/kernel.cpp |
diff --git a/src/core/hle/kernel/arch/arm64/k_memory_region_device_types.inc b/src/core/hle/kernel/arch/arm64/k_memory_region_device_types.inc
new file mode 100644
index 000000000..857b512ba
--- /dev/null
+++ b/src/core/hle/kernel/arch/arm64/k_memory_region_device_types.inc
@@ -0,0 +1,20 @@
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | // All architectures must define NumArchitectureDeviceRegions. | ||
| 6 | constexpr inline const auto NumArchitectureDeviceRegions = 3; | ||
| 7 | |||
| 8 | constexpr inline const auto KMemoryRegionType_Uart = | ||
| 9 | KMemoryRegionType_ArchDeviceBase.DeriveSparse(0, NumArchitectureDeviceRegions, 0); | ||
| 10 | constexpr inline const auto KMemoryRegionType_InterruptCpuInterface = | ||
| 11 | KMemoryRegionType_ArchDeviceBase.DeriveSparse(0, NumArchitectureDeviceRegions, 1) | ||
| 12 | .SetAttribute(KMemoryRegionAttr_NoUserMap); | ||
| 13 | constexpr inline const auto KMemoryRegionType_InterruptDistributor = | ||
| 14 | KMemoryRegionType_ArchDeviceBase.DeriveSparse(0, NumArchitectureDeviceRegions, 2) | ||
| 15 | .SetAttribute(KMemoryRegionAttr_NoUserMap); | ||
| 16 | static_assert(KMemoryRegionType_Uart.GetValue() == (0x1D)); | ||
| 17 | static_assert(KMemoryRegionType_InterruptCpuInterface.GetValue() == | ||
| 18 | (0x2D | KMemoryRegionAttr_NoUserMap)); | ||
| 19 | static_assert(KMemoryRegionType_InterruptDistributor.GetValue() == | ||
| 20 | (0x4D | KMemoryRegionAttr_NoUserMap)); | ||
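The `static_assert` values pin down the derived type IDs. A hypothetical simplification (not the real `KMemoryRegionType` API) reproduces them if one assumes `KMemoryRegionType_ArchDeviceBase` carries value 0xD with its low four bits in use, and that `DeriveSparse` sets one additional bit per index above those bits:

```cpp
#include <cstdint>

// Hypothetical model of the sparse derivation, for illustration only.
constexpr uint32_t DeriveSparseValue(uint32_t parent, uint32_t used_bits, uint32_t index) {
    return parent | (1u << (used_bits + index));
}

static_assert(DeriveSparseValue(0xD, 4, 0) == 0x1D); // Uart
static_assert(DeriveSparseValue(0xD, 4, 1) == 0x2D); // InterruptCpuInterface
static_assert(DeriveSparseValue(0xD, 4, 2) == 0x4D); // InterruptDistributor
```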
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc b/src/core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc
new file mode 100644
index 000000000..58d6c0b16
--- /dev/null
+++ b/src/core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc
@@ -0,0 +1,52 @@
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | // All boards must define NumBoardDeviceRegions. | ||
| 6 | constexpr inline const auto NumBoardDeviceRegions = 6; | ||
| 7 | // UNUSED: .Derive(NumBoardDeviceRegions, 0); | ||
| 8 | constexpr inline const auto KMemoryRegionType_MemoryController = | ||
| 9 | KMemoryRegionType_BoardDeviceBase.Derive(NumBoardDeviceRegions, 1) | ||
| 10 | .SetAttribute(KMemoryRegionAttr_NoUserMap); | ||
| 11 | constexpr inline const auto KMemoryRegionType_MemoryController1 = | ||
| 12 | KMemoryRegionType_BoardDeviceBase.Derive(NumBoardDeviceRegions, 2) | ||
| 13 | .SetAttribute(KMemoryRegionAttr_NoUserMap); | ||
| 14 | constexpr inline const auto KMemoryRegionType_MemoryController0 = | ||
| 15 | KMemoryRegionType_BoardDeviceBase.Derive(NumBoardDeviceRegions, 3) | ||
| 16 | .SetAttribute(KMemoryRegionAttr_NoUserMap); | ||
| 17 | constexpr inline const auto KMemoryRegionType_PowerManagementController = | ||
| 18 | KMemoryRegionType_BoardDeviceBase.Derive(NumBoardDeviceRegions, 4).DeriveTransition(); | ||
| 19 | constexpr inline const auto KMemoryRegionType_LegacyLpsDevices = | ||
| 20 | KMemoryRegionType_BoardDeviceBase.Derive(NumBoardDeviceRegions, 5); | ||
| 21 | static_assert(KMemoryRegionType_MemoryController.GetValue() == | ||
| 22 | (0x55 | KMemoryRegionAttr_NoUserMap)); | ||
| 23 | static_assert(KMemoryRegionType_MemoryController1.GetValue() == | ||
| 24 | (0x65 | KMemoryRegionAttr_NoUserMap)); | ||
| 25 | static_assert(KMemoryRegionType_MemoryController0.GetValue() == | ||
| 26 | (0x95 | KMemoryRegionAttr_NoUserMap)); | ||
| 27 | static_assert(KMemoryRegionType_PowerManagementController.GetValue() == (0x1A5)); | ||
| 28 | |||
| 29 | static_assert(KMemoryRegionType_LegacyLpsDevices.GetValue() == 0xC5); | ||
| 30 | |||
| 31 | constexpr inline const auto NumLegacyLpsDevices = 7; | ||
| 32 | constexpr inline const auto KMemoryRegionType_LegacyLpsExceptionVectors = | ||
| 33 | KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 0); | ||
| 34 | constexpr inline const auto KMemoryRegionType_LegacyLpsIram = | ||
| 35 | KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 1); | ||
| 36 | constexpr inline const auto KMemoryRegionType_LegacyLpsFlowController = | ||
| 37 | KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 2); | ||
| 38 | constexpr inline const auto KMemoryRegionType_LegacyLpsPrimaryICtlr = | ||
| 39 | KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 3); | ||
| 40 | constexpr inline const auto KMemoryRegionType_LegacyLpsSemaphore = | ||
| 41 | KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 4); | ||
| 42 | constexpr inline const auto KMemoryRegionType_LegacyLpsAtomics = | ||
| 43 | KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 5); | ||
| 44 | constexpr inline const auto KMemoryRegionType_LegacyLpsClkRst = | ||
| 45 | KMemoryRegionType_LegacyLpsDevices.Derive(NumLegacyLpsDevices, 6); | ||
| 46 | static_assert(KMemoryRegionType_LegacyLpsExceptionVectors.GetValue() == 0x3C5); | ||
| 47 | static_assert(KMemoryRegionType_LegacyLpsIram.GetValue() == 0x5C5); | ||
| 48 | static_assert(KMemoryRegionType_LegacyLpsFlowController.GetValue() == 0x6C5); | ||
| 49 | static_assert(KMemoryRegionType_LegacyLpsPrimaryICtlr.GetValue() == 0x9C5); | ||
| 50 | static_assert(KMemoryRegionType_LegacyLpsSemaphore.GetValue() == 0xAC5); | ||
| 51 | static_assert(KMemoryRegionType_LegacyLpsAtomics.GetValue() == 0xCC5); | ||
| 52 | static_assert(KMemoryRegionType_LegacyLpsClkRst.GetValue() == 0x11C5); | ||
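The non-sparse `Derive` values follow a denser pattern. A hypothetical model (again, not the real API): each child ORs the parent value with the index-th smallest integer that has exactly two bits set, shifted above the bits the parent already occupies (eight for `KMemoryRegionType_LegacyLpsDevices` == 0xC5). The model reproduces every `static_assert` above; a self-contained sketch:

```cpp
#include <cstdint>

constexpr uint32_t PopCount(uint32_t v) {
    uint32_t count = 0;
    for (; v != 0; v >>= 1) {
        count += v & 1;
    }
    return count;
}

// The n-th smallest integer with exactly two bits set: 3, 5, 6, 9, 10, 12, 17, ...
constexpr uint32_t NthTwoBitPattern(uint32_t n) {
    for (uint32_t v = 3;; ++v) {
        if (PopCount(v) == 2 && n-- == 0) {
            return v;
        }
    }
}

// Hypothetical reconstruction of Derive(n, index), for illustration only.
constexpr uint32_t DeriveValue(uint32_t parent, uint32_t used_bits, uint32_t index) {
    return parent | (NthTwoBitPattern(index) << used_bits);
}

static_assert(DeriveValue(0xC5, 8, 0) == 0x3C5);  // LegacyLpsExceptionVectors
static_assert(DeriveValue(0xC5, 8, 1) == 0x5C5);  // LegacyLpsIram
static_assert(DeriveValue(0xC5, 8, 6) == 0x11C5); // LegacyLpsClkRst
```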
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
new file mode 100644
index 000000000..86472b5ce
--- /dev/null
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.cpp
@@ -0,0 +1,164 @@
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <random> | ||
| 6 | |||
| 7 | #include "common/common_sizes.h" | ||
| 8 | #include "core/hle/kernel/board/nintendo/nx/k_system_control.h" | ||
| 9 | #include "core/hle/kernel/board/nintendo/nx/secure_monitor.h" | ||
| 10 | #include "core/hle/kernel/k_trace.h" | ||
| 11 | |||
| 12 | namespace Kernel::Board::Nintendo::Nx { | ||
| 13 | |||
| 14 | namespace impl { | ||
| 15 | |||
| 16 | constexpr const std::size_t RequiredNonSecureSystemMemorySizeVi = 0x2238 * 4 * 1024; | ||
| 17 | constexpr const std::size_t RequiredNonSecureSystemMemorySizeNvservices = 0x710 * 4 * 1024; | ||
| 18 | constexpr const std::size_t RequiredNonSecureSystemMemorySizeMisc = 0x80 * 4 * 1024; | ||
| 19 | |||
| 20 | } // namespace impl | ||
| 21 | |||
| 22 | constexpr const std::size_t RequiredNonSecureSystemMemorySize = | ||
| 23 | impl::RequiredNonSecureSystemMemorySizeVi + impl::RequiredNonSecureSystemMemorySizeNvservices + | ||
| 24 | impl::RequiredNonSecureSystemMemorySizeMisc; | ||
| 25 | |||
| 26 | namespace { | ||
| 27 | |||
| 28 | u32 GetMemoryModeForInit() { | ||
| 29 | return 0x01; | ||
| 30 | } | ||
| 31 | |||
| 32 | u32 GetMemorySizeForInit() { | ||
| 33 | return 0; | ||
| 34 | } | ||
| 35 | |||
| 36 | Smc::MemoryArrangement GetMemoryArrangeForInit() { | ||
| 37 | switch (GetMemoryModeForInit() & 0x3F) { | ||
| 38 | case 0x01: | ||
| 39 | default: | ||
| 40 | return Smc::MemoryArrangement_4GB; | ||
| 41 | case 0x02: | ||
| 42 | return Smc::MemoryArrangement_4GBForAppletDev; | ||
| 43 | case 0x03: | ||
| 44 | return Smc::MemoryArrangement_4GBForSystemDev; | ||
| 45 | case 0x11: | ||
| 46 | return Smc::MemoryArrangement_6GB; | ||
| 47 | case 0x12: | ||
| 48 | return Smc::MemoryArrangement_6GBForAppletDev; | ||
| 49 | case 0x21: | ||
| 50 | return Smc::MemoryArrangement_8GB; | ||
| 51 | } | ||
| 52 | } | ||
| 53 | } // namespace | ||
| 54 | |||
| 55 | // Initialization. | ||
| 56 | size_t KSystemControl::Init::GetIntendedMemorySize() { | ||
| 57 | switch (GetMemorySizeForInit()) { | ||
| 58 | case Smc::MemorySize_4GB: | ||
| 59 | default: // All invalid modes should go to 4GB. | ||
| 60 | return Common::Size_4_GB; | ||
| 61 | case Smc::MemorySize_6GB: | ||
| 62 | return Common::Size_6_GB; | ||
| 63 | case Smc::MemorySize_8GB: | ||
| 64 | return Common::Size_8_GB; | ||
| 65 | } | ||
| 66 | } | ||
| 67 | |||
| 68 | PAddr KSystemControl::Init::GetKernelPhysicalBaseAddress(u64 base_address) { | ||
| 69 | return base_address; | ||
| 70 | } | ||
| 71 | |||
| 72 | bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() { | ||
| 73 | return true; | ||
| 74 | } | ||
| 75 | |||
| 76 | std::size_t KSystemControl::Init::GetApplicationPoolSize() { | ||
| 77 | // Get the base pool size. | ||
| 78 | const size_t base_pool_size = []() -> size_t { | ||
| 79 | switch (GetMemoryArrangeForInit()) { | ||
| 80 | case Smc::MemoryArrangement_4GB: | ||
| 81 | default: | ||
| 82 | return Common::Size_3285_MB; | ||
| 83 | case Smc::MemoryArrangement_4GBForAppletDev: | ||
| 84 | return Common::Size_2048_MB; | ||
| 85 | case Smc::MemoryArrangement_4GBForSystemDev: | ||
| 86 | return Common::Size_3285_MB; | ||
| 87 | case Smc::MemoryArrangement_6GB: | ||
| 88 | return Common::Size_4916_MB; | ||
| 89 | case Smc::MemoryArrangement_6GBForAppletDev: | ||
| 90 | return Common::Size_3285_MB; | ||
| 91 | case Smc::MemoryArrangement_8GB: | ||
| 92 | return Common::Size_4916_MB; | ||
| 93 | } | ||
| 94 | }(); | ||
| 95 | |||
| 96 | // Return (possibly) adjusted size. | ||
| 97 | return base_pool_size; | ||
| 98 | } | ||
| 99 | |||
| 100 | size_t KSystemControl::Init::GetAppletPoolSize() { | ||
| 101 | // Get the base pool size. | ||
| 102 | const size_t base_pool_size = []() -> size_t { | ||
| 103 | switch (GetMemoryArrangeForInit()) { | ||
| 104 | case Smc::MemoryArrangement_4GB: | ||
| 105 | default: | ||
| 106 | return Common::Size_507_MB; | ||
| 107 | case Smc::MemoryArrangement_4GBForAppletDev: | ||
| 108 | return Common::Size_1554_MB; | ||
| 109 | case Smc::MemoryArrangement_4GBForSystemDev: | ||
| 110 | return Common::Size_448_MB; | ||
| 111 | case Smc::MemoryArrangement_6GB: | ||
| 112 | return Common::Size_562_MB; | ||
| 113 | case Smc::MemoryArrangement_6GBForAppletDev: | ||
| 114 | return Common::Size_2193_MB; | ||
| 115 | case Smc::MemoryArrangement_8GB: | ||
| 116 | return Common::Size_2193_MB; | ||
| 117 | } | ||
| 118 | }(); | ||
| 119 | |||
| 120 | // Return (possibly) adjusted size. | ||
| 121 | constexpr size_t ExtraSystemMemoryForAtmosphere = Common::Size_33_MB; | ||
| 122 | return base_pool_size - ExtraSystemMemoryForAtmosphere - KTraceBufferSize; | ||
| 123 | } | ||
| 124 | |||
| 125 | size_t KSystemControl::Init::GetMinimumNonSecureSystemPoolSize() { | ||
| 126 | // Verify that our minimum is at least as large as Nintendo's. | ||
| 127 | constexpr size_t MinimumSize = RequiredNonSecureSystemMemorySize; | ||
| 128 | static_assert(MinimumSize >= 0x29C8000); | ||
| 129 | |||
| 130 | return MinimumSize; | ||
| 131 | } | ||
| 132 | |||
| 133 | namespace { | ||
| 134 | template <typename F> | ||
| 135 | u64 GenerateUniformRange(u64 min, u64 max, F f) { | ||
| 136 | // Handle the case where the difference is too large to represent. | ||
| 137 | if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) { | ||
| 138 | return f(); | ||
| 139 | } | ||
| 140 | |||
| 141 | // Iterate until we get a value in range. | ||
| 142 | const u64 range_size = ((max + 1) - min); | ||
| 143 | const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size; | ||
| 144 | while (true) { | ||
| 145 | if (const u64 rnd = f(); rnd < effective_max) { | ||
| 146 | return min + (rnd % range_size); | ||
| 147 | } | ||
| 148 | } | ||
| 149 | } | ||
| 150 | |||
| 151 | } // Anonymous namespace | ||
| 152 | |||
| 153 | u64 KSystemControl::GenerateRandomU64() { | ||
| 154 | static std::random_device device; | ||
| 155 | static std::mt19937 gen(device()); | ||
| 156 | static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max()); | ||
| 157 | return distribution(gen); | ||
| 158 | } | ||
| 159 | |||
| 160 | u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) { | ||
| 161 | return GenerateUniformRange(min, max, GenerateRandomU64); | ||
| 162 | } | ||
| 163 | |||
| 164 | } // namespace Kernel::Board::Nintendo::Nx | ||
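`GenerateUniformRange` is a standard rejection sampler: reducing a raw draw with `rnd % range_size` alone would over-represent low residues whenever `range_size` does not divide 2^64, so draws at or above `effective_max` are discarded and retried. A self-contained sketch of the same idea (names assumed, using the standard `<random>` engine):

```cpp
#include <cstdint>
#include <limits>
#include <random>

// Unbiased draw from [min, max], mirroring GenerateUniformRange above.
uint64_t UniformInRange(uint64_t min, uint64_t max, std::mt19937_64& gen) {
    // Full-domain range: every engine output is already uniform.
    if (min == 0 && max == std::numeric_limits<uint64_t>::max()) {
        return gen();
    }
    const uint64_t range_size = (max + 1) - min;
    // Largest multiple of range_size that fits; draws beyond it are the
    // ones that would bias the result, so reject and retry them.
    const uint64_t effective_max =
        (std::numeric_limits<uint64_t>::max() / range_size) * range_size;
    while (true) {
        if (const uint64_t rnd = gen(); rnd < effective_max) {
            return min + (rnd % range_size);
        }
    }
}
```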
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
new file mode 100644
index 000000000..52f230ced
--- /dev/null
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
@@ -0,0 +1,28 @@
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include "common/common_types.h" | ||
| 8 | |||
| 9 | namespace Kernel::Board::Nintendo::Nx { | ||
| 10 | |||
| 11 | class KSystemControl { | ||
| 12 | public: | ||
| 13 | class Init { | ||
| 14 | public: | ||
| 15 | // Initialization. | ||
| 16 | static std::size_t GetIntendedMemorySize(); | ||
| 17 | static PAddr GetKernelPhysicalBaseAddress(u64 base_address); | ||
| 18 | static bool ShouldIncreaseThreadResourceLimit(); | ||
| 19 | static std::size_t GetApplicationPoolSize(); | ||
| 20 | static std::size_t GetAppletPoolSize(); | ||
| 21 | static std::size_t GetMinimumNonSecureSystemPoolSize(); | ||
| 22 | }; | ||
| 23 | |||
| 24 | static u64 GenerateRandomRange(u64 min, u64 max); | ||
| 25 | static u64 GenerateRandomU64(); | ||
| 26 | }; | ||
| 27 | |||
| 28 | } // namespace Kernel::Board::Nintendo::Nx | ||
diff --git a/src/core/hle/kernel/board/nintendo/nx/secure_monitor.h b/src/core/hle/kernel/board/nintendo/nx/secure_monitor.h
new file mode 100644
index 000000000..0c366b252
--- /dev/null
+++ b/src/core/hle/kernel/board/nintendo/nx/secure_monitor.h
@@ -0,0 +1,26 @@
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include "common/common_types.h" | ||
| 8 | |||
| 9 | namespace Kernel::Board::Nintendo::Nx::Smc { | ||
| 10 | |||
| 11 | enum MemorySize { | ||
| 12 | MemorySize_4GB = 0, | ||
| 13 | MemorySize_6GB = 1, | ||
| 14 | MemorySize_8GB = 2, | ||
| 15 | }; | ||
| 16 | |||
| 17 | enum MemoryArrangement { | ||
| 18 | MemoryArrangement_4GB = 0, | ||
| 19 | MemoryArrangement_4GBForAppletDev = 1, | ||
| 20 | MemoryArrangement_4GBForSystemDev = 2, | ||
| 21 | MemoryArrangement_6GB = 3, | ||
| 22 | MemoryArrangement_6GBForAppletDev = 4, | ||
| 23 | MemoryArrangement_8GB = 5, | ||
| 24 | }; | ||
| 25 | |||
| 26 | } // namespace Kernel::Board::Nintendo::Nx::Smc | ||
diff --git a/src/core/hle/kernel/k_address_space_info.cpp b/src/core/hle/kernel/k_address_space_info.cpp
index 24944d15b..c7549f7a2 100644
--- a/src/core/hle/kernel/k_address_space_info.cpp
+++ b/src/core/hle/kernel/k_address_space_info.cpp
@@ -5,45 +5,34 @@
| 5 | #include <array> | 5 | #include <array> |
| 6 | 6 | ||
| 7 | #include "common/assert.h" | 7 | #include "common/assert.h" |
| 8 | #include "common/common_sizes.h" | ||
| 8 | #include "core/hle/kernel/k_address_space_info.h" | 9 | #include "core/hle/kernel/k_address_space_info.h" |
| 9 | 10 | ||
| 10 | namespace Kernel { | 11 | namespace Kernel { |
| 11 | 12 | ||
| 12 | namespace { | 13 | namespace { |
| 13 | 14 | ||
| 14 | enum : u64 { | ||
| 15 | Size_1_MB = 0x100000, | ||
| 16 | Size_2_MB = 2 * Size_1_MB, | ||
| 17 | Size_128_MB = 128 * Size_1_MB, | ||
| 18 | Size_1_GB = 0x40000000, | ||
| 19 | Size_2_GB = 2 * Size_1_GB, | ||
| 20 | Size_4_GB = 4 * Size_1_GB, | ||
| 21 | Size_6_GB = 6 * Size_1_GB, | ||
| 22 | Size_64_GB = 64 * Size_1_GB, | ||
| 23 | Size_512_GB = 512 * Size_1_GB, | ||
| 24 | Invalid = std::numeric_limits<u64>::max(), | ||
| 25 | }; | ||
| 26 | |||
| 27 | // clang-format off | 15 | // clang-format off |
| 28 | constexpr std::array<KAddressSpaceInfo, 13> AddressSpaceInfos{{ | 16 | constexpr std::array<KAddressSpaceInfo, 13> AddressSpaceInfos{{ |
| 29 | { .bit_width = 32, .address = Size_2_MB , .size = Size_1_GB - Size_2_MB , .type = KAddressSpaceInfo::Type::MapSmall, }, | 17 | { .bit_width = 32, .address = Common::Size_2_MB , .size = Common::Size_1_GB - Common::Size_2_MB , .type = KAddressSpaceInfo::Type::MapSmall, }, |
| 30 | { .bit_width = 32, .address = Size_1_GB , .size = Size_4_GB - Size_1_GB , .type = KAddressSpaceInfo::Type::MapLarge, }, | 18 | { .bit_width = 32, .address = Common::Size_1_GB , .size = Common::Size_4_GB - Common::Size_1_GB , .type = KAddressSpaceInfo::Type::MapLarge, }, |
| 31 | { .bit_width = 32, .address = Invalid , .size = Size_1_GB , .type = KAddressSpaceInfo::Type::Heap, }, | 19 | { .bit_width = 32, .address = Common::Size_Invalid, .size = Common::Size_1_GB , .type = KAddressSpaceInfo::Type::Alias, }, |
| 32 | { .bit_width = 32, .address = Invalid , .size = Size_1_GB , .type = KAddressSpaceInfo::Type::Alias, }, | 20 | { .bit_width = 32, .address = Common::Size_Invalid, .size = Common::Size_1_GB , .type = KAddressSpaceInfo::Type::Heap, }, |
| 33 | { .bit_width = 36, .address = Size_128_MB, .size = Size_2_GB - Size_128_MB, .type = KAddressSpaceInfo::Type::MapSmall, }, | 21 | { .bit_width = 36, .address = Common::Size_128_MB , .size = Common::Size_2_GB - Common::Size_128_MB, .type = KAddressSpaceInfo::Type::MapSmall, }, |
| 34 | { .bit_width = 36, .address = Size_2_GB , .size = Size_64_GB - Size_2_GB , .type = KAddressSpaceInfo::Type::MapLarge, }, | 22 | { .bit_width = 36, .address = Common::Size_2_GB , .size = Common::Size_64_GB - Common::Size_2_GB , .type = KAddressSpaceInfo::Type::MapLarge, }, |
| 35 | { .bit_width = 36, .address = Invalid , .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, }, | 23 | { .bit_width = 36, .address = Common::Size_Invalid, .size = Common::Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, }, |
| 36 | { .bit_width = 36, .address = Invalid , .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Alias, }, | 24 | { .bit_width = 36, .address = Common::Size_Invalid, .size = Common::Size_6_GB , .type = KAddressSpaceInfo::Type::Alias, }, |
| 37 | { .bit_width = 39, .address = Size_128_MB, .size = Size_512_GB - Size_128_MB, .type = KAddressSpaceInfo::Type::Map39Bit, }, | 25 | { .bit_width = 39, .address = Common::Size_128_MB , .size = Common::Size_512_GB - Common::Size_128_MB, .type = KAddressSpaceInfo::Type::Map39Bit, }, |
| 38 | { .bit_width = 39, .address = Invalid , .size = Size_64_GB , .type = KAddressSpaceInfo::Type::MapSmall }, | 26 | { .bit_width = 39, .address = Common::Size_Invalid, .size = Common::Size_64_GB , .type = KAddressSpaceInfo::Type::MapSmall }, |
| 39 | { .bit_width = 39, .address = Invalid , .size = Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, }, | 27 | { .bit_width = 39, .address = Common::Size_Invalid, .size = Common::Size_6_GB , .type = KAddressSpaceInfo::Type::Heap, }, |
| 40 | { .bit_width = 39, .address = Invalid , .size = Size_64_GB , .type = KAddressSpaceInfo::Type::Alias, }, | 28 | { .bit_width = 39, .address = Common::Size_Invalid, .size = Common::Size_64_GB , .type = KAddressSpaceInfo::Type::Alias, }, |
| 41 | { .bit_width = 39, .address = Invalid , .size = Size_2_GB , .type = KAddressSpaceInfo::Type::Stack, }, | 29 | { .bit_width = 39, .address = Common::Size_Invalid, .size = Common::Size_2_GB , .type = KAddressSpaceInfo::Type::Stack, }, |
| 42 | }}; | 30 | }}; |
| 43 | // clang-format on | 31 | // clang-format on |
| 44 | 32 | ||
| 45 | constexpr bool IsAllowedIndexForAddress(std::size_t index) { | 33 | constexpr bool IsAllowedIndexForAddress(std::size_t index) { |
| 46 | return index < AddressSpaceInfos.size() && AddressSpaceInfos[index].address != Invalid; | 34 | return index < AddressSpaceInfos.size() && |
| 35 | AddressSpaceInfos[index].address != Common::Size_Invalid; | ||
| 47 | } | 36 | } |
| 48 | 37 | ||
| 49 | using IndexArray = | 38 | using IndexArray = |
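One property worth noting in the table above: the 39-bit `Map39Bit` window starts at 128 MiB and runs up to exactly the 2^39-byte boundary. A small standalone check, with the constants restated locally:

```cpp
#include <cstdint>

constexpr uint64_t Size_1_MB = 0x100000ULL;
constexpr uint64_t Size_128_MB = 128ULL * Size_1_MB;
constexpr uint64_t Size_1_GB = 0x40000000ULL;
constexpr uint64_t Size_512_GB = 512ULL * Size_1_GB;

// Map39Bit: .address = 128 MiB, .size = 512 GiB - 128 MiB.
constexpr uint64_t map39_start = Size_128_MB;
constexpr uint64_t map39_end = map39_start + (Size_512_GB - Size_128_MB);

static_assert(Size_512_GB == (1ULL << 39));
static_assert(map39_end == Size_512_GB); // ends exactly at the 39-bit boundary
```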
diff --git a/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp b/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp
new file mode 100644
index 000000000..a78551291
--- /dev/null
+++ b/src/core/hle/kernel/k_memory_layout.board.nintendo_nx.cpp
@@ -0,0 +1,199 @@
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include "common/alignment.h" | ||
| 6 | #include "core/hle/kernel/k_memory_layout.h" | ||
| 7 | #include "core/hle/kernel/k_memory_manager.h" | ||
| 8 | #include "core/hle/kernel/k_system_control.h" | ||
| 9 | #include "core/hle/kernel/k_trace.h" | ||
| 10 | |||
| 11 | namespace Kernel { | ||
| 12 | |||
| 13 | namespace { | ||
| 14 | |||
| 15 | constexpr size_t CarveoutAlignment = 0x20000; | ||
| 16 | constexpr size_t CarveoutSizeMax = (512ULL * 1024 * 1024) - CarveoutAlignment; | ||
| 17 | |||
| 18 | bool SetupPowerManagementControllerMemoryRegion(KMemoryLayout& memory_layout) { | ||
| 19 | // Above firmware 2.0.0, the PMC is not mappable. | ||
| 20 | return memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 21 | 0x7000E000, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap) && | ||
| 22 | memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 23 | 0x7000E400, 0xC00, | ||
| 24 | KMemoryRegionType_PowerManagementController | KMemoryRegionAttr_NoUserMap); | ||
| 25 | } | ||
| 26 | |||
| 27 | void InsertPoolPartitionRegionIntoBothTrees(KMemoryLayout& memory_layout, size_t start, size_t size, | ||
| 28 | KMemoryRegionType phys_type, | ||
| 29 | KMemoryRegionType virt_type, u32& cur_attr) { | ||
| 30 | const u32 attr = cur_attr++; | ||
| 31 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(start, size, | ||
| 32 | static_cast<u32>(phys_type), attr)); | ||
| 33 | const KMemoryRegion* phys = memory_layout.GetPhysicalMemoryRegionTree().FindByTypeAndAttribute( | ||
| 34 | static_cast<u32>(phys_type), attr); | ||
| 35 | ASSERT(phys != nullptr); | ||
| 36 | ASSERT(phys->GetEndAddress() != 0); | ||
| 37 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(phys->GetPairAddress(), size, | ||
| 38 | static_cast<u32>(virt_type), attr)); | ||
| 39 | } | ||
| 40 | |||
| 41 | } // namespace | ||
| 42 | |||
| 43 | namespace Init { | ||
| 44 | |||
| 45 | void SetupDevicePhysicalMemoryRegions(KMemoryLayout& memory_layout) { | ||
| 46 | ASSERT(SetupPowerManagementControllerMemoryRegion(memory_layout)); | ||
| 47 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 48 | 0x70019000, 0x1000, KMemoryRegionType_MemoryController | KMemoryRegionAttr_NoUserMap)); | ||
| 49 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 50 | 0x7001C000, 0x1000, KMemoryRegionType_MemoryController0 | KMemoryRegionAttr_NoUserMap)); | ||
| 51 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 52 | 0x7001D000, 0x1000, KMemoryRegionType_MemoryController1 | KMemoryRegionAttr_NoUserMap)); | ||
| 53 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 54 | 0x50040000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap)); | ||
| 55 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 56 | 0x50041000, 0x1000, | ||
| 57 | KMemoryRegionType_InterruptDistributor | KMemoryRegionAttr_ShouldKernelMap)); | ||
| 58 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 59 | 0x50042000, 0x1000, | ||
| 60 | KMemoryRegionType_InterruptCpuInterface | KMemoryRegionAttr_ShouldKernelMap)); | ||
| 61 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 62 | 0x50043000, 0x1D000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap)); | ||
| 63 | |||
| 64 | // Map IRAM unconditionally, to support the debug-logging-to-iram build config. | ||
| 65 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 66 | 0x40000000, 0x40000, KMemoryRegionType_LegacyLpsIram | KMemoryRegionAttr_ShouldKernelMap)); | ||
| 67 | |||
| 68 | // Above firmware 2.0.0, prevent mapping the bpmp exception vectors or the ipatch region. | ||
| 69 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 70 | 0x6000F000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap)); | ||
| 71 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 72 | 0x6001DC00, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap)); | ||
| 73 | } | ||
| 74 | |||
| 75 | void SetupDramPhysicalMemoryRegions(KMemoryLayout& memory_layout) { | ||
| 76 | const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize(); | ||
| 77 | const PAddr physical_memory_base_address = | ||
| 78 | KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress); | ||
| 79 | |||
| 80 | // Insert blocks into the tree. | ||
| 81 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 82 | physical_memory_base_address, intended_memory_size, KMemoryRegionType_Dram)); | ||
| 83 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 84 | physical_memory_base_address, ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly)); | ||
| 85 | |||
| 86 | // Insert the KTrace block at the end of Dram, if KTrace is enabled. | ||
| 87 | static_assert(!IsKTraceEnabled || KTraceBufferSize > 0); | ||
| 88 | if constexpr (IsKTraceEnabled) { | ||
| 89 | const PAddr ktrace_buffer_phys_addr = | ||
| 90 | physical_memory_base_address + intended_memory_size - KTraceBufferSize; | ||
| 91 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 92 | ktrace_buffer_phys_addr, KTraceBufferSize, KMemoryRegionType_KernelTraceBuffer)); | ||
| 93 | } | ||
| 94 | } | ||
| 95 | |||
| 96 | void SetupPoolPartitionMemoryRegions(KMemoryLayout& memory_layout) { | ||
| 97 | // Start by identifying the extents of the DRAM memory region. | ||
| 98 | const auto dram_extents = memory_layout.GetMainMemoryPhysicalExtents(); | ||
| 99 | ASSERT(dram_extents.GetEndAddress() != 0); | ||
| 100 | |||
| 101 | // Determine the end of the pool region. | ||
| 102 | const u64 pool_end = dram_extents.GetEndAddress() - KTraceBufferSize; | ||
| 103 | |||
| 104 | // Find the start of the kernel DRAM region. | ||
| 105 | const KMemoryRegion* kernel_dram_region = | ||
| 106 | memory_layout.GetPhysicalMemoryRegionTree().FindFirstDerived( | ||
| 107 | KMemoryRegionType_DramKernelBase); | ||
| 108 | ASSERT(kernel_dram_region != nullptr); | ||
| 109 | |||
| 110 | const u64 kernel_dram_start = kernel_dram_region->GetAddress(); | ||
| 111 | ASSERT(Common::IsAligned(kernel_dram_start, CarveoutAlignment)); | ||
| 112 | |||
| 113 | // Find the start of the pool partitions region. | ||
| 114 | const KMemoryRegion* pool_partitions_region = | ||
| 115 | memory_layout.GetPhysicalMemoryRegionTree().FindByTypeAndAttribute( | ||
| 116 | KMemoryRegionType_DramPoolPartition, 0); | ||
| 117 | ASSERT(pool_partitions_region != nullptr); | ||
| 118 | const u64 pool_partitions_start = pool_partitions_region->GetAddress(); | ||
| 119 | |||
| 120 | // Setup the pool partition layouts. | ||
| 121 | // On 5.0.0+, setup modern 4-pool-partition layout. | ||
| 122 | |||
| 123 | // Get Application and Applet pool sizes. | ||
| 124 | const size_t application_pool_size = KSystemControl::Init::GetApplicationPoolSize(); | ||
| 125 | const size_t applet_pool_size = KSystemControl::Init::GetAppletPoolSize(); | ||
| 126 | const size_t unsafe_system_pool_min_size = | ||
| 127 | KSystemControl::Init::GetMinimumNonSecureSystemPoolSize(); | ||
| 128 | |||
| 129 | // Decide on starting addresses for our pools. | ||
| 130 | const u64 application_pool_start = pool_end - application_pool_size; | ||
| 131 | const u64 applet_pool_start = application_pool_start - applet_pool_size; | ||
| 132 | const u64 unsafe_system_pool_start = std::min( | ||
| 133 | kernel_dram_start + CarveoutSizeMax, | ||
| 134 | Common::AlignDown(applet_pool_start - unsafe_system_pool_min_size, CarveoutAlignment)); | ||
| 135 | const size_t unsafe_system_pool_size = applet_pool_start - unsafe_system_pool_start; | ||
| 136 | |||
| 137 | // We want to arrange the application pool depending on where the middle of DRAM is. | ||
| 138 | const u64 dram_midpoint = (dram_extents.GetAddress() + dram_extents.GetEndAddress()) / 2; | ||
| 139 | u32 cur_pool_attr = 0; | ||
| 140 | size_t total_overhead_size = 0; | ||
| 141 | if (dram_extents.GetEndAddress() <= dram_midpoint || dram_midpoint <= application_pool_start) { | ||
| 142 | InsertPoolPartitionRegionIntoBothTrees( | ||
| 143 | memory_layout, application_pool_start, application_pool_size, | ||
| 144 | KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, | ||
| 145 | cur_pool_attr); | ||
| 146 | total_overhead_size += | ||
| 147 | KMemoryManager::CalculateManagementOverheadSize(application_pool_size); | ||
| 148 | } else { | ||
| 149 | const size_t first_application_pool_size = dram_midpoint - application_pool_start; | ||
| 150 | const size_t second_application_pool_size = | ||
| 151 | application_pool_start + application_pool_size - dram_midpoint; | ||
| 152 | InsertPoolPartitionRegionIntoBothTrees( | ||
| 153 | memory_layout, application_pool_start, first_application_pool_size, | ||
| 154 | KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, | ||
| 155 | cur_pool_attr); | ||
| 156 | InsertPoolPartitionRegionIntoBothTrees( | ||
| 157 | memory_layout, dram_midpoint, second_application_pool_size, | ||
| 158 | KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, | ||
| 159 | cur_pool_attr); | ||
| 160 | total_overhead_size += | ||
| 161 | KMemoryManager::CalculateManagementOverheadSize(first_application_pool_size); | ||
| 162 | total_overhead_size += | ||
| 163 | KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size); | ||
| 164 | } | ||
| 165 | |||
| 166 | // Insert the applet pool. | ||
| 167 | InsertPoolPartitionRegionIntoBothTrees(memory_layout, applet_pool_start, applet_pool_size, | ||
| 168 | KMemoryRegionType_DramAppletPool, | ||
| 169 | KMemoryRegionType_VirtualDramAppletPool, cur_pool_attr); | ||
| 170 | total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(applet_pool_size); | ||
| 171 | |||
| 172 | // Insert the nonsecure system pool. | ||
| 173 | InsertPoolPartitionRegionIntoBothTrees( | ||
| 174 | memory_layout, unsafe_system_pool_start, unsafe_system_pool_size, | ||
| 175 | KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool, | ||
| 176 | cur_pool_attr); | ||
| 177 | total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(unsafe_system_pool_size); | ||
| 178 | |||
| 179 | // Insert the pool management region. | ||
| 180 | total_overhead_size += KMemoryManager::CalculateManagementOverheadSize( | ||
| 181 | (unsafe_system_pool_start - pool_partitions_start) - total_overhead_size); | ||
| 182 | const u64 pool_management_start = unsafe_system_pool_start - total_overhead_size; | ||
| 183 | const size_t pool_management_size = total_overhead_size; | ||
| 184 | u32 pool_management_attr = 0; | ||
| 185 | InsertPoolPartitionRegionIntoBothTrees( | ||
| 186 | memory_layout, pool_management_start, pool_management_size, | ||
| 187 | KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement, | ||
| 188 | pool_management_attr); | ||
| 189 | |||
| 190 | // Insert the system pool. | ||
| 191 | const u64 system_pool_size = pool_management_start - pool_partitions_start; | ||
| 192 | InsertPoolPartitionRegionIntoBothTrees(memory_layout, pool_partitions_start, system_pool_size, | ||
| 193 | KMemoryRegionType_DramSystemPool, | ||
| 194 | KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr); | ||
| 195 | } | ||
| 196 | |||
| 197 | } // namespace Init | ||
| 198 | |||
| 199 | } // namespace Kernel | ||
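For intuition, here is the pool arithmetic worked through for the default 4 GiB arrangement (a sketch; it assumes `KTraceBufferSize == 0` when tracing is disabled, and uses the `DramPhysicalAddress` base defined in `k_memory_layout.h` below):

```cpp
#include <cstdint>

constexpr uint64_t dram_start = 0x80000000ULL;    // DramPhysicalAddress
constexpr uint64_t dram_size = 4ULL << 30;        // Common::Size_4_GB
constexpr uint64_t app_pool_size = 3285ULL << 20; // Common::Size_3285_MB

constexpr uint64_t pool_end = dram_start + dram_size; // KTraceBufferSize == 0
constexpr uint64_t app_pool_start = pool_end - app_pool_size;
constexpr uint64_t dram_midpoint = (dram_start + pool_end) / 2;

static_assert(app_pool_start == 0xB2B00000ULL);
static_assert(dram_midpoint == 0x100000000ULL);
// The midpoint falls inside the application pool, so SetupPoolPartitionMemoryRegions
// takes the split path and inserts the pool as two adjacent regions.
static_assert(app_pool_start < dram_midpoint && dram_midpoint < pool_end);
```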
diff --git a/src/core/hle/kernel/k_memory_layout.cpp b/src/core/hle/kernel/k_memory_layout.cpp
new file mode 100644
index 000000000..fb1e2435f
--- /dev/null
+++ b/src/core/hle/kernel/k_memory_layout.cpp
@@ -0,0 +1,166 @@
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <array> | ||
| 6 | |||
| 7 | #include "common/alignment.h" | ||
| 8 | #include "core/hle/kernel/k_memory_layout.h" | ||
| 9 | #include "core/hle/kernel/k_system_control.h" | ||
| 10 | |||
| 11 | namespace Kernel { | ||
| 12 | |||
| 13 | namespace { | ||
| 14 | |||
| 15 | template <typename... Args> | ||
| 16 | KMemoryRegion* AllocateRegion(KMemoryRegionAllocator& memory_region_allocator, Args&&... args) { | ||
| 17 | return memory_region_allocator.Allocate(std::forward<Args>(args)...); | ||
| 18 | } | ||
| 19 | |||
| 20 | } // namespace | ||
| 21 | |||
| 22 | KMemoryRegionTree::KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator_) | ||
| 23 | : memory_region_allocator{memory_region_allocator_} {} | ||
| 24 | |||
| 25 | void KMemoryRegionTree::InsertDirectly(u64 address, u64 last_address, u32 attr, u32 type_id) { | ||
| 26 | this->insert(*AllocateRegion(memory_region_allocator, address, last_address, attr, type_id)); | ||
| 27 | } | ||
| 28 | |||
| 29 | bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) { | ||
| 30 | // Locate the memory region that contains the address. | ||
| 31 | KMemoryRegion* found = this->FindModifiable(address); | ||
| 32 | |||
| 33 | // We require that the old attr is correct. | ||
| 34 | if (found->GetAttributes() != old_attr) { | ||
| 35 | return false; | ||
| 36 | } | ||
| 37 | |||
| 38 | // We further require that the region can be split from the old region. | ||
| 39 | const u64 inserted_region_end = address + size; | ||
| 40 | const u64 inserted_region_last = inserted_region_end - 1; | ||
| 41 | if (found->GetLastAddress() < inserted_region_last) { | ||
| 42 | return false; | ||
| 43 | } | ||
| 44 | |||
| 45 | // Further, we require that the type id is a valid transformation. | ||
| 46 | if (!found->CanDerive(type_id)) { | ||
| 47 | return false; | ||
| 48 | } | ||
| 49 | |||
| 50 | // Cache information from the region before we remove it. | ||
| 51 | const u64 old_address = found->GetAddress(); | ||
| 52 | const u64 old_last = found->GetLastAddress(); | ||
| 53 | const u64 old_pair = found->GetPairAddress(); | ||
| 54 | const u32 old_type = found->GetType(); | ||
| 55 | |||
| 56 | // Erase the existing region from the tree. | ||
| 57 | this->erase(this->iterator_to(*found)); | ||
| 58 | |||
| 59 | // Insert the new region into the tree. | ||
| 60 | if (old_address == address) { | ||
| 61 | // Reuse the old object for the new region, if we can. | ||
| 62 | found->Reset(address, inserted_region_last, old_pair, new_attr, type_id); | ||
| 63 | this->insert(*found); | ||
| 64 | } else { | ||
| 65 | // If we can't re-use, adjust the old region. | ||
| 66 | found->Reset(old_address, address - 1, old_pair, old_attr, old_type); | ||
| 67 | this->insert(*found); | ||
| 68 | |||
| 69 | // Insert a new region for the split. | ||
| 70 | const u64 new_pair = (old_pair != std::numeric_limits<u64>::max()) | ||
| 71 | ? old_pair + (address - old_address) | ||
| 72 | : old_pair; | ||
| 73 | this->insert(*AllocateRegion(memory_region_allocator, address, inserted_region_last, | ||
| 74 | new_pair, new_attr, type_id)); | ||
| 75 | } | ||
| 76 | |||
| 77 | // If the old region extends past the newly inserted region, insert the remainder after it. | ||
| 78 | if (old_last != inserted_region_last) { | ||
| 79 | const u64 after_pair = (old_pair != std::numeric_limits<u64>::max()) | ||
| 80 | ? old_pair + (inserted_region_end - old_address) | ||
| 81 | : old_pair; | ||
| 82 | this->insert(*AllocateRegion(memory_region_allocator, inserted_region_end, old_last, | ||
| 83 | after_pair, old_attr, old_type)); | ||
| 84 | } | ||
| 85 | |||
| 86 | return true; | ||
| 87 | } | ||
| 88 | |||
| 89 | VAddr KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id) { | ||
| 90 | // We want to find the total extents of the type id. | ||
| 91 | const auto extents = this->GetDerivedRegionExtents(static_cast<KMemoryRegionType>(type_id)); | ||
| 92 | |||
| 93 | // Ensure that our alignment is correct. | ||
| 94 | ASSERT(Common::IsAligned(extents.GetAddress(), alignment)); | ||
| 95 | |||
| 96 | const u64 first_address = extents.GetAddress(); | ||
| 97 | const u64 last_address = extents.GetLastAddress(); | ||
| 98 | |||
| 99 | const u64 first_index = first_address / alignment; | ||
| 100 | const u64 last_index = last_address / alignment; | ||
| 101 | |||
| 102 | while (true) { | ||
| 103 | const u64 candidate = | ||
| 104 | KSystemControl::GenerateRandomRange(first_index, last_index) * alignment; | ||
| 105 | |||
| 106 | // Ensure that the candidate doesn't overflow with the size. | ||
| 107 | if (!(candidate < candidate + size)) { | ||
| 108 | continue; | ||
| 109 | } | ||
| 110 | |||
| 111 | const u64 candidate_last = candidate + size - 1; | ||
| 112 | |||
| 113 | // Ensure that the candidate fits within the region. | ||
| 114 | if (candidate_last > last_address) { | ||
| 115 | continue; | ||
| 116 | } | ||
| 117 | |||
| 118 | // Locate the candidate region, and ensure it fits and has the correct type id. | ||
| 119 | if (const auto& candidate_region = *this->Find(candidate); | ||
| 120 | !(candidate_last <= candidate_region.GetLastAddress() && | ||
| 121 | candidate_region.GetType() == type_id)) { | ||
| 122 | continue; | ||
| 123 | } | ||
| 124 | |||
| 125 | return candidate; | ||
| 126 | } | ||
| 127 | } | ||
| 128 | |||
| 129 | KMemoryLayout::KMemoryLayout() | ||
| 130 | : virtual_tree{memory_region_allocator}, physical_tree{memory_region_allocator}, | ||
| 131 | virtual_linear_tree{memory_region_allocator}, physical_linear_tree{memory_region_allocator} {} | ||
| 132 | |||
| 133 | void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start, | ||
| 134 | VAddr linear_virtual_start) { | ||
| 135 | // Set static differences. | ||
| 136 | linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start; | ||
| 137 | linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start; | ||
| 138 | |||
| 139 | // Initialize linear trees. | ||
| 140 | for (auto& region : GetPhysicalMemoryRegionTree()) { | ||
| 141 | if (region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) { | ||
| 142 | GetPhysicalLinearMemoryRegionTree().InsertDirectly( | ||
| 143 | region.GetAddress(), region.GetLastAddress(), region.GetAttributes(), | ||
| 144 | region.GetType()); | ||
| 145 | } | ||
| 146 | } | ||
| 147 | |||
| 148 | for (auto& region : GetVirtualMemoryRegionTree()) { | ||
| 149 | if (region.IsDerivedFrom(KMemoryRegionType_Dram)) { | ||
| 150 | GetVirtualLinearMemoryRegionTree().InsertDirectly( | ||
| 151 | region.GetAddress(), region.GetLastAddress(), region.GetAttributes(), | ||
| 152 | region.GetType()); | ||
| 153 | } | ||
| 154 | } | ||
| 155 | } | ||
| 156 | |||
| 157 | size_t KMemoryLayout::GetResourceRegionSizeForInit() { | ||
| 158 | // Calculate resource region size based on whether we allow extra threads. | ||
| 159 | const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit(); | ||
| 160 | size_t resource_region_size = | ||
| 161 | KernelResourceSize + (use_extra_resources ? KernelSlabHeapAdditionalSize : 0); | ||
| 162 | |||
| 163 | return resource_region_size; | ||
| 164 | } | ||
| 165 | |||
| 166 | } // namespace Kernel | ||
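`InitializeLinearMemoryRegionTrees` stores both direction offsets as unsigned values: when the virtual base sits above the physical base, `aligned_linear_phys_start - linear_virtual_start` wraps modulo 2^64, and a plain addition still recovers the other address space. A minimal sketch with hypothetical base addresses:

```cpp
#include <cstdint>

constexpr uint64_t phys_start = 0x80000000ULL;         // hypothetical
constexpr uint64_t virt_start = 0xFFFFFF8000000000ULL; // hypothetical

constexpr uint64_t phys_to_virt_diff = virt_start - phys_start;
constexpr uint64_t virt_to_phys_diff = phys_start - virt_start; // wraps mod 2^64

constexpr uint64_t some_phys = phys_start + 0x1234;
static_assert(some_phys + phys_to_virt_diff == virt_start + 0x1234);
static_assert(virt_start + 0x1234 + virt_to_phys_diff == some_phys);
```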
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index 0821d2d8c..288642d9a 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -1,23 +1,69 @@
| 1 | // Copyright 2020 yuzu Emulator Project | 1 | // Copyright 2021 yuzu Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <utility> | ||
| 8 | |||
| 9 | #include "common/alignment.h" | ||
| 10 | #include "common/common_sizes.h" | ||
| 7 | #include "common/common_types.h" | 11 | #include "common/common_types.h" |
| 8 | #include "core/device_memory.h" | 12 | #include "core/device_memory.h" |
| 13 | #include "core/hle/kernel/k_memory_region.h" | ||
| 14 | #include "core/hle/kernel/k_memory_region_type.h" | ||
| 15 | #include "core/hle/kernel/memory_types.h" | ||
| 9 | 16 | ||
| 10 | namespace Kernel { | 17 | namespace Kernel { |
| 11 | 18 | ||
| 12 | constexpr std::size_t KernelAslrAlignment = 2 * 1024 * 1024; | 19 | constexpr std::size_t L1BlockSize = Common::Size_1_GB; |
| 20 | constexpr std::size_t L2BlockSize = Common::Size_2_MB; | ||
| 21 | |||
| 22 | constexpr std::size_t GetMaximumOverheadSize(std::size_t size) { | ||
| 23 | return (Common::DivideUp(size, L1BlockSize) + Common::DivideUp(size, L2BlockSize)) * PageSize; | ||
| 24 | } | ||
| 25 | |||
| 26 | constexpr std::size_t MainMemorySize = Common::Size_4_GB; | ||
| 27 | constexpr std::size_t MainMemorySizeMax = Common::Size_8_GB; | ||
| 28 | |||
| 29 | constexpr std::size_t ReservedEarlyDramSize = 0x60000; | ||
| 30 | constexpr std::size_t DramPhysicalAddress = 0x80000000; | ||
| 31 | |||
| 32 | constexpr std::size_t KernelAslrAlignment = Common::Size_2_MB; | ||
| 13 | constexpr std::size_t KernelVirtualAddressSpaceWidth = 1ULL << 39; | 33 | constexpr std::size_t KernelVirtualAddressSpaceWidth = 1ULL << 39; |
| 14 | constexpr std::size_t KernelPhysicalAddressSpaceWidth = 1ULL << 48; | 34 | constexpr std::size_t KernelPhysicalAddressSpaceWidth = 1ULL << 48; |
| 35 | |||
| 15 | constexpr std::size_t KernelVirtualAddressSpaceBase = 0ULL - KernelVirtualAddressSpaceWidth; | 36 | constexpr std::size_t KernelVirtualAddressSpaceBase = 0ULL - KernelVirtualAddressSpaceWidth; |
| 16 | constexpr std::size_t KernelVirtualAddressSpaceEnd = | 37 | constexpr std::size_t KernelVirtualAddressSpaceEnd = |
| 17 | KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment); | 38 | KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment); |
| 18 | constexpr std::size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1; | 39 | constexpr std::size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1ULL; |
| 19 | constexpr std::size_t KernelVirtualAddressSpaceSize = | 40 | constexpr std::size_t KernelVirtualAddressSpaceSize = |
| 20 | KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase; | 41 | KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase; |
| 42 | constexpr std::size_t KernelVirtualAddressCodeBase = KernelVirtualAddressSpaceBase; | ||
| 43 | constexpr std::size_t KernelVirtualAddressCodeSize = 0x62000; | ||
| 44 | constexpr std::size_t KernelVirtualAddressCodeEnd = | ||
| 45 | KernelVirtualAddressCodeBase + KernelVirtualAddressCodeSize; | ||
| 46 | |||
| 47 | constexpr std::size_t KernelPhysicalAddressSpaceBase = 0ULL; | ||
| 48 | constexpr std::size_t KernelPhysicalAddressSpaceEnd = | ||
| 49 | KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceWidth; | ||
| 50 | constexpr std::size_t KernelPhysicalAddressSpaceLast = KernelPhysicalAddressSpaceEnd - 1ULL; | ||
| 51 | constexpr std::size_t KernelPhysicalAddressSpaceSize = | ||
| 52 | KernelPhysicalAddressSpaceEnd - KernelPhysicalAddressSpaceBase; | ||
| 53 | constexpr std::size_t KernelPhysicalAddressCodeBase = DramPhysicalAddress + ReservedEarlyDramSize; | ||
| 54 | |||
| 55 | constexpr std::size_t KernelPageTableHeapSize = GetMaximumOverheadSize(MainMemorySizeMax); | ||
| 56 | constexpr std::size_t KernelInitialPageHeapSize = Common::Size_128_KB; | ||
| 57 | |||
| 58 | constexpr std::size_t KernelSlabHeapDataSize = Common::Size_5_MB; | ||
| 59 | constexpr std::size_t KernelSlabHeapGapsSize = Common::Size_2_MB - Common::Size_64_KB; | ||
| 60 | constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSize; | ||
| 61 | |||
| 62 | // NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860. | ||
| 63 | constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000ULL; | ||
| 64 | |||
| 65 | constexpr std::size_t KernelResourceSize = | ||
| 66 | KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize; | ||
| 21 | 67 | ||
| 22 | constexpr bool IsKernelAddressKey(VAddr key) { | 68 | constexpr bool IsKernelAddressKey(VAddr key) { |
| 23 | return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast; | 69 | return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast; |
@@ -27,64 +73,327 @@ constexpr bool IsKernelAddress(VAddr address) {
| 27 | return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd; | 73 | return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd; |
| 28 | } | 74 | } |
| 29 | 75 | ||
| 30 | class KMemoryRegion final { | 76 | class KMemoryLayout final { |
| 31 | friend class KMemoryLayout; | ||
| 32 | |||
| 33 | public: | 77 | public: |
| 34 | constexpr PAddr StartAddress() const { | 78 | KMemoryLayout(); |
| 35 | return start_address; | 79 | |
| 80 | KMemoryRegionTree& GetVirtualMemoryRegionTree() { | ||
| 81 | return virtual_tree; | ||
| 82 | } | ||
| 83 | const KMemoryRegionTree& GetVirtualMemoryRegionTree() const { | ||
| 84 | return virtual_tree; | ||
| 85 | } | ||
| 86 | KMemoryRegionTree& GetPhysicalMemoryRegionTree() { | ||
| 87 | return physical_tree; | ||
| 88 | } | ||
| 89 | const KMemoryRegionTree& GetPhysicalMemoryRegionTree() const { | ||
| 90 | return physical_tree; | ||
| 91 | } | ||
| 92 | KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() { | ||
| 93 | return virtual_linear_tree; | ||
| 94 | } | ||
| 95 | const KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() const { | ||
| 96 | return virtual_linear_tree; | ||
| 97 | } | ||
| 98 | KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() { | ||
| 99 | return physical_linear_tree; | ||
| 100 | } | ||
| 101 | const KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() const { | ||
| 102 | return physical_linear_tree; | ||
| 103 | } | ||
| 104 | |||
| 105 | VAddr GetLinearVirtualAddress(PAddr address) const { | ||
| 106 | return address + linear_phys_to_virt_diff; | ||
| 107 | } | ||
| 108 | PAddr GetLinearPhysicalAddress(VAddr address) const { | ||
| 109 | return address + linear_virt_to_phys_diff; | ||
| 110 | } | ||
| 111 | |||
| 112 | const KMemoryRegion* FindVirtual(VAddr address) const { | ||
| 113 | return Find(address, GetVirtualMemoryRegionTree()); | ||
| 114 | } | ||
| 115 | const KMemoryRegion* FindPhysical(PAddr address) const { | ||
| 116 | return Find(address, GetPhysicalMemoryRegionTree()); | ||
| 117 | } | ||
| 118 | |||
| 119 | const KMemoryRegion* FindVirtualLinear(VAddr address) const { | ||
| 120 | return Find(address, GetVirtualLinearMemoryRegionTree()); | ||
| 121 | } | ||
| 122 | const KMemoryRegion* FindPhysicalLinear(PAddr address) const { | ||
| 123 | return Find(address, GetPhysicalLinearMemoryRegionTree()); | ||
| 124 | } | ||
| 125 | |||
| 126 | VAddr GetMainStackTopAddress(s32 core_id) const { | ||
| 127 | return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscMainStack); | ||
| 128 | } | ||
| 129 | VAddr GetIdleStackTopAddress(s32 core_id) const { | ||
| 130 | return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscIdleStack); | ||
| 131 | } | ||
| 132 | VAddr GetExceptionStackTopAddress(s32 core_id) const { | ||
| 133 | return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack); | ||
| 134 | } | ||
| 135 | |||
| 136 | VAddr GetSlabRegionAddress() const { | ||
| 137 | return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab)) | ||
| 138 | .GetAddress(); | ||
| 139 | } | ||
| 140 | |||
| 141 | const KMemoryRegion& GetDeviceRegion(KMemoryRegionType type) const { | ||
| 142 | return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type)); | ||
| 143 | } | ||
| 144 | PAddr GetDevicePhysicalAddress(KMemoryRegionType type) const { | ||
| 145 | return GetDeviceRegion(type).GetAddress(); | ||
| 146 | } | ||
| 147 | VAddr GetDeviceVirtualAddress(KMemoryRegionType type) const { | ||
| 148 | return GetDeviceRegion(type).GetPairAddress(); | ||
| 149 | } | ||
| 150 | |||
| 151 | const KMemoryRegion& GetPoolManagementRegion() const { | ||
| 152 | return Dereference( | ||
| 153 | GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramPoolManagement)); | ||
| 154 | } | ||
| 155 | const KMemoryRegion& GetPageTableHeapRegion() const { | ||
| 156 | return Dereference( | ||
| 157 | GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelPtHeap)); | ||
| 158 | } | ||
| 159 | const KMemoryRegion& GetKernelStackRegion() const { | ||
| 160 | return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelStack)); | ||
| 161 | } | ||
| 162 | const KMemoryRegion& GetTempRegion() const { | ||
| 163 | return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelTemp)); | ||
| 164 | } | ||
| 165 | |||
| 166 | const KMemoryRegion& GetKernelTraceBufferRegion() const { | ||
| 167 | return Dereference(GetVirtualLinearMemoryRegionTree().FindByType( | ||
| 168 | KMemoryRegionType_VirtualDramKernelTraceBuffer)); | ||
| 169 | } | ||
| 170 | |||
| 171 | const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const { | ||
| 172 | return Dereference(FindVirtualLinear(address)); | ||
| 173 | } | ||
| 174 | |||
| 175 | const KMemoryRegion* GetPhysicalKernelTraceBufferRegion() const { | ||
| 176 | return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_KernelTraceBuffer); | ||
| 177 | } | ||
| 178 | const KMemoryRegion* GetPhysicalOnMemoryBootImageRegion() const { | ||
| 179 | return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_OnMemoryBootImage); | ||
| 180 | } | ||
| 181 | const KMemoryRegion* GetPhysicalDTBRegion() const { | ||
| 182 | return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DTB); | ||
| 183 | } | ||
| 184 | |||
| 185 | bool IsHeapPhysicalAddress(const KMemoryRegion*& region, PAddr address) const { | ||
| 186 | return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(), | ||
| 187 | KMemoryRegionType_DramUserPool); | ||
| 188 | } | ||
| 189 | bool IsHeapVirtualAddress(const KMemoryRegion*& region, VAddr address) const { | ||
| 190 | return IsTypedAddress(region, address, GetVirtualLinearMemoryRegionTree(), | ||
| 191 | KMemoryRegionType_VirtualDramUserPool); | ||
| 192 | } | ||
| 193 | |||
| 194 | bool IsHeapPhysicalAddress(const KMemoryRegion*& region, PAddr address, size_t size) const { | ||
| 195 | return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(), | ||
| 196 | KMemoryRegionType_DramUserPool); | ||
| 197 | } | ||
| 198 | bool IsHeapVirtualAddress(const KMemoryRegion*& region, VAddr address, size_t size) const { | ||
| 199 | return IsTypedAddress(region, address, size, GetVirtualLinearMemoryRegionTree(), | ||
| 200 | KMemoryRegionType_VirtualDramUserPool); | ||
| 201 | } | ||
| 202 | |||
| 203 | bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, PAddr address) const { | ||
| 204 | return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(), | ||
| 205 | static_cast<KMemoryRegionType>(KMemoryRegionAttr_LinearMapped)); | ||
| 206 | } | ||
| 207 | bool IsLinearMappedPhysicalAddress(const KMemoryRegion*& region, PAddr address, | ||
| 208 | size_t size) const { | ||
| 209 | return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(), | ||
| 210 | static_cast<KMemoryRegionType>(KMemoryRegionAttr_LinearMapped)); | ||
| 211 | } | ||
| 212 | |||
| 213 | std::pair<size_t, size_t> GetTotalAndKernelMemorySizes() const { | ||
| 214 | size_t total_size = 0, kernel_size = 0; | ||
| 215 | for (const auto& region : GetPhysicalMemoryRegionTree()) { | ||
| 216 | if (region.IsDerivedFrom(KMemoryRegionType_Dram)) { | ||
| 217 | total_size += region.GetSize(); | ||
| 218 | if (!region.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { | ||
| 219 | kernel_size += region.GetSize(); | ||
| 220 | } | ||
| 221 | } | ||
| 222 | } | ||
| 223 | return std::make_pair(total_size, kernel_size); | ||
| 224 | } | ||
| 225 | |||
| 226 | void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start, | ||
| 227 | VAddr linear_virtual_start); | ||
| 228 | static size_t GetResourceRegionSizeForInit(); | ||
| 229 | |||
| 230 | auto GetKernelRegionExtents() const { | ||
| 231 | return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel); | ||
| 232 | } | ||
| 233 | auto GetKernelCodeRegionExtents() const { | ||
| 234 | return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelCode); | ||
| 235 | } | ||
| 236 | auto GetKernelStackRegionExtents() const { | ||
| 237 | return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelStack); | ||
| 238 | } | ||
| 239 | auto GetKernelMiscRegionExtents() const { | ||
| 240 | return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelMisc); | ||
| 241 | } | ||
| 242 | auto GetKernelSlabRegionExtents() const { | ||
| 243 | return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelSlab); | ||
| 244 | } | ||
| 245 | |||
| 246 | auto GetLinearRegionPhysicalExtents() const { | ||
| 247 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | ||
| 248 | KMemoryRegionAttr_LinearMapped); | ||
| 249 | } | ||
| 250 | |||
| 251 | auto GetLinearRegionVirtualExtents() const { | ||
| 252 | const auto physical = GetLinearRegionPhysicalExtents(); | ||
| 253 | return KMemoryRegion(GetLinearVirtualAddress(physical.GetAddress()), | ||
| 254 | GetLinearVirtualAddress(physical.GetLastAddress()), 0, | ||
| 255 | KMemoryRegionType_None); | ||
| 256 | } | ||
| 257 | |||
| 258 | auto GetMainMemoryPhysicalExtents() const { | ||
| 259 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Dram); | ||
| 260 | } | ||
| 261 | auto GetCarveoutRegionExtents() const { | ||
| 262 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | ||
| 263 | KMemoryRegionAttr_CarveoutProtected); | ||
| 264 | } | ||
| 265 | |||
| 266 | auto GetKernelRegionPhysicalExtents() const { | ||
| 267 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | ||
| 268 | KMemoryRegionType_DramKernelBase); | ||
| 269 | } | ||
| 270 | auto GetKernelCodeRegionPhysicalExtents() const { | ||
| 271 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | ||
| 272 | KMemoryRegionType_DramKernelCode); | ||
| 273 | } | ||
| 274 | auto GetKernelSlabRegionPhysicalExtents() const { | ||
| 275 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | ||
| 276 | KMemoryRegionType_DramKernelSlab); | ||
| 277 | } | ||
| 278 | auto GetKernelPageTableHeapRegionPhysicalExtents() const { | ||
| 279 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | ||
| 280 | KMemoryRegionType_DramKernelPtHeap); | ||
| 281 | } | ||
| 282 | auto GetKernelInitPageTableRegionPhysicalExtents() const { | ||
| 283 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | ||
| 284 | KMemoryRegionType_DramKernelInitPt); | ||
| 285 | } | ||
| 286 | |||
| 287 | auto GetKernelPoolManagementRegionPhysicalExtents() const { | ||
| 288 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | ||
| 289 | KMemoryRegionType_DramPoolManagement); | ||
| 290 | } | ||
| 291 | auto GetKernelPoolPartitionRegionPhysicalExtents() const { | ||
| 292 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | ||
| 293 | KMemoryRegionType_DramPoolPartition); | ||
| 294 | } | ||
| 295 | auto GetKernelSystemPoolRegionPhysicalExtents() const { | ||
| 296 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | ||
| 297 | KMemoryRegionType_DramSystemPool); | ||
| 298 | } | ||
| 299 | auto GetKernelSystemNonSecurePoolRegionPhysicalExtents() const { | ||
| 300 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | ||
| 301 | KMemoryRegionType_DramSystemNonSecurePool); | ||
| 302 | } | ||
| 303 | auto GetKernelAppletPoolRegionPhysicalExtents() const { | ||
| 304 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | ||
| 305 | KMemoryRegionType_DramAppletPool); | ||
| 306 | } | ||
| 307 | auto GetKernelApplicationPoolRegionPhysicalExtents() const { | ||
| 308 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | ||
| 309 | KMemoryRegionType_DramApplicationPool); | ||
| 36 | } | 310 | } |
| 37 | 311 | ||
| 38 | constexpr PAddr EndAddress() const { | 312 | auto GetKernelTraceBufferRegionPhysicalExtents() const { |
| 39 | return end_address; | 313 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( |
| 314 | KMemoryRegionType_KernelTraceBuffer); | ||
| 40 | } | 315 | } |
| 41 | 316 | ||
| 42 | private: | 317 | private: |
| 43 | constexpr KMemoryRegion() = default; | 318 | template <typename AddressType> |
| 44 | constexpr KMemoryRegion(PAddr start_address, PAddr end_address) | 319 | static bool IsTypedAddress(const KMemoryRegion*& region, AddressType address, |
| 45 | : start_address{start_address}, end_address{end_address} {} | 320 | const KMemoryRegionTree& tree, KMemoryRegionType type) { |
| 321 | // Check if the cached region already contains the address. | ||
| 322 | if (region != nullptr && region->Contains(address)) { | ||
| 323 | return true; | ||
| 324 | } | ||
| 46 | 325 | ||
| 47 | const PAddr start_address{}; | 326 | // Find the containing region, and update the cache. |
| 48 | const PAddr end_address{}; | 327 | if (const KMemoryRegion* found = tree.Find(address); |
| 49 | }; | 328 | found != nullptr && found->IsDerivedFrom(type)) { |
| 329 | region = found; | ||
| 330 | return true; | ||
| 331 | } else { | ||
| 332 | return false; | ||
| 333 | } | ||
| 334 | } | ||
| 50 | 335 | ||
| 51 | class KMemoryLayout final { | 336 | template <typename AddressType> |
| 52 | public: | 337 | static bool IsTypedAddress(const KMemoryRegion*& region, AddressType address, size_t size, |
| 53 | constexpr const KMemoryRegion& Application() const { | 338 | const KMemoryRegionTree& tree, KMemoryRegionType type) { |
| 54 | return application; | 339 | // Get the end of the checked region. |
| 340 | const u64 last_address = address + size - 1; | ||
| 341 | |||
| 342 | // Walk the tree to verify the region is correct. | ||
| 343 | const KMemoryRegion* cur = | ||
| 344 | (region != nullptr && region->Contains(address)) ? region : tree.Find(address); | ||
| 345 | while (cur != nullptr && cur->IsDerivedFrom(type)) { | ||
| 346 | if (last_address <= cur->GetLastAddress()) { | ||
| 347 | region = cur; | ||
| 348 | return true; | ||
| 349 | } | ||
| 350 | |||
| 351 | cur = cur->GetNext(); | ||
| 352 | } | ||
| 353 | return false; | ||
| 55 | } | 354 | } |
| 56 | 355 | ||
| 57 | constexpr const KMemoryRegion& Applet() const { | 356 | template <typename AddressType> |
| 58 | return applet; | 357 | static const KMemoryRegion* Find(AddressType address, const KMemoryRegionTree& tree) { |
| 358 | return tree.Find(address); | ||
| 59 | } | 359 | } |
| 60 | 360 | ||
| 61 | constexpr const KMemoryRegion& System() const { | 361 | static KMemoryRegion& Dereference(KMemoryRegion* region) { |
| 62 | return system; | 362 | ASSERT(region != nullptr); |
| 363 | return *region; | ||
| 63 | } | 364 | } |
| 64 | 365 | ||
| 65 | static constexpr KMemoryLayout GetDefaultLayout() { | 366 | static const KMemoryRegion& Dereference(const KMemoryRegion* region) { |
| 66 | constexpr std::size_t application_size{0xcd500000}; | 367 | ASSERT(region != nullptr); |
| 67 | constexpr std::size_t applet_size{0x1fb00000}; | 368 | return *region; |
| 68 | constexpr PAddr application_start_address{Core::DramMemoryMap::End - application_size}; | 369 | } |
| 69 | constexpr PAddr application_end_address{Core::DramMemoryMap::End}; | 370 | |
| 70 | constexpr PAddr applet_start_address{application_start_address - applet_size}; | 371 | VAddr GetStackTopAddress(s32 core_id, KMemoryRegionType type) const { |
| 71 | constexpr PAddr applet_end_address{applet_start_address + applet_size}; | 372 | const auto& region = Dereference( |
| 72 | constexpr PAddr system_start_address{Core::DramMemoryMap::SlabHeapEnd}; | 373 | GetVirtualMemoryRegionTree().FindByTypeAndAttribute(type, static_cast<u32>(core_id))); |
| 73 | constexpr PAddr system_end_address{applet_start_address}; | 374 | ASSERT(region.GetEndAddress() != 0); |
| 74 | return {application_start_address, application_end_address, applet_start_address, | 375 | return region.GetEndAddress(); |
| 75 | applet_end_address, system_start_address, system_end_address}; | ||
| 76 | } | 376 | } |
| 77 | 377 | ||
| 78 | private: | 378 | private: |
| 79 | constexpr KMemoryLayout(PAddr application_start_address, std::size_t application_size, | 379 | u64 linear_phys_to_virt_diff{}; |
| 80 | PAddr applet_start_address, std::size_t applet_size, | 380 | u64 linear_virt_to_phys_diff{}; |
| 81 | PAddr system_start_address, std::size_t system_size) | 381 | KMemoryRegionAllocator memory_region_allocator; |
| 82 | : application{application_start_address, application_size}, | 382 | KMemoryRegionTree virtual_tree; |
| 83 | applet{applet_start_address, applet_size}, system{system_start_address, system_size} {} | 383 | KMemoryRegionTree physical_tree; |
| 84 | 384 | KMemoryRegionTree virtual_linear_tree; | |
| 85 | const KMemoryRegion application; | 385 | KMemoryRegionTree physical_linear_tree; |
| 86 | const KMemoryRegion applet; | ||
| 87 | const KMemoryRegion system; | ||
| 88 | }; | 386 | }; |
| 89 | 387 | ||
| 388 | namespace Init { | ||
| 389 | |||
| 390 | // These should be generic, regardless of board. | ||
| 391 | void SetupPoolPartitionMemoryRegions(KMemoryLayout& memory_layout); | ||
| 392 | |||
| 393 | // These may be implemented in a board-specific manner. | ||
| 394 | void SetupDevicePhysicalMemoryRegions(KMemoryLayout& memory_layout); | ||
| 395 | void SetupDramPhysicalMemoryRegions(KMemoryLayout& memory_layout); | ||
| 396 | |||
| 397 | } // namespace Init | ||
| 398 | |||
| 90 | } // namespace Kernel | 399 | } // namespace Kernel |
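Note: the IsTypedAddress helpers in the header above keep a caller-provided region pointer as a one-entry cache, so a repeat query that still falls inside the cached region skips the tree walk entirely. A minimal sketch of that pattern, with a std::map standing in for the intrusive red-black tree (simplified types, not yuzu's real API):

```cpp
#include <cstdint>
#include <map>

struct Region {
    uint64_t first, last; // inclusive [first, last] range
    uint32_t type_bits;
    bool Contains(uint64_t a) const { return first <= a && a <= last; }
    // A region "derives from" a type when it carries all of that type's bits.
    bool IsDerivedFrom(uint32_t t) const { return (type_bits | t) == type_bits; }
};

// std::map keyed by start address stands in for KMemoryRegionTree.
using RegionTree = std::map<uint64_t, Region>;

const Region* Find(const RegionTree& tree, uint64_t a) {
    auto it = tree.upper_bound(a); // first region starting past a
    if (it == tree.begin()) {
        return nullptr;
    }
    --it; // greatest start address <= a
    return it->second.Contains(a) ? &it->second : nullptr;
}

bool IsTypedAddress(const Region*& cache, uint64_t address, const RegionTree& tree,
                    uint32_t type) {
    // Fast path: the region found by the previous query still contains the
    // address (callers keep one cache per type, so no type re-check is needed).
    if (cache != nullptr && cache->Contains(address)) {
        return true;
    }
    // Slow path: walk the tree and refresh the cache on a typed hit.
    if (const Region* found = Find(tree, address);
        found != nullptr && found->IsDerivedFrom(type)) {
        cache = found;
        return true;
    }
    return false;
}
```

The size-checked overload works the same way, but walks forward through adjacent regions until the whole [address, address + size) span is covered by regions derived from the requested type.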
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp index 9027602bf..aa71697b2 100644 --- a/src/core/hle/kernel/k_memory_manager.cpp +++ b/src/core/hle/kernel/k_memory_manager.cpp | |||
| @@ -173,4 +173,16 @@ ResultCode KMemoryManager::Free(KPageLinkedList& page_list, std::size_t num_page | |||
| 173 | return RESULT_SUCCESS; | 173 | return RESULT_SUCCESS; |
| 174 | } | 174 | } |
| 175 | 175 | ||
| 176 | std::size_t KMemoryManager::Impl::CalculateManagementOverheadSize(std::size_t region_size) { | ||
| 177 | const std::size_t ref_count_size = (region_size / PageSize) * sizeof(u16); | ||
| 178 | const std::size_t optimize_map_size = | ||
| 179 | (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / | ||
| 180 | Common::BitSize<u64>()) * | ||
| 181 | sizeof(u64); | ||
| 182 | const std::size_t manager_meta_size = | ||
| 183 | Common::AlignUp(optimize_map_size + ref_count_size, PageSize); | ||
| 184 | const std::size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region_size); | ||
| 185 | return manager_meta_size + page_heap_size; | ||
| 186 | } | ||
| 187 | |||
| 176 | } // namespace Kernel | 188 | } // namespace Kernel |
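Note: CalculateManagementOverheadSize above budgets one u16 reference count per page plus one bit per page (packed into u64 words) for the optimization map, rounds the sum up to whole pages, then adds the page heap's own bookkeeping. A quick numeric check of the metadata half, assuming the Switch's 4 KiB page size (PageSize is defined elsewhere in yuzu) and a 64 MiB region:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t PageSize = 0x1000; // assumption: 4 KiB pages

constexpr std::size_t AlignUp(std::size_t value, std::size_t align) {
    return (value + align - 1) / align * align;
}

int main() {
    const std::size_t region_size = 64ULL << 20;      // 64 MiB region
    const std::size_t pages = region_size / PageSize; // 16384 pages

    const std::size_t ref_count_size = pages * sizeof(std::uint16_t); // 32768 B
    const std::size_t optimize_map_size =
        (AlignUp(pages, 64) / 64) * sizeof(std::uint64_t); // 2048 B

    // 34816 B rounds up to 9 pages = 36864 B of manager metadata; the real
    // function adds KPageHeap::CalculateManagementOverheadSize on top.
    std::printf("%zu\n", AlignUp(ref_count_size + optimize_map_size, PageSize));
}
```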
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h index ae9f683b8..ac840b3d0 100644 --- a/src/core/hle/kernel/k_memory_manager.h +++ b/src/core/hle/kernel/k_memory_manager.h | |||
| @@ -29,6 +29,10 @@ public: | |||
| 29 | 29 | ||
| 30 | Shift = 4, | 30 | Shift = 4, |
| 31 | Mask = (0xF << Shift), | 31 | Mask = (0xF << Shift), |
| 32 | |||
| 33 | // Aliases. | ||
| 34 | Unsafe = Application, | ||
| 35 | Secure = System, | ||
| 32 | }; | 36 | }; |
| 33 | 37 | ||
| 34 | enum class Direction : u32 { | 38 | enum class Direction : u32 { |
| @@ -56,6 +60,10 @@ public: | |||
| 56 | static constexpr std::size_t MaxManagerCount = 10; | 60 | static constexpr std::size_t MaxManagerCount = 10; |
| 57 | 61 | ||
| 58 | public: | 62 | public: |
| 63 | static std::size_t CalculateManagementOverheadSize(std::size_t region_size) { | ||
| 64 | return Impl::CalculateManagementOverheadSize(region_size); | ||
| 65 | } | ||
| 66 | |||
| 59 | static constexpr u32 EncodeOption(Pool pool, Direction dir) { | 67 | static constexpr u32 EncodeOption(Pool pool, Direction dir) { |
| 60 | return (static_cast<u32>(pool) << static_cast<u32>(Pool::Shift)) | | 68 | return (static_cast<u32>(pool) << static_cast<u32>(Pool::Shift)) | |
| 61 | (static_cast<u32>(dir) << static_cast<u32>(Direction::Shift)); | 69 | (static_cast<u32>(dir) << static_cast<u32>(Direction::Shift)); |
| @@ -86,6 +94,16 @@ private: | |||
| 86 | Pool pool{}; | 94 | Pool pool{}; |
| 87 | 95 | ||
| 88 | public: | 96 | public: |
| 97 | static std::size_t CalculateManagementOverheadSize(std::size_t region_size); | ||
| 98 | |||
| 99 | static constexpr std::size_t CalculateOptimizedProcessOverheadSize( | ||
| 100 | std::size_t region_size) { | ||
| 101 | return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / | ||
| 102 | Common::BitSize<u64>()) * | ||
| 103 | sizeof(u64); | ||
| 104 | } | ||
| 105 | |||
| 106 | public: | ||
| 89 | Impl() = default; | 107 | Impl() = default; |
| 90 | 108 | ||
| 91 | std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address); | 109 | std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address); |
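Note: EncodeOption packs a pool selector and an allocation direction into one u32. Pool::Shift, Pool::Mask, and the Unsafe/Secure aliases are visible in the hunk above; the concrete Pool and Direction member values in this sketch are assumptions for illustration (Direction::Shift in particular is taken to be 0):

```cpp
#include <cstdint>

// Assumed enumerator values; only Shift/Mask and the aliases appear in the diff.
enum class Pool : std::uint32_t {
    Application = 0,
    Applet = 1,
    System = 2,
    SystemNonSecure = 3,
    Shift = 4,
    Mask = 0xF << Shift,
    Unsafe = Application,
    Secure = System,
};

enum class Direction : std::uint32_t {
    FromFront = 0,
    FromBack = 1,
    Shift = 0, // assumption
    Mask = 0xF << Shift,
};

constexpr std::uint32_t EncodeOption(Pool pool, Direction dir) {
    return (static_cast<std::uint32_t>(pool) << static_cast<std::uint32_t>(Pool::Shift)) |
           (static_cast<std::uint32_t>(dir) << static_cast<std::uint32_t>(Direction::Shift));
}

// Under these assumptions, the pool index lands in bits 4..7 and the
// direction in the low nibble.
static_assert(EncodeOption(Pool::System, Direction::FromBack) == 0x21);
static_assert(EncodeOption(Pool::Unsafe, Direction::FromFront) == 0x00);
```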
diff --git a/src/core/hle/kernel/k_memory_region.h b/src/core/hle/kernel/k_memory_region.h new file mode 100644 index 000000000..a861c04ab --- /dev/null +++ b/src/core/hle/kernel/k_memory_region.h | |||
| @@ -0,0 +1,350 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include "common/assert.h" | ||
| 8 | #include "common/common_types.h" | ||
| 9 | #include "common/intrusive_red_black_tree.h" | ||
| 10 | #include "core/hle/kernel/k_memory_region_type.h" | ||
| 11 | |||
| 12 | namespace Kernel { | ||
| 13 | |||
| 14 | class KMemoryRegionAllocator; | ||
| 15 | |||
| 16 | class KMemoryRegion final : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryRegion>, | ||
| 17 | NonCopyable { | ||
| 18 | friend class KMemoryRegionTree; | ||
| 19 | |||
| 20 | public: | ||
| 21 | constexpr KMemoryRegion() = default; | ||
| 22 | constexpr KMemoryRegion(u64 address_, u64 last_address_) | ||
| 23 | : address{address_}, last_address{last_address_} {} | ||
| 24 | constexpr KMemoryRegion(u64 address_, u64 last_address_, u64 pair_address_, u32 attributes_, | ||
| 25 | u32 type_id_) | ||
| 26 | : address(address_), last_address(last_address_), pair_address(pair_address_), | ||
| 27 | attributes(attributes_), type_id(type_id_) {} | ||
| 28 | constexpr KMemoryRegion(u64 address_, u64 last_address_, u32 attributes_, u32 type_id_) | ||
| 29 | : KMemoryRegion(address_, last_address_, std::numeric_limits<u64>::max(), attributes_, | ||
| 30 | type_id_) {} | ||
| 31 | |||
| 32 | static constexpr int Compare(const KMemoryRegion& lhs, const KMemoryRegion& rhs) { | ||
| 33 | if (lhs.GetAddress() < rhs.GetAddress()) { | ||
| 34 | return -1; | ||
| 35 | } else if (lhs.GetAddress() <= rhs.GetLastAddress()) { | ||
| 36 | return 0; | ||
| 37 | } else { | ||
| 38 | return 1; | ||
| 39 | } | ||
| 40 | } | ||
| 41 | |||
| 42 | private: | ||
| 43 | constexpr void Reset(u64 a, u64 la, u64 p, u32 r, u32 t) { | ||
| 44 | address = a; | ||
| 45 | pair_address = p; | ||
| 46 | last_address = la; | ||
| 47 | attributes = r; | ||
| 48 | type_id = t; | ||
| 49 | } | ||
| 50 | |||
| 51 | public: | ||
| 52 | constexpr u64 GetAddress() const { | ||
| 53 | return address; | ||
| 54 | } | ||
| 55 | |||
| 56 | constexpr u64 GetPairAddress() const { | ||
| 57 | return pair_address; | ||
| 58 | } | ||
| 59 | |||
| 60 | constexpr u64 GetLastAddress() const { | ||
| 61 | return last_address; | ||
| 62 | } | ||
| 63 | |||
| 64 | constexpr u64 GetEndAddress() const { | ||
| 65 | return this->GetLastAddress() + 1; | ||
| 66 | } | ||
| 67 | |||
| 68 | constexpr size_t GetSize() const { | ||
| 69 | return this->GetEndAddress() - this->GetAddress(); | ||
| 70 | } | ||
| 71 | |||
| 72 | constexpr u32 GetAttributes() const { | ||
| 73 | return attributes; | ||
| 74 | } | ||
| 75 | |||
| 76 | constexpr u32 GetType() const { | ||
| 77 | return type_id; | ||
| 78 | } | ||
| 79 | |||
| 80 | constexpr void SetType(u32 type) { | ||
| 81 | ASSERT(this->CanDerive(type)); | ||
| 82 | type_id = type; | ||
| 83 | } | ||
| 84 | |||
| 85 | constexpr bool Contains(u64 address) const { | ||
| 86 | ASSERT(this->GetEndAddress() != 0); | ||
| 87 | return this->GetAddress() <= address && address <= this->GetLastAddress(); | ||
| 88 | } | ||
| 89 | |||
| 90 | constexpr bool IsDerivedFrom(u32 type) const { | ||
| 91 | return (this->GetType() | type) == this->GetType(); | ||
| 92 | } | ||
| 93 | |||
| 94 | constexpr bool HasTypeAttribute(u32 attr) const { | ||
| 95 | return (this->GetType() | attr) == this->GetType(); | ||
| 96 | } | ||
| 97 | |||
| 98 | constexpr bool CanDerive(u32 type) const { | ||
| 99 | return (this->GetType() | type) == type; | ||
| 100 | } | ||
| 101 | |||
| 102 | constexpr void SetPairAddress(u64 a) { | ||
| 103 | pair_address = a; | ||
| 104 | } | ||
| 105 | |||
| 106 | constexpr void SetTypeAttribute(u32 attr) { | ||
| 107 | type_id |= attr; | ||
| 108 | } | ||
| 109 | |||
| 110 | private: | ||
| 111 | u64 address{}; | ||
| 112 | u64 last_address{}; | ||
| 113 | u64 pair_address{}; | ||
| 114 | u32 attributes{}; | ||
| 115 | u32 type_id{}; | ||
| 116 | }; | ||
| 117 | |||
| 118 | class KMemoryRegionTree final : NonCopyable { | ||
| 119 | public: | ||
| 120 | struct DerivedRegionExtents { | ||
| 121 | const KMemoryRegion* first_region{}; | ||
| 122 | const KMemoryRegion* last_region{}; | ||
| 123 | |||
| 124 | constexpr DerivedRegionExtents() = default; | ||
| 125 | |||
| 126 | constexpr u64 GetAddress() const { | ||
| 127 | return this->first_region->GetAddress(); | ||
| 128 | } | ||
| 129 | |||
| 130 | constexpr u64 GetLastAddress() const { | ||
| 131 | return this->last_region->GetLastAddress(); | ||
| 132 | } | ||
| 133 | |||
| 134 | constexpr u64 GetEndAddress() const { | ||
| 135 | return this->GetLastAddress() + 1; | ||
| 136 | } | ||
| 137 | |||
| 138 | constexpr size_t GetSize() const { | ||
| 139 | return this->GetEndAddress() - this->GetAddress(); | ||
| 140 | } | ||
| 141 | }; | ||
| 142 | |||
| 143 | private: | ||
| 144 | using TreeType = | ||
| 145 | Common::IntrusiveRedBlackTreeBaseTraits<KMemoryRegion>::TreeType<KMemoryRegion>; | ||
| 146 | |||
| 147 | public: | ||
| 148 | using value_type = TreeType::value_type; | ||
| 149 | using size_type = TreeType::size_type; | ||
| 150 | using difference_type = TreeType::difference_type; | ||
| 151 | using pointer = TreeType::pointer; | ||
| 152 | using const_pointer = TreeType::const_pointer; | ||
| 153 | using reference = TreeType::reference; | ||
| 154 | using const_reference = TreeType::const_reference; | ||
| 155 | using iterator = TreeType::iterator; | ||
| 156 | using const_iterator = TreeType::const_iterator; | ||
| 157 | |||
| 158 | private: | ||
| 159 | TreeType m_tree{}; | ||
| 160 | KMemoryRegionAllocator& memory_region_allocator; | ||
| 161 | |||
| 162 | public: | ||
| 163 | explicit KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator_); | ||
| 164 | |||
| 165 | public: | ||
| 166 | KMemoryRegion* FindModifiable(u64 address) { | ||
| 167 | if (auto it = this->find(KMemoryRegion(address, address, 0, 0)); it != this->end()) { | ||
| 168 | return std::addressof(*it); | ||
| 169 | } else { | ||
| 170 | return nullptr; | ||
| 171 | } | ||
| 172 | } | ||
| 173 | |||
| 174 | const KMemoryRegion* Find(u64 address) const { | ||
| 175 | if (auto it = this->find(KMemoryRegion(address, address, 0, 0)); it != this->cend()) { | ||
| 176 | return std::addressof(*it); | ||
| 177 | } else { | ||
| 178 | return nullptr; | ||
| 179 | } | ||
| 180 | } | ||
| 181 | |||
| 182 | const KMemoryRegion* FindByType(KMemoryRegionType type_id) const { | ||
| 183 | for (auto it = this->cbegin(); it != this->cend(); ++it) { | ||
| 184 | if (it->GetType() == static_cast<u32>(type_id)) { | ||
| 185 | return std::addressof(*it); | ||
| 186 | } | ||
| 187 | } | ||
| 188 | return nullptr; | ||
| 189 | } | ||
| 190 | |||
| 191 | const KMemoryRegion* FindByTypeAndAttribute(u32 type_id, u32 attr) const { | ||
| 192 | for (auto it = this->cbegin(); it != this->cend(); ++it) { | ||
| 193 | if (it->GetType() == type_id && it->GetAttributes() == attr) { | ||
| 194 | return std::addressof(*it); | ||
| 195 | } | ||
| 196 | } | ||
| 197 | return nullptr; | ||
| 198 | } | ||
| 199 | |||
| 200 | const KMemoryRegion* FindFirstDerived(KMemoryRegionType type_id) const { | ||
| 201 | for (auto it = this->cbegin(); it != this->cend(); it++) { | ||
| 202 | if (it->IsDerivedFrom(type_id)) { | ||
| 203 | return std::addressof(*it); | ||
| 204 | } | ||
| 205 | } | ||
| 206 | return nullptr; | ||
| 207 | } | ||
| 208 | |||
| 209 | const KMemoryRegion* FindLastDerived(KMemoryRegionType type_id) const { | ||
| 210 | const KMemoryRegion* region = nullptr; | ||
| 211 | for (auto it = this->begin(); it != this->end(); it++) { | ||
| 212 | if (it->IsDerivedFrom(type_id)) { | ||
| 213 | region = std::addressof(*it); | ||
| 214 | } | ||
| 215 | } | ||
| 216 | return region; | ||
| 217 | } | ||
| 218 | |||
| 219 | DerivedRegionExtents GetDerivedRegionExtents(KMemoryRegionType type_id) const { | ||
| 220 | DerivedRegionExtents extents; | ||
| 221 | |||
| 222 | ASSERT(extents.first_region == nullptr); | ||
| 223 | ASSERT(extents.last_region == nullptr); | ||
| 224 | |||
| 225 | for (auto it = this->cbegin(); it != this->cend(); it++) { | ||
| 226 | if (it->IsDerivedFrom(type_id)) { | ||
| 227 | if (extents.first_region == nullptr) { | ||
| 228 | extents.first_region = std::addressof(*it); | ||
| 229 | } | ||
| 230 | extents.last_region = std::addressof(*it); | ||
| 231 | } | ||
| 232 | } | ||
| 233 | |||
| 234 | ASSERT(extents.first_region != nullptr); | ||
| 235 | ASSERT(extents.last_region != nullptr); | ||
| 236 | |||
| 237 | return extents; | ||
| 238 | } | ||
| 239 | |||
| 240 | DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) const { | ||
| 241 | return GetDerivedRegionExtents(static_cast<KMemoryRegionType>(type_id)); | ||
| 242 | } | ||
| 243 | |||
| 244 | public: | ||
| 245 | void InsertDirectly(u64 address, u64 last_address, u32 attr = 0, u32 type_id = 0); | ||
| 246 | bool Insert(u64 address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0); | ||
| 247 | |||
| 248 | VAddr GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id); | ||
| 249 | |||
| 250 | VAddr GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, u32 type_id, | ||
| 251 | size_t guard_size) { | ||
| 252 | return this->GetRandomAlignedRegion(size + 2 * guard_size, alignment, type_id) + guard_size; | ||
| 253 | } | ||
| 254 | |||
| 255 | public: | ||
| 256 | // Iterator accessors. | ||
| 257 | iterator begin() { | ||
| 258 | return m_tree.begin(); | ||
| 259 | } | ||
| 260 | |||
| 261 | const_iterator begin() const { | ||
| 262 | return m_tree.begin(); | ||
| 263 | } | ||
| 264 | |||
| 265 | iterator end() { | ||
| 266 | return m_tree.end(); | ||
| 267 | } | ||
| 268 | |||
| 269 | const_iterator end() const { | ||
| 270 | return m_tree.end(); | ||
| 271 | } | ||
| 272 | |||
| 273 | const_iterator cbegin() const { | ||
| 274 | return this->begin(); | ||
| 275 | } | ||
| 276 | |||
| 277 | const_iterator cend() const { | ||
| 278 | return this->end(); | ||
| 279 | } | ||
| 280 | |||
| 281 | iterator iterator_to(reference ref) { | ||
| 282 | return m_tree.iterator_to(ref); | ||
| 283 | } | ||
| 284 | |||
| 285 | const_iterator iterator_to(const_reference ref) const { | ||
| 286 | return m_tree.iterator_to(ref); | ||
| 287 | } | ||
| 288 | |||
| 289 | // Content management. | ||
| 290 | bool empty() const { | ||
| 291 | return m_tree.empty(); | ||
| 292 | } | ||
| 293 | |||
| 294 | reference back() { | ||
| 295 | return m_tree.back(); | ||
| 296 | } | ||
| 297 | |||
| 298 | const_reference back() const { | ||
| 299 | return m_tree.back(); | ||
| 300 | } | ||
| 301 | |||
| 302 | reference front() { | ||
| 303 | return m_tree.front(); | ||
| 304 | } | ||
| 305 | |||
| 306 | const_reference front() const { | ||
| 307 | return m_tree.front(); | ||
| 308 | } | ||
| 309 | |||
| 310 | iterator insert(reference ref) { | ||
| 311 | return m_tree.insert(ref); | ||
| 312 | } | ||
| 313 | |||
| 314 | iterator erase(iterator it) { | ||
| 315 | return m_tree.erase(it); | ||
| 316 | } | ||
| 317 | |||
| 318 | iterator find(const_reference ref) const { | ||
| 319 | return m_tree.find(ref); | ||
| 320 | } | ||
| 321 | |||
| 322 | iterator nfind(const_reference ref) const { | ||
| 323 | return m_tree.nfind(ref); | ||
| 324 | } | ||
| 325 | }; | ||
| 326 | |||
| 327 | class KMemoryRegionAllocator final : NonCopyable { | ||
| 328 | public: | ||
| 329 | static constexpr size_t MaxMemoryRegions = 200; | ||
| 330 | |||
| 331 | constexpr KMemoryRegionAllocator() = default; | ||
| 332 | |||
| 333 | template <typename... Args> | ||
| 334 | KMemoryRegion* Allocate(Args&&... args) { | ||
| 335 | // Ensure we stay within the bounds of our heap. | ||
| 336 | ASSERT(this->num_regions < MaxMemoryRegions); | ||
| 337 | |||
| 338 | // Create the new region. | ||
| 339 | KMemoryRegion* region = std::addressof(this->region_heap[this->num_regions++]); | ||
| 340 | new (region) KMemoryRegion(std::forward<Args>(args)...); | ||
| 341 | |||
| 342 | return region; | ||
| 343 | } | ||
| 344 | |||
| 345 | private: | ||
| 346 | std::array<KMemoryRegion, MaxMemoryRegions> region_heap{}; | ||
| 347 | size_t num_regions{}; | ||
| 348 | }; | ||
| 349 | |||
| 350 | } // namespace Kernel | ||
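Note: KMemoryRegionAllocator is a boot-time bump allocator. Regions are constructed in place inside a fixed std::array and never individually freed, which keeps memory-layout derivation free of dynamic allocation (on real hardware, no heap exists yet at this stage). The shape of the pattern, generalized as a sketch rather than yuzu's exact class:

```cpp
#include <array>
#include <cassert>
#include <cstddef>
#include <new>
#include <utility>

template <typename T, std::size_t Capacity>
class BumpAllocator {
public:
    template <typename... Args>
    T* Allocate(Args&&... args) {
        // The layout code must never exceed the fixed budget (200 regions in yuzu).
        assert(count < Capacity);
        T* slot = std::addressof(heap[count++]);
        // Re-construct the default-initialized slot in place; nothing is ever
        // destroyed or freed, so T should not own resources.
        return new (slot) T(std::forward<Args>(args)...);
    }

private:
    std::array<T, Capacity> heap{};
    std::size_t count{};
};
```

The comparator on KMemoryRegion is what makes point lookups through the tree work: Compare returns 0 whenever lhs's start address falls inside rhs, so find() with a degenerate one-address key region locates the containing region.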
diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h new file mode 100644 index 000000000..a05e66677 --- /dev/null +++ b/src/core/hle/kernel/k_memory_region_type.h | |||
| @@ -0,0 +1,338 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include "common/bit_util.h" | ||
| 8 | #include "common/common_funcs.h" | ||
| 9 | #include "common/common_types.h" | ||
| 10 | |||
| 11 | #define ARCH_ARM64 | ||
| 12 | #define BOARD_NINTENDO_NX | ||
| 13 | |||
| 14 | namespace Kernel { | ||
| 15 | |||
| 16 | enum KMemoryRegionType : u32 { | ||
| 17 | KMemoryRegionAttr_CarveoutProtected = 0x04000000, | ||
| 18 | KMemoryRegionAttr_DidKernelMap = 0x08000000, | ||
| 19 | KMemoryRegionAttr_ShouldKernelMap = 0x10000000, | ||
| 20 | KMemoryRegionAttr_UserReadOnly = 0x20000000, | ||
| 21 | KMemoryRegionAttr_NoUserMap = 0x40000000, | ||
| 22 | KMemoryRegionAttr_LinearMapped = 0x80000000, | ||
| 23 | }; | ||
| 24 | DECLARE_ENUM_FLAG_OPERATORS(KMemoryRegionType); | ||
| 25 | |||
| 26 | namespace impl { | ||
| 27 | |||
| 28 | constexpr size_t BitsForDeriveSparse(size_t n) { | ||
| 29 | return n + 1; | ||
| 30 | } | ||
| 31 | |||
| 32 | constexpr size_t BitsForDeriveDense(size_t n) { | ||
| 33 | size_t low = 0, high = 1; | ||
| 34 | for (size_t i = 0; i < n - 1; ++i) { | ||
| 35 | if ((++low) == high) { | ||
| 36 | ++high; | ||
| 37 | low = 0; | ||
| 38 | } | ||
| 39 | } | ||
| 40 | return high + 1; | ||
| 41 | } | ||
| 42 | |||
| 43 | class KMemoryRegionTypeValue { | ||
| 44 | public: | ||
| 45 | using ValueType = std::underlying_type_t<KMemoryRegionType>; | ||
| 46 | |||
| 47 | constexpr KMemoryRegionTypeValue() = default; | ||
| 48 | |||
| 49 | constexpr operator KMemoryRegionType() const { | ||
| 50 | return static_cast<KMemoryRegionType>(m_value); | ||
| 51 | } | ||
| 52 | |||
| 53 | constexpr ValueType GetValue() const { | ||
| 54 | return m_value; | ||
| 55 | } | ||
| 56 | |||
| 57 | constexpr const KMemoryRegionTypeValue& Finalize() { | ||
| 58 | m_finalized = true; | ||
| 59 | return *this; | ||
| 60 | } | ||
| 61 | |||
| 62 | constexpr const KMemoryRegionTypeValue& SetSparseOnly() { | ||
| 63 | m_sparse_only = true; | ||
| 64 | return *this; | ||
| 65 | } | ||
| 66 | |||
| 67 | constexpr const KMemoryRegionTypeValue& SetDenseOnly() { | ||
| 68 | m_dense_only = true; | ||
| 69 | return *this; | ||
| 70 | } | ||
| 71 | |||
| 72 | constexpr KMemoryRegionTypeValue& SetAttribute(u32 attr) { | ||
| 73 | m_value |= attr; | ||
| 74 | return *this; | ||
| 75 | } | ||
| 76 | |||
| 77 | constexpr KMemoryRegionTypeValue DeriveInitial( | ||
| 78 | size_t i, size_t next = Common::BitSize<ValueType>()) const { | ||
| 79 | KMemoryRegionTypeValue new_type = *this; | ||
| 80 | new_type.m_value = (ValueType{1} << i); | ||
| 81 | new_type.m_next_bit = next; | ||
| 82 | return new_type; | ||
| 83 | } | ||
| 84 | |||
| 85 | constexpr KMemoryRegionTypeValue DeriveAttribute(u32 attr) const { | ||
| 86 | KMemoryRegionTypeValue new_type = *this; | ||
| 87 | new_type.m_value |= attr; | ||
| 88 | return new_type; | ||
| 89 | } | ||
| 90 | |||
| 91 | constexpr KMemoryRegionTypeValue DeriveTransition(size_t ofs = 0, size_t adv = 1) const { | ||
| 92 | KMemoryRegionTypeValue new_type = *this; | ||
| 93 | new_type.m_value |= (ValueType{1} << (m_next_bit + ofs)); | ||
| 94 | new_type.m_next_bit += adv; | ||
| 95 | return new_type; | ||
| 96 | } | ||
| 97 | |||
| 98 | constexpr KMemoryRegionTypeValue DeriveSparse(size_t ofs, size_t n, size_t i) const { | ||
| 99 | KMemoryRegionTypeValue new_type = *this; | ||
| 100 | new_type.m_value |= (ValueType{1} << (m_next_bit + ofs)); | ||
| 101 | new_type.m_value |= (ValueType{1} << (m_next_bit + ofs + 1 + i)); | ||
| 102 | new_type.m_next_bit += ofs + n + 1; | ||
| 103 | return new_type; | ||
| 104 | } | ||
| 105 | |||
| 106 | constexpr KMemoryRegionTypeValue Derive(size_t n, size_t i) const { | ||
| 107 | size_t low = 0, high = 1; | ||
| 108 | for (size_t j = 0; j < i; ++j) { | ||
| 109 | if ((++low) == high) { | ||
| 110 | ++high; | ||
| 111 | low = 0; | ||
| 112 | } | ||
| 113 | } | ||
| 114 | |||
| 115 | KMemoryRegionTypeValue new_type = *this; | ||
| 116 | new_type.m_value |= (ValueType{1} << (m_next_bit + low)); | ||
| 117 | new_type.m_value |= (ValueType{1} << (m_next_bit + high)); | ||
| 118 | new_type.m_next_bit += BitsForDeriveDense(n); | ||
| 119 | return new_type; | ||
| 120 | } | ||
| 121 | |||
| 122 | constexpr KMemoryRegionTypeValue Advance(size_t n) const { | ||
| 123 | KMemoryRegionTypeValue new_type = *this; | ||
| 124 | new_type.m_next_bit += n; | ||
| 125 | return new_type; | ||
| 126 | } | ||
| 127 | |||
| 128 | constexpr bool IsAncestorOf(ValueType v) const { | ||
| 129 | return (m_value | v) == v; | ||
| 130 | } | ||
| 131 | |||
| 132 | private: | ||
| 133 | constexpr KMemoryRegionTypeValue(ValueType v) : m_value(v) {} | ||
| 134 | |||
| 135 | private: | ||
| 136 | ValueType m_value{}; | ||
| 137 | size_t m_next_bit{}; | ||
| 138 | bool m_finalized{}; | ||
| 139 | bool m_sparse_only{}; | ||
| 140 | bool m_dense_only{}; | ||
| 141 | }; | ||
| 142 | |||
| 143 | } // namespace impl | ||
| 144 | |||
| 145 | constexpr auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue(); | ||
| 146 | constexpr auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2); | ||
| 147 | constexpr auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2); | ||
| 148 | static_assert(KMemoryRegionType_Kernel.GetValue() == 0x1); | ||
| 149 | static_assert(KMemoryRegionType_Dram.GetValue() == 0x2); | ||
| 150 | |||
| 151 | constexpr auto KMemoryRegionType_DramKernelBase = | ||
| 152 | KMemoryRegionType_Dram.DeriveSparse(0, 3, 0) | ||
| 153 | .SetAttribute(KMemoryRegionAttr_NoUserMap) | ||
| 154 | .SetAttribute(KMemoryRegionAttr_CarveoutProtected); | ||
| 155 | constexpr auto KMemoryRegionType_DramReservedBase = KMemoryRegionType_Dram.DeriveSparse(0, 3, 1); | ||
| 156 | constexpr auto KMemoryRegionType_DramHeapBase = | ||
| 157 | KMemoryRegionType_Dram.DeriveSparse(0, 3, 2).SetAttribute(KMemoryRegionAttr_LinearMapped); | ||
| 158 | static_assert(KMemoryRegionType_DramKernelBase.GetValue() == | ||
| 159 | (0xE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap)); | ||
| 160 | static_assert(KMemoryRegionType_DramReservedBase.GetValue() == (0x16)); | ||
| 161 | static_assert(KMemoryRegionType_DramHeapBase.GetValue() == (0x26 | KMemoryRegionAttr_LinearMapped)); | ||
| 162 | |||
| 163 | constexpr auto KMemoryRegionType_DramKernelCode = | ||
| 164 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 0); | ||
| 165 | constexpr auto KMemoryRegionType_DramKernelSlab = | ||
| 166 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 1); | ||
| 167 | constexpr auto KMemoryRegionType_DramKernelPtHeap = | ||
| 168 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 2).SetAttribute( | ||
| 169 | KMemoryRegionAttr_LinearMapped); | ||
| 170 | constexpr auto KMemoryRegionType_DramKernelInitPt = | ||
| 171 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 3).SetAttribute( | ||
| 172 | KMemoryRegionAttr_LinearMapped); | ||
| 173 | static_assert(KMemoryRegionType_DramKernelCode.GetValue() == | ||
| 174 | (0xCE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap)); | ||
| 175 | static_assert(KMemoryRegionType_DramKernelSlab.GetValue() == | ||
| 176 | (0x14E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap)); | ||
| 177 | static_assert(KMemoryRegionType_DramKernelPtHeap.GetValue() == | ||
| 178 | (0x24E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | | ||
| 179 | KMemoryRegionAttr_LinearMapped)); | ||
| 180 | static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() == | ||
| 181 | (0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | | ||
| 182 | KMemoryRegionAttr_LinearMapped)); | ||
| 183 | |||
| 184 | constexpr auto KMemoryRegionType_DramReservedEarly = | ||
| 185 | KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap); | ||
| 186 | static_assert(KMemoryRegionType_DramReservedEarly.GetValue() == | ||
| 187 | (0x16 | KMemoryRegionAttr_NoUserMap)); | ||
| 188 | |||
| 189 | constexpr auto KMemoryRegionType_KernelTraceBuffer = | ||
| 190 | KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 0) | ||
| 191 | .SetAttribute(KMemoryRegionAttr_LinearMapped) | ||
| 192 | .SetAttribute(KMemoryRegionAttr_UserReadOnly); | ||
| 193 | constexpr auto KMemoryRegionType_OnMemoryBootImage = | ||
| 194 | KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 1); | ||
| 195 | constexpr auto KMemoryRegionType_DTB = KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2); | ||
| 196 | static_assert(KMemoryRegionType_KernelTraceBuffer.GetValue() == | ||
| 197 | (0xD6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_UserReadOnly)); | ||
| 198 | static_assert(KMemoryRegionType_OnMemoryBootImage.GetValue() == 0x156); | ||
| 199 | static_assert(KMemoryRegionType_DTB.GetValue() == 0x256); | ||
| 200 | |||
| 201 | constexpr auto KMemoryRegionType_DramPoolPartition = | ||
| 202 | KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap); | ||
| 203 | static_assert(KMemoryRegionType_DramPoolPartition.GetValue() == | ||
| 204 | (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); | ||
| 205 | |||
| 206 | constexpr auto KMemoryRegionType_DramPoolManagement = | ||
| 207 | KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute( | ||
| 208 | KMemoryRegionAttr_CarveoutProtected); | ||
| 209 | constexpr auto KMemoryRegionType_DramUserPool = | ||
| 210 | KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition(); | ||
| 211 | static_assert(KMemoryRegionType_DramPoolManagement.GetValue() == | ||
| 212 | (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | | ||
| 213 | KMemoryRegionAttr_CarveoutProtected)); | ||
| 214 | static_assert(KMemoryRegionType_DramUserPool.GetValue() == | ||
| 215 | (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); | ||
| 216 | |||
| 217 | constexpr auto KMemoryRegionType_DramApplicationPool = KMemoryRegionType_DramUserPool.Derive(4, 0); | ||
| 218 | constexpr auto KMemoryRegionType_DramAppletPool = KMemoryRegionType_DramUserPool.Derive(4, 1); | ||
| 219 | constexpr auto KMemoryRegionType_DramSystemNonSecurePool = | ||
| 220 | KMemoryRegionType_DramUserPool.Derive(4, 2); | ||
| 221 | constexpr auto KMemoryRegionType_DramSystemPool = | ||
| 222 | KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected); | ||
| 223 | static_assert(KMemoryRegionType_DramApplicationPool.GetValue() == | ||
| 224 | (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); | ||
| 225 | static_assert(KMemoryRegionType_DramAppletPool.GetValue() == | ||
| 226 | (0xBA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); | ||
| 227 | static_assert(KMemoryRegionType_DramSystemNonSecurePool.GetValue() == | ||
| 228 | (0xDA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); | ||
| 229 | static_assert(KMemoryRegionType_DramSystemPool.GetValue() == | ||
| 230 | (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | | ||
| 231 | KMemoryRegionAttr_CarveoutProtected)); | ||
| 232 | |||
| 233 | constexpr auto KMemoryRegionType_VirtualDramHeapBase = KMemoryRegionType_Dram.DeriveSparse(1, 3, 0); | ||
| 234 | constexpr auto KMemoryRegionType_VirtualDramKernelPtHeap = | ||
| 235 | KMemoryRegionType_Dram.DeriveSparse(1, 3, 1); | ||
| 236 | constexpr auto KMemoryRegionType_VirtualDramKernelTraceBuffer = | ||
| 237 | KMemoryRegionType_Dram.DeriveSparse(1, 3, 2); | ||
| 238 | static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A); | ||
| 239 | static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A); | ||
| 240 | static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A); | ||
| 241 | |||
| 242 | constexpr auto KMemoryRegionType_VirtualDramKernelInitPt = | ||
| 243 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0); | ||
| 244 | constexpr auto KMemoryRegionType_VirtualDramPoolManagement = | ||
| 245 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1); | ||
| 246 | constexpr auto KMemoryRegionType_VirtualDramUserPool = | ||
| 247 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2); | ||
| 248 | static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A); | ||
| 249 | static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A); | ||
| 250 | static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A); | ||
| 251 | |||
| 252 | // NOTE: For an unknown reason, the pools are derived out-of-order here. It's worth eventually trying | ||
| 253 | // to understand why Nintendo made this choice. | ||
| 254 | // UNUSED: .Derive(6, 0); | ||
| 255 | // UNUSED: .Derive(6, 1); | ||
| 256 | constexpr auto KMemoryRegionType_VirtualDramAppletPool = | ||
| 257 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 2); | ||
| 258 | constexpr auto KMemoryRegionType_VirtualDramApplicationPool = | ||
| 259 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 3); | ||
| 260 | constexpr auto KMemoryRegionType_VirtualDramSystemNonSecurePool = | ||
| 261 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 4); | ||
| 262 | constexpr auto KMemoryRegionType_VirtualDramSystemPool = | ||
| 263 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 5); | ||
| 264 | static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A); | ||
| 265 | static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A); | ||
| 266 | static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A); | ||
| 267 | static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A); | ||
| 268 | |||
| 269 | constexpr auto KMemoryRegionType_ArchDeviceBase = | ||
| 270 | KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly(); | ||
| 271 | constexpr auto KMemoryRegionType_BoardDeviceBase = | ||
| 272 | KMemoryRegionType_Kernel.DeriveTransition(0, 2).SetDenseOnly(); | ||
| 273 | static_assert(KMemoryRegionType_ArchDeviceBase.GetValue() == 0x5); | ||
| 274 | static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5); | ||
| 275 | |||
| 276 | #if defined(ARCH_ARM64) | ||
| 277 | #include "core/hle/kernel/arch/arm64/k_memory_region_device_types.inc" | ||
| 278 | #elif defined(ARCH_ARM) | ||
| 279 | #error "Unimplemented" | ||
| 280 | #else | ||
| 281 | // Default to no architecture devices. | ||
| 282 | constexpr auto NumArchitectureDeviceRegions = 0; | ||
| 283 | #endif | ||
| 284 | static_assert(NumArchitectureDeviceRegions >= 0); | ||
| 285 | |||
| 286 | #if defined(BOARD_NINTENDO_NX) | ||
| 287 | #include "core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc" | ||
| 288 | #else | ||
| 289 | // Default to no board devices. | ||
| 290 | constexpr auto NumBoardDeviceRegions = 0; | ||
| 291 | #endif | ||
| 292 | static_assert(NumBoardDeviceRegions >= 0); | ||
| 293 | |||
| 294 | constexpr auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0); | ||
| 295 | constexpr auto KMemoryRegionType_KernelStack = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1); | ||
| 296 | constexpr auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2); | ||
| 297 | constexpr auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3); | ||
| 298 | static_assert(KMemoryRegionType_KernelCode.GetValue() == 0x19); | ||
| 299 | static_assert(KMemoryRegionType_KernelStack.GetValue() == 0x29); | ||
| 300 | static_assert(KMemoryRegionType_KernelMisc.GetValue() == 0x49); | ||
| 301 | static_assert(KMemoryRegionType_KernelSlab.GetValue() == 0x89); | ||
| 302 | |||
| 303 | constexpr auto KMemoryRegionType_KernelMiscDerivedBase = | ||
| 304 | KMemoryRegionType_KernelMisc.DeriveTransition(); | ||
| 305 | static_assert(KMemoryRegionType_KernelMiscDerivedBase.GetValue() == 0x149); | ||
| 306 | |||
| 307 | // UNUSED: .Derive(7, 0); | ||
| 308 | constexpr auto KMemoryRegionType_KernelMiscMainStack = | ||
| 309 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 1); | ||
| 310 | constexpr auto KMemoryRegionType_KernelMiscMappedDevice = | ||
| 311 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 2); | ||
| 312 | constexpr auto KMemoryRegionType_KernelMiscExceptionStack = | ||
| 313 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 3); | ||
| 314 | constexpr auto KMemoryRegionType_KernelMiscUnknownDebug = | ||
| 315 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 4); | ||
| 316 | // UNUSED: .Derive(7, 5); | ||
| 317 | constexpr auto KMemoryRegionType_KernelMiscIdleStack = | ||
| 318 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 6); | ||
| 319 | static_assert(KMemoryRegionType_KernelMiscMainStack.GetValue() == 0xB49); | ||
| 320 | static_assert(KMemoryRegionType_KernelMiscMappedDevice.GetValue() == 0xD49); | ||
| 321 | static_assert(KMemoryRegionType_KernelMiscExceptionStack.GetValue() == 0x1349); | ||
| 322 | static_assert(KMemoryRegionType_KernelMiscUnknownDebug.GetValue() == 0x1549); | ||
| 323 | static_assert(KMemoryRegionType_KernelMiscIdleStack.GetValue() == 0x2349); | ||
| 324 | |||
| 325 | constexpr auto KMemoryRegionType_KernelTemp = KMemoryRegionType_Kernel.Advance(2).Derive(2, 0); | ||
| 326 | static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31); | ||
| 327 | |||
| 328 | constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) { | ||
| 329 | if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) { | ||
| 330 | return KMemoryRegionType_VirtualDramKernelTraceBuffer; | ||
| 331 | } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) { | ||
| 332 | return KMemoryRegionType_VirtualDramKernelPtHeap; | ||
| 333 | } else { | ||
| 334 | return KMemoryRegionType_Dram; | ||
| 335 | } | ||
| 336 | } | ||
| 337 | |||
| 338 | } // namespace Kernel | ||
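Note: the point of the Derive/DeriveSparse bit allocation above is that every derived type's value is a strict superset of its ancestor's bits, so ancestry tests reduce to a single OR-and-compare. The values below are copied from the static_asserts in this header (attribute bits omitted); the check mirrors KMemoryRegion::IsDerivedFrom:

```cpp
#include <cstdint>

constexpr bool IsDerivedFrom(std::uint32_t type, std::uint32_t ancestor) {
    return (type | ancestor) == type; // type carries every ancestor bit
}

// Dram (0x2) -> DramHeapBase (0x26) -> DramUserPool (0x1A6) -> DramApplicationPool (0x7A6)
static_assert(IsDerivedFrom(0x26, 0x2));
static_assert(IsDerivedFrom(0x1A6, 0x26));
static_assert(IsDerivedFrom(0x7A6, 0x1A6));
// Kernel (0x1) is a different root, so heap types do not derive from it.
static_assert(!IsDerivedFrom(0x26, 0x1));
```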
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index e7de48476..d1df97305 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp | |||
| @@ -62,7 +62,7 @@ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedul | |||
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) { | 64 | u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) { |
| 65 | std::scoped_lock lock{guard}; | 65 | KScopedSpinLock lk{guard}; |
| 66 | if (KThread* prev_highest_thread = state.highest_priority_thread; | 66 | if (KThread* prev_highest_thread = state.highest_priority_thread; |
| 67 | prev_highest_thread != highest_thread) { | 67 | prev_highest_thread != highest_thread) { |
| 68 | if (prev_highest_thread != nullptr) { | 68 | if (prev_highest_thread != nullptr) { |
| @@ -637,11 +637,11 @@ void KScheduler::RescheduleCurrentCore() { | |||
| 637 | if (phys_core.IsInterrupted()) { | 637 | if (phys_core.IsInterrupted()) { |
| 638 | phys_core.ClearInterrupt(); | 638 | phys_core.ClearInterrupt(); |
| 639 | } | 639 | } |
| 640 | guard.lock(); | 640 | guard.Lock(); |
| 641 | if (state.needs_scheduling.load()) { | 641 | if (state.needs_scheduling.load()) { |
| 642 | Schedule(); | 642 | Schedule(); |
| 643 | } else { | 643 | } else { |
| 644 | guard.unlock(); | 644 | guard.Unlock(); |
| 645 | } | 645 | } |
| 646 | } | 646 | } |
| 647 | 647 | ||
| @@ -669,7 +669,7 @@ void KScheduler::Unload(KThread* thread) { | |||
| 669 | } else { | 669 | } else { |
| 670 | prev_thread = nullptr; | 670 | prev_thread = nullptr; |
| 671 | } | 671 | } |
| 672 | thread->context_guard.unlock(); | 672 | thread->context_guard.Unlock(); |
| 673 | } | 673 | } |
| 674 | } | 674 | } |
| 675 | 675 | ||
| @@ -713,7 +713,7 @@ void KScheduler::ScheduleImpl() { | |||
| 713 | 713 | ||
| 714 | // If we're not actually switching threads, there's nothing to do. | 714 | // If we're not actually switching threads, there's nothing to do. |
| 715 | if (next_thread == current_thread.load()) { | 715 | if (next_thread == current_thread.load()) { |
| 716 | guard.unlock(); | 716 | guard.Unlock(); |
| 717 | return; | 717 | return; |
| 718 | } | 718 | } |
| 719 | 719 | ||
| @@ -732,7 +732,7 @@ void KScheduler::ScheduleImpl() { | |||
| 732 | } else { | 732 | } else { |
| 733 | old_context = &idle_thread->GetHostContext(); | 733 | old_context = &idle_thread->GetHostContext(); |
| 734 | } | 734 | } |
| 735 | guard.unlock(); | 735 | guard.Unlock(); |
| 736 | 736 | ||
| 737 | Common::Fiber::YieldTo(*old_context, *switch_fiber); | 737 | Common::Fiber::YieldTo(*old_context, *switch_fiber); |
| 738 | /// When a thread wakes up, the scheduler may have changed to a different one on another core. | 738 | /// When a thread wakes up, the scheduler may have changed to a different one on another core. |
| @@ -748,24 +748,24 @@ void KScheduler::OnSwitch(void* this_scheduler) { | |||
| 748 | void KScheduler::SwitchToCurrent() { | 748 | void KScheduler::SwitchToCurrent() { |
| 749 | while (true) { | 749 | while (true) { |
| 750 | { | 750 | { |
| 751 | std::scoped_lock lock{guard}; | 751 | KScopedSpinLock lk{guard}; |
| 752 | current_thread.store(state.highest_priority_thread); | 752 | current_thread.store(state.highest_priority_thread); |
| 753 | state.needs_scheduling.store(false); | 753 | state.needs_scheduling.store(false); |
| 754 | } | 754 | } |
| 755 | const auto is_switch_pending = [this] { | 755 | const auto is_switch_pending = [this] { |
| 756 | std::scoped_lock lock{guard}; | 756 | KScopedSpinLock lk{guard}; |
| 757 | return state.needs_scheduling.load(); | 757 | return state.needs_scheduling.load(); |
| 758 | }; | 758 | }; |
| 759 | do { | 759 | do { |
| 760 | auto next_thread = current_thread.load(); | 760 | auto next_thread = current_thread.load(); |
| 761 | if (next_thread != nullptr) { | 761 | if (next_thread != nullptr) { |
| 762 | next_thread->context_guard.lock(); | 762 | next_thread->context_guard.Lock(); |
| 763 | if (next_thread->GetRawState() != ThreadState::Runnable) { | 763 | if (next_thread->GetRawState() != ThreadState::Runnable) { |
| 764 | next_thread->context_guard.unlock(); | 764 | next_thread->context_guard.Unlock(); |
| 765 | break; | 765 | break; |
| 766 | } | 766 | } |
| 767 | if (next_thread->GetActiveCore() != core_id) { | 767 | if (next_thread->GetActiveCore() != core_id) { |
| 768 | next_thread->context_guard.unlock(); | 768 | next_thread->context_guard.Unlock(); |
| 769 | break; | 769 | break; |
| 770 | } | 770 | } |
| 771 | } | 771 | } |
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index 70d6bfcee..8e32865aa 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h | |||
| @@ -2,19 +2,16 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | // This file references various implementation details from Atmosphere, an open-source firmware for | ||
| 6 | // the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. | ||
| 7 | |||
| 8 | #pragma once | 5 | #pragma once |
| 9 | 6 | ||
| 10 | #include <atomic> | 7 | #include <atomic> |
| 11 | 8 | ||
| 12 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 13 | #include "common/spin_lock.h" | ||
| 14 | #include "core/hle/kernel/global_scheduler_context.h" | 10 | #include "core/hle/kernel/global_scheduler_context.h" |
| 15 | #include "core/hle/kernel/k_priority_queue.h" | 11 | #include "core/hle/kernel/k_priority_queue.h" |
| 16 | #include "core/hle/kernel/k_scheduler_lock.h" | 12 | #include "core/hle/kernel/k_scheduler_lock.h" |
| 17 | #include "core/hle/kernel/k_scoped_lock.h" | 13 | #include "core/hle/kernel/k_scoped_lock.h" |
| 14 | #include "core/hle/kernel/k_spin_lock.h" | ||
| 18 | 15 | ||
| 19 | namespace Common { | 16 | namespace Common { |
| 20 | class Fiber; | 17 | class Fiber; |
| @@ -195,7 +192,7 @@ private: | |||
| 195 | u64 last_context_switch_time{}; | 192 | u64 last_context_switch_time{}; |
| 196 | const s32 core_id; | 193 | const s32 core_id; |
| 197 | 194 | ||
| 198 | Common::SpinLock guard{}; | 195 | KSpinLock guard{}; |
| 199 | }; | 196 | }; |
| 200 | 197 | ||
| 201 | class [[nodiscard]] KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> { | 198 | class [[nodiscard]] KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> { |
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h index 169455d18..47e315555 100644 --- a/src/core/hle/kernel/k_scheduler_lock.h +++ b/src/core/hle/kernel/k_scheduler_lock.h | |||
| @@ -2,14 +2,11 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | // This file references various implementation details from Atmosphere, an open-source firmware for | ||
| 6 | // the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. | ||
| 7 | |||
| 8 | #pragma once | 5 | #pragma once |
| 9 | 6 | ||
| 10 | #include "common/assert.h" | 7 | #include "common/assert.h" |
| 11 | #include "common/spin_lock.h" | ||
| 12 | #include "core/hardware_properties.h" | 8 | #include "core/hardware_properties.h" |
| 9 | #include "core/hle/kernel/k_spin_lock.h" | ||
| 13 | #include "core/hle/kernel/k_thread.h" | 10 | #include "core/hle/kernel/k_thread.h" |
| 14 | #include "core/hle/kernel/kernel.h" | 11 | #include "core/hle/kernel/kernel.h" |
| 15 | 12 | ||
| @@ -34,7 +31,7 @@ public: | |||
| 34 | } else { | 31 | } else { |
| 35 | // Otherwise, we want to disable scheduling and acquire the spinlock. | 32 | // Otherwise, we want to disable scheduling and acquire the spinlock. |
| 36 | SchedulerType::DisableScheduling(kernel); | 33 | SchedulerType::DisableScheduling(kernel); |
| 37 | spin_lock.lock(); | 34 | spin_lock.Lock(); |
| 38 | 35 | ||
| 39 | // For debug, ensure that our state is valid. | 36 | // For debug, ensure that our state is valid. |
| 40 | ASSERT(lock_count == 0); | 37 | ASSERT(lock_count == 0); |
| @@ -58,7 +55,7 @@ public: | |||
| 58 | 55 | ||
| 59 | // Note that we no longer hold the lock, and unlock the spinlock. | 56 | // Note that we no longer hold the lock, and unlock the spinlock. |
| 60 | owner_thread = nullptr; | 57 | owner_thread = nullptr; |
| 61 | spin_lock.unlock(); | 58 | spin_lock.Unlock(); |
| 62 | 59 | ||
| 63 | // Enable scheduling, and perform a rescheduling operation. | 60 | // Enable scheduling, and perform a rescheduling operation. |
| 64 | SchedulerType::EnableScheduling(kernel, cores_needing_scheduling); | 61 | SchedulerType::EnableScheduling(kernel, cores_needing_scheduling); |
| @@ -67,7 +64,7 @@ public: | |||
| 67 | 64 | ||
| 68 | private: | 65 | private: |
| 69 | KernelCore& kernel; | 66 | KernelCore& kernel; |
| 70 | Common::SpinLock spin_lock{}; | 67 | KAlignedSpinLock spin_lock{}; |
| 71 | s32 lock_count{}; | 68 | s32 lock_count{}; |
| 72 | KThread* owner_thread{}; | 69 | KThread* owner_thread{}; |
| 73 | }; | 70 | }; |
diff --git a/src/core/hle/kernel/k_spin_lock.h b/src/core/hle/kernel/k_spin_lock.h index 12c4b2e88..4d87d006a 100644 --- a/src/core/hle/kernel/k_spin_lock.h +++ b/src/core/hle/kernel/k_spin_lock.h | |||
| @@ -28,6 +28,12 @@ private: | |||
| 28 | std::atomic_flag lck = ATOMIC_FLAG_INIT; | 28 | std::atomic_flag lck = ATOMIC_FLAG_INIT; |
| 29 | }; | 29 | }; |
| 30 | 30 | ||
| 31 | // TODO(bunnei): Alias for now, in case we want to implement these accurately in the future. | ||
| 32 | using KAlignedSpinLock = KSpinLock; | ||
| 33 | using KNotAlignedSpinLock = KSpinLock; | ||
| 34 | |||
| 31 | using KScopedSpinLock = KScopedLock<KSpinLock>; | 35 | using KScopedSpinLock = KScopedLock<KSpinLock>; |
| 36 | using KScopedAlignedSpinLock = KScopedLock<KAlignedSpinLock>; | ||
| 37 | using KScopedNotAlignedSpinLock = KScopedLock<KNotAlignedSpinLock>; | ||
| 32 | 38 | ||
| 33 | } // namespace Kernel | 39 | } // namespace Kernel |
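Note: the scheduler changes in this commit replace Common::SpinLock with KSpinLock, whose visible state is a single std::atomic_flag. A plausible sketch of the Lock/Unlock pair follows; the real implementation lives in k_spin_lock.cpp and may add spin-wait hints:

```cpp
#include <atomic>

class SpinLock {
public:
    void Lock() {
        // Spin until the flag was previously clear; the acquire ordering pairs
        // with the release in Unlock so protected data is visible to the owner.
        while (lck.test_and_set(std::memory_order_acquire)) {
        }
    }

    void Unlock() {
        lck.clear(std::memory_order_release);
    }

private:
    std::atomic_flag lck = ATOMIC_FLAG_INIT;
};
```

KScopedSpinLock (and the new aligned/not-aligned scoped aliases) is just KScopedLock over the lock type, so `KScopedSpinLock lk{guard};` in the scheduler acquires on construction and releases at scope exit.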
diff --git a/src/core/hle/kernel/k_system_control.cpp b/src/core/hle/kernel/k_system_control.cpp deleted file mode 100644 index aa1682f69..000000000 --- a/src/core/hle/kernel/k_system_control.cpp +++ /dev/null | |||
| @@ -1,42 +0,0 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <random> | ||
| 6 | |||
| 7 | #include "core/hle/kernel/k_system_control.h" | ||
| 8 | |||
| 9 | namespace Kernel { | ||
| 10 | |||
| 11 | namespace { | ||
| 12 | template <typename F> | ||
| 13 | u64 GenerateUniformRange(u64 min, u64 max, F f) { | ||
| 14 | // Handle the case where the difference is too large to represent. | ||
| 15 | if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) { | ||
| 16 | return f(); | ||
| 17 | } | ||
| 18 | |||
| 19 | // Iterate until we get a value in range. | ||
| 20 | const u64 range_size = ((max + 1) - min); | ||
| 21 | const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size; | ||
| 22 | while (true) { | ||
| 23 | if (const u64 rnd = f(); rnd < effective_max) { | ||
| 24 | return min + (rnd % range_size); | ||
| 25 | } | ||
| 26 | } | ||
| 27 | } | ||
| 28 | |||
| 29 | } // Anonymous namespace | ||
| 30 | |||
| 31 | u64 KSystemControl::GenerateRandomU64() { | ||
| 32 | static std::random_device device; | ||
| 33 | static std::mt19937 gen(device()); | ||
| 34 | static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max()); | ||
| 35 | return distribution(gen); | ||
| 36 | } | ||
| 37 | |||
| 38 | u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) { | ||
| 39 | return GenerateUniformRange(min, max, GenerateRandomU64); | ||
| 40 | } | ||
| 41 | |||
| 42 | } // namespace Kernel | ||
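
The deleted file is relocated under the new board directory rather than removed outright (see the k_system_control.h hunk below). Its GenerateUniformRange avoids modulo bias by rejection sampling: raw draws at or above the largest multiple of the range size are discarded, so the final modulo maps an exact whole number of raw values onto each result. A self-contained sketch of the same technique, using std::mt19937_64 in place of the GenerateRandomU64 callback:

    #include <cstdint>
    #include <limits>
    #include <random>

    // Rejection sampling in the shape of GenerateUniformRange: reject raw
    // draws at or above effective_max so the modulo introduces no bias.
    std::uint64_t UniformRange(std::uint64_t min, std::uint64_t max,
                               std::mt19937_64& gen) {
        // Full-width range: every raw draw is already in range.
        if (min == 0 && max == std::numeric_limits<std::uint64_t>::max()) {
            return gen();
        }
        const std::uint64_t range_size = (max + 1) - min;
        const std::uint64_t effective_max =
            (std::numeric_limits<std::uint64_t>::max() / range_size) * range_size;
        while (true) {
            if (const std::uint64_t rnd = gen(); rnd < effective_max) {
                return min + (rnd % range_size);
            }
        }
    }

    // Usage:
    //   std::mt19937_64 gen{std::random_device{}()};
    //   UniformRange(10, 20, gen); // uniform over [10, 20]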
diff --git a/src/core/hle/kernel/k_system_control.h b/src/core/hle/kernel/k_system_control.h index 1d5b64ffa..d755082c2 100644 --- a/src/core/hle/kernel/k_system_control.h +++ b/src/core/hle/kernel/k_system_control.h | |||
| @@ -6,14 +6,18 @@ | |||
| 6 | 6 | ||
| 7 | #include "common/common_types.h" | 7 | #include "common/common_types.h" |
| 8 | 8 | ||
| 9 | namespace Kernel { | 9 | #define BOARD_NINTENDO_NX |
| 10 | |||
| 11 | #ifdef BOARD_NINTENDO_NX | ||
| 10 | 12 | ||
| 11 | class KSystemControl { | 13 | #include "core/hle/kernel/board/nintendo/nx/k_system_control.h" |
| 12 | public: | ||
| 13 | KSystemControl() = default; | ||
| 14 | 14 | ||
| 15 | static u64 GenerateRandomRange(u64 min, u64 max); | 15 | namespace Kernel { |
| 16 | static u64 GenerateRandomU64(); | 16 | |
| 17 | }; | 17 | using Kernel::Board::Nintendo::Nx::KSystemControl; |
| 18 | 18 | ||
| 19 | } // namespace Kernel | 19 | } // namespace Kernel |
| 20 | |||
| 21 | #else | ||
| 22 | #error "Unknown board for KSystemControl" | ||
| 23 | #endif | ||
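
KSystemControl is now resolved at compile time: the generic header selects a board implementation and re-exports it into the Kernel namespace, failing the build for unknown boards. Sketched below is how a second board would slot into the same #ifdef chain; BOARD_EXAMPLE and its include path are invented for illustration, and only BOARD_NINTENDO_NX exists in this commit.

    #if defined(BOARD_NINTENDO_NX)
    #include "core/hle/kernel/board/nintendo/nx/k_system_control.h"
    namespace Kernel {
    using Kernel::Board::Nintendo::Nx::KSystemControl;
    } // namespace Kernel
    #elif defined(BOARD_EXAMPLE)
    #include "core/hle/kernel/board/example/k_system_control.h"
    namespace Kernel {
    using Kernel::Board::Example::KSystemControl;
    } // namespace Kernel
    #else
    #error "Unknown board for KSystemControl"
    #endif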
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index 1c19b23dc..1c86fdd20 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h | |||
| @@ -14,10 +14,10 @@ | |||
| 14 | 14 | ||
| 15 | #include "common/common_types.h" | 15 | #include "common/common_types.h" |
| 16 | #include "common/intrusive_red_black_tree.h" | 16 | #include "common/intrusive_red_black_tree.h" |
| 17 | #include "common/spin_lock.h" | ||
| 18 | #include "core/arm/arm_interface.h" | 17 | #include "core/arm/arm_interface.h" |
| 19 | #include "core/hle/kernel/k_affinity_mask.h" | 18 | #include "core/hle/kernel/k_affinity_mask.h" |
| 20 | #include "core/hle/kernel/k_light_lock.h" | 19 | #include "core/hle/kernel/k_light_lock.h" |
| 20 | #include "core/hle/kernel/k_spin_lock.h" | ||
| 21 | #include "core/hle/kernel/k_synchronization_object.h" | 21 | #include "core/hle/kernel/k_synchronization_object.h" |
| 22 | #include "core/hle/kernel/object.h" | 22 | #include "core/hle/kernel/object.h" |
| 23 | #include "core/hle/kernel/svc_common.h" | 23 | #include "core/hle/kernel/svc_common.h" |
| @@ -732,7 +732,7 @@ private: | |||
| 732 | s8 priority_inheritance_count{}; | 732 | s8 priority_inheritance_count{}; |
| 733 | bool resource_limit_release_hint{}; | 733 | bool resource_limit_release_hint{}; |
| 734 | StackParameters stack_parameters{}; | 734 | StackParameters stack_parameters{}; |
| 735 | Common::SpinLock context_guard{}; | 735 | KSpinLock context_guard{}; |
| 736 | 736 | ||
| 737 | // For emulation | 737 | // For emulation |
| 738 | std::shared_ptr<Common::Fiber> host_context{}; | 738 | std::shared_ptr<Common::Fiber> host_context{}; |
diff --git a/src/core/hle/kernel/k_trace.h b/src/core/hle/kernel/k_trace.h new file mode 100644 index 000000000..91ebf9ab2 --- /dev/null +++ b/src/core/hle/kernel/k_trace.h | |||
| @@ -0,0 +1,12 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | namespace Kernel { | ||
| 8 | |||
| 9 | constexpr bool IsKTraceEnabled = false; | ||
| 10 | constexpr std::size_t KTraceBufferSize = IsKTraceEnabled ? 16 * 1024 * 1024 : 0; | ||
| 11 | |||
| 12 | } // namespace Kernel | ||
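
Because IsKTraceEnabled is constexpr, KTraceBufferSize folds to zero when tracing is off and callers can drop the tracing path entirely at compile time. A self-contained sketch of consuming such a toggle with if constexpr (the sketch includes <cstddef> for std::size_t explicitly, which the new header itself uses and presumably picks up transitively):

    #include <cstddef>

    namespace Kernel {
    constexpr bool IsKTraceEnabled = false;
    constexpr std::size_t KTraceBufferSize =
        IsKTraceEnabled ? 16 * 1024 * 1024 : 0;
    } // namespace Kernel

    // The discarded branch costs nothing at runtime when tracing is off.
    void EmitTraceEntry() {
        if constexpr (Kernel::IsKTraceEnabled) {
            // Would append an entry to the KTraceBufferSize-byte buffer.
        }
    }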
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index a1520e147..8fd990577 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // Copyright 2014 Citra Emulator Project | 1 | // Copyright 2021 yuzu Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include <utility> | 12 | #include <utility> |
| 13 | 13 | ||
| 14 | #include "common/assert.h" | 14 | #include "common/assert.h" |
| 15 | #include "common/common_sizes.h" | ||
| 15 | #include "common/logging/log.h" | 16 | #include "common/logging/log.h" |
| 16 | #include "common/microprofile.h" | 17 | #include "common/microprofile.h" |
| 17 | #include "common/thread.h" | 18 | #include "common/thread.h" |
| @@ -268,45 +269,314 @@ struct KernelCore::Impl { | |||
| 268 | return schedulers[thread_id]->GetCurrentThread(); | 269 | return schedulers[thread_id]->GetCurrentThread(); |
| 269 | } | 270 | } |
| 270 | 271 | ||
| 272 | void DeriveInitialMemoryLayout(KMemoryLayout& memory_layout) { | ||
| 273 | // Insert the root region for the virtual memory tree, from which all other regions will | ||
| 274 | // derive. | ||
| 275 | memory_layout.GetVirtualMemoryRegionTree().InsertDirectly( | ||
| 276 | KernelVirtualAddressSpaceBase, | ||
| 277 | KernelVirtualAddressSpaceBase + KernelVirtualAddressSpaceSize - 1); | ||
| 278 | |||
| 279 | // Insert the root region for the physical memory tree, from which all other regions will | ||
| 280 | // derive. | ||
| 281 | memory_layout.GetPhysicalMemoryRegionTree().InsertDirectly( | ||
| 282 | KernelPhysicalAddressSpaceBase, | ||
| 283 | KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1); | ||
| 284 | |||
| 285 | // Save start and end for ease of use. | ||
| 286 | const VAddr code_start_virt_addr = KernelVirtualAddressCodeBase; | ||
| 287 | const VAddr code_end_virt_addr = KernelVirtualAddressCodeEnd; | ||
| 288 | |||
| 289 | // Setup the containing kernel region. | ||
| 290 | constexpr size_t KernelRegionSize = Common::Size_1_GB; | ||
| 291 | constexpr size_t KernelRegionAlign = Common::Size_1_GB; | ||
| 292 | constexpr VAddr kernel_region_start = | ||
| 293 | Common::AlignDown(code_start_virt_addr, KernelRegionAlign); | ||
| 294 | size_t kernel_region_size = KernelRegionSize; | ||
| 295 | if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) { | ||
| 296 | kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start; | ||
| 297 | } | ||
| 298 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | ||
| 299 | kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel)); | ||
| 300 | |||
| 301 | // Setup the code region. | ||
| 302 | constexpr size_t CodeRegionAlign = PageSize; | ||
| 303 | constexpr VAddr code_region_start = | ||
| 304 | Common::AlignDown(code_start_virt_addr, CodeRegionAlign); | ||
| 305 | constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign); | ||
| 306 | constexpr size_t code_region_size = code_region_end - code_region_start; | ||
| 307 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | ||
| 308 | code_region_start, code_region_size, KMemoryRegionType_KernelCode)); | ||
| 309 | |||
| 310 | // Setup board-specific device physical regions. | ||
| 311 | Init::SetupDevicePhysicalMemoryRegions(memory_layout); | ||
| 312 | |||
| 313 | // Determine the amount of space needed for the misc region. | ||
| 314 | size_t misc_region_needed_size; | ||
| 315 | { | ||
| 316 | // Each core has a one page stack, plus a one page guard, for all three stack types (Main, Idle, Exception). | ||
| 317 | misc_region_needed_size = Core::Hardware::NUM_CPU_CORES * (3 * (PageSize + PageSize)); | ||
| 318 | |||
| 319 | // Account for each auto-map device. | ||
| 320 | for (const auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { | ||
| 321 | if (region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) { | ||
| 322 | // Check that the region is valid. | ||
| 323 | ASSERT(region.GetEndAddress() != 0); | ||
| 324 | |||
| 325 | // Account for the region. | ||
| 326 | misc_region_needed_size += | ||
| 327 | PageSize + (Common::AlignUp(region.GetLastAddress(), PageSize) - | ||
| 328 | Common::AlignDown(region.GetAddress(), PageSize)); | ||
| 329 | } | ||
| 330 | } | ||
| 331 | |||
| 332 | // Multiply the needed size by three, to account for the need for guard space. | ||
| 333 | misc_region_needed_size *= 3; | ||
| 334 | } | ||
| 335 | |||
| 336 | // Decide on the actual size for the misc region. | ||
| 337 | constexpr size_t MiscRegionAlign = KernelAslrAlignment; | ||
| 338 | constexpr size_t MiscRegionMinimumSize = Common::Size_32_MB; | ||
| 339 | const size_t misc_region_size = Common::AlignUp( | ||
| 340 | std::max(misc_region_needed_size, MiscRegionMinimumSize), MiscRegionAlign); | ||
| 341 | ASSERT(misc_region_size > 0); | ||
| 342 | |||
| 343 | // Setup the misc region. | ||
| 344 | const VAddr misc_region_start = | ||
| 345 | memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( | ||
| 346 | misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel); | ||
| 347 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | ||
| 348 | misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc)); | ||
| 349 | |||
| 350 | // Setup the stack region. | ||
| 351 | constexpr size_t StackRegionSize = Common::Size_14_MB; | ||
| 352 | constexpr size_t StackRegionAlign = KernelAslrAlignment; | ||
| 353 | const VAddr stack_region_start = | ||
| 354 | memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( | ||
| 355 | StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel); | ||
| 356 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | ||
| 357 | stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack)); | ||
| 358 | |||
| 359 | // Determine the size of the resource region. | ||
| 360 | const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit(); | ||
| 361 | |||
| 362 | // Determine the size of the slab region. | ||
| 363 | const size_t slab_region_size = Common::AlignUp(KernelSlabHeapSize, PageSize); | ||
| 364 | ASSERT(slab_region_size <= resource_region_size); | ||
| 365 | |||
| 366 | // Setup the slab region. | ||
| 367 | const PAddr code_start_phys_addr = KernelPhysicalAddressCodeBase; | ||
| 368 | const PAddr code_end_phys_addr = code_start_phys_addr + code_region_size; | ||
| 369 | const PAddr slab_start_phys_addr = code_end_phys_addr; | ||
| 370 | const PAddr slab_end_phys_addr = slab_start_phys_addr + slab_region_size; | ||
| 371 | constexpr size_t SlabRegionAlign = KernelAslrAlignment; | ||
| 372 | const size_t slab_region_needed_size = | ||
| 373 | Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) - | ||
| 374 | Common::AlignDown(code_end_phys_addr, SlabRegionAlign); | ||
| 375 | const VAddr slab_region_start = | ||
| 376 | memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( | ||
| 377 | slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) + | ||
| 378 | (code_end_phys_addr % SlabRegionAlign); | ||
| 379 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | ||
| 380 | slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab)); | ||
| 381 | |||
| 382 | // Setup the temp region. | ||
| 383 | constexpr size_t TempRegionSize = Common::Size_128_MB; | ||
| 384 | constexpr size_t TempRegionAlign = KernelAslrAlignment; | ||
| 385 | const VAddr temp_region_start = | ||
| 386 | memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion( | ||
| 387 | TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel); | ||
| 388 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize, | ||
| 389 | KMemoryRegionType_KernelTemp)); | ||
| 390 | |||
| 391 | // Automatically map in devices that have auto-map attributes. | ||
| 392 | for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { | ||
| 393 | // We only care about kernel regions. | ||
| 394 | if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) { | ||
| 395 | continue; | ||
| 396 | } | ||
| 397 | |||
| 398 | // Check whether we should map the region. | ||
| 399 | if (!region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) { | ||
| 400 | continue; | ||
| 401 | } | ||
| 402 | |||
| 403 | // If this region has already been mapped, no need to consider it. | ||
| 404 | if (region.HasTypeAttribute(KMemoryRegionAttr_DidKernelMap)) { | ||
| 405 | continue; | ||
| 406 | } | ||
| 407 | |||
| 408 | // Check that the region is valid. | ||
| 409 | ASSERT(region.GetEndAddress() != 0); | ||
| 410 | |||
| 411 | // Set the attribute to note we've mapped this region. | ||
| 412 | region.SetTypeAttribute(KMemoryRegionAttr_DidKernelMap); | ||
| 413 | |||
| 414 | // Create a virtual pair region and insert it into the tree. | ||
| 415 | const PAddr map_phys_addr = Common::AlignDown(region.GetAddress(), PageSize); | ||
| 416 | const size_t map_size = | ||
| 417 | Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr; | ||
| 418 | const VAddr map_virt_addr = | ||
| 419 | memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard( | ||
| 420 | map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize); | ||
| 421 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | ||
| 422 | map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice)); | ||
| 423 | region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr); | ||
| 424 | } | ||
| 425 | |||
| 426 | Init::SetupDramPhysicalMemoryRegions(memory_layout); | ||
| 427 | |||
| 428 | // Insert a physical region for the kernel code region. | ||
| 429 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 430 | code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode)); | ||
| 431 | |||
| 432 | // Insert a physical region for the kernel slab region. | ||
| 433 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 434 | slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab)); | ||
| 435 | |||
| 436 | // Determine the size available for kernel page table heaps, requiring at least three full 4 MB chunks (> 8 MB). | ||
| 437 | const PAddr resource_end_phys_addr = slab_start_phys_addr + resource_region_size; | ||
| 438 | const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr; | ||
| 439 | ASSERT(page_table_heap_size / Common::Size_4_MB > 2); | ||
| 440 | |||
| 441 | // Insert a physical region for the kernel page table heap region. | ||
| 442 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 443 | slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap)); | ||
| 444 | |||
| 445 | // All DRAM regions that we haven't tagged by this point will be mapped under the linear | ||
| 446 | // mapping. Tag them. | ||
| 447 | for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { | ||
| 448 | if (region.GetType() == KMemoryRegionType_Dram) { | ||
| 449 | // Check that the region is valid. | ||
| 450 | ASSERT(region.GetEndAddress() != 0); | ||
| 451 | |||
| 452 | // Set the linear map attribute. | ||
| 453 | region.SetTypeAttribute(KMemoryRegionAttr_LinearMapped); | ||
| 454 | } | ||
| 455 | } | ||
| 456 | |||
| 457 | // Get the linear region extents. | ||
| 458 | const auto linear_extents = | ||
| 459 | memory_layout.GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | ||
| 460 | KMemoryRegionAttr_LinearMapped); | ||
| 461 | ASSERT(linear_extents.GetEndAddress() != 0); | ||
| 462 | |||
| 463 | // Setup the linear mapping region. | ||
| 464 | constexpr size_t LinearRegionAlign = Common::Size_1_GB; | ||
| 465 | const PAddr aligned_linear_phys_start = | ||
| 466 | Common::AlignDown(linear_extents.GetAddress(), LinearRegionAlign); | ||
| 467 | const size_t linear_region_size = | ||
| 468 | Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) - | ||
| 469 | aligned_linear_phys_start; | ||
| 470 | const VAddr linear_region_start = | ||
| 471 | memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard( | ||
| 472 | linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign); | ||
| 473 | |||
| 474 | const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start; | ||
| 475 | |||
| 476 | // Map and create regions for all the linearly-mapped data. | ||
| 477 | { | ||
| 478 | PAddr cur_phys_addr = 0; | ||
| 479 | u64 cur_size = 0; | ||
| 480 | for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { | ||
| 481 | if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) { | ||
| 482 | continue; | ||
| 483 | } | ||
| 484 | |||
| 485 | ASSERT(region.GetEndAddress() != 0); | ||
| 486 | |||
| 487 | if (cur_size == 0) { | ||
| 488 | cur_phys_addr = region.GetAddress(); | ||
| 489 | cur_size = region.GetSize(); | ||
| 490 | } else if (cur_phys_addr + cur_size == region.GetAddress()) { | ||
| 491 | cur_size += region.GetSize(); | ||
| 492 | } else { | ||
| 493 | cur_phys_addr = region.GetAddress(); | ||
| 494 | cur_size = region.GetSize(); | ||
| 495 | } | ||
| 496 | |||
| 497 | const VAddr region_virt_addr = | ||
| 498 | region.GetAddress() + linear_region_phys_to_virt_diff; | ||
| 499 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | ||
| 500 | region_virt_addr, region.GetSize(), | ||
| 501 | GetTypeForVirtualLinearMapping(region.GetType()))); | ||
| 502 | region.SetPairAddress(region_virt_addr); | ||
| 503 | |||
| 504 | KMemoryRegion* virt_region = | ||
| 505 | memory_layout.GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr); | ||
| 506 | ASSERT(virt_region != nullptr); | ||
| 507 | virt_region->SetPairAddress(region.GetAddress()); | ||
| 508 | } | ||
| 509 | } | ||
| 510 | |||
| 511 | // Insert regions for the initial page table region. | ||
| 512 | ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert( | ||
| 513 | resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt)); | ||
| 514 | ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert( | ||
| 515 | resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize, | ||
| 516 | KMemoryRegionType_VirtualDramKernelInitPt)); | ||
| 517 | |||
| 518 | // All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to | ||
| 519 | // some pool partition. Tag them. | ||
| 520 | for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) { | ||
| 521 | if (region.GetType() == (KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped)) { | ||
| 522 | region.SetType(KMemoryRegionType_DramPoolPartition); | ||
| 523 | } | ||
| 524 | } | ||
| 525 | |||
| 526 | // Setup all other memory regions needed to arrange the pool partitions. | ||
| 527 | Init::SetupPoolPartitionMemoryRegions(memory_layout); | ||
| 528 | |||
| 529 | // Cache all linear regions in their own trees for faster access later. | ||
| 530 | memory_layout.InitializeLinearMemoryRegionTrees(aligned_linear_phys_start, | ||
| 531 | linear_region_start); | ||
| 532 | } | ||
| 533 | |||
| 271 | void InitializeMemoryLayout() { | 534 | void InitializeMemoryLayout() { |
| 272 | // Initialize memory layout | 535 | // Derive the initial memory layout from the emulated board |
| 273 | constexpr KMemoryLayout layout{KMemoryLayout::GetDefaultLayout()}; | 536 | KMemoryLayout memory_layout; |
| 537 | DeriveInitialMemoryLayout(memory_layout); | ||
| 538 | |||
| 539 | const auto system_pool = memory_layout.GetKernelSystemPoolRegionPhysicalExtents(); | ||
| 540 | const auto applet_pool = memory_layout.GetKernelAppletPoolRegionPhysicalExtents(); | ||
| 541 | const auto application_pool = memory_layout.GetKernelApplicationPoolRegionPhysicalExtents(); | ||
| 542 | |||
| 543 | // Initialize memory managers | ||
| 544 | memory_manager = std::make_unique<KMemoryManager>(); | ||
| 545 | memory_manager->InitializeManager(KMemoryManager::Pool::Application, | ||
| 546 | application_pool.GetAddress(), | ||
| 547 | application_pool.GetEndAddress()); | ||
| 548 | memory_manager->InitializeManager(KMemoryManager::Pool::Applet, applet_pool.GetAddress(), | ||
| 549 | applet_pool.GetEndAddress()); | ||
| 550 | memory_manager->InitializeManager(KMemoryManager::Pool::System, system_pool.GetAddress(), | ||
| 551 | system_pool.GetEndAddress()); | ||
| 552 | |||
| 553 | // Setup memory regions for emulated processes | ||
| 554 | // TODO(bunnei): These should not be hardcoded regions initialized within the kernel | ||
| 274 | constexpr std::size_t hid_size{0x40000}; | 555 | constexpr std::size_t hid_size{0x40000}; |
| 275 | constexpr std::size_t font_size{0x1100000}; | 556 | constexpr std::size_t font_size{0x1100000}; |
| 276 | constexpr std::size_t irs_size{0x8000}; | 557 | constexpr std::size_t irs_size{0x8000}; |
| 277 | constexpr std::size_t time_size{0x1000}; | 558 | constexpr std::size_t time_size{0x1000}; |
| 278 | constexpr PAddr hid_addr{layout.System().StartAddress()}; | ||
| 279 | constexpr PAddr font_pa{layout.System().StartAddress() + hid_size}; | ||
| 280 | constexpr PAddr irs_addr{layout.System().StartAddress() + hid_size + font_size}; | ||
| 281 | constexpr PAddr time_addr{layout.System().StartAddress() + hid_size + font_size + irs_size}; | ||
| 282 | 559 | ||
| 283 | // Initialize memory manager | 560 | const PAddr hid_phys_addr{system_pool.GetAddress()}; |
| 284 | memory_manager = std::make_unique<KMemoryManager>(); | 561 | const PAddr font_phys_addr{system_pool.GetAddress() + hid_size}; |
| 285 | memory_manager->InitializeManager(KMemoryManager::Pool::Application, | 562 | const PAddr irs_phys_addr{system_pool.GetAddress() + hid_size + font_size}; |
| 286 | layout.Application().StartAddress(), | 563 | const PAddr time_phys_addr{system_pool.GetAddress() + hid_size + font_size + irs_size}; |
| 287 | layout.Application().EndAddress()); | ||
| 288 | memory_manager->InitializeManager(KMemoryManager::Pool::Applet, | ||
| 289 | layout.Applet().StartAddress(), | ||
| 290 | layout.Applet().EndAddress()); | ||
| 291 | memory_manager->InitializeManager(KMemoryManager::Pool::System, | ||
| 292 | layout.System().StartAddress(), | ||
| 293 | layout.System().EndAddress()); | ||
| 294 | 564 | ||
| 295 | hid_shared_mem = Kernel::KSharedMemory::Create( | 565 | hid_shared_mem = Kernel::KSharedMemory::Create( |
| 296 | system.Kernel(), system.DeviceMemory(), nullptr, {hid_addr, hid_size / PageSize}, | 566 | system.Kernel(), system.DeviceMemory(), nullptr, {hid_phys_addr, hid_size / PageSize}, |
| 297 | KMemoryPermission::None, KMemoryPermission::Read, hid_addr, hid_size, | 567 | KMemoryPermission::None, KMemoryPermission::Read, hid_phys_addr, hid_size, |
| 298 | "HID:SharedMemory"); | 568 | "HID:SharedMemory"); |
| 299 | font_shared_mem = Kernel::KSharedMemory::Create( | 569 | font_shared_mem = Kernel::KSharedMemory::Create( |
| 300 | system.Kernel(), system.DeviceMemory(), nullptr, {font_pa, font_size / PageSize}, | 570 | system.Kernel(), system.DeviceMemory(), nullptr, {font_phys_addr, font_size / PageSize}, |
| 301 | KMemoryPermission::None, KMemoryPermission::Read, font_pa, font_size, | 571 | KMemoryPermission::None, KMemoryPermission::Read, font_phys_addr, font_size, |
| 302 | "Font:SharedMemory"); | 572 | "Font:SharedMemory"); |
| 303 | irs_shared_mem = Kernel::KSharedMemory::Create( | 573 | irs_shared_mem = Kernel::KSharedMemory::Create( |
| 304 | system.Kernel(), system.DeviceMemory(), nullptr, {irs_addr, irs_size / PageSize}, | 574 | system.Kernel(), system.DeviceMemory(), nullptr, {irs_phys_addr, irs_size / PageSize}, |
| 305 | KMemoryPermission::None, KMemoryPermission::Read, irs_addr, irs_size, | 575 | KMemoryPermission::None, KMemoryPermission::Read, irs_phys_addr, irs_size, |
| 306 | "IRS:SharedMemory"); | 576 | "IRS:SharedMemory"); |
| 307 | time_shared_mem = Kernel::KSharedMemory::Create( | 577 | time_shared_mem = Kernel::KSharedMemory::Create( |
| 308 | system.Kernel(), system.DeviceMemory(), nullptr, {time_addr, time_size / PageSize}, | 578 | system.Kernel(), system.DeviceMemory(), nullptr, {time_phys_addr, time_size / PageSize}, |
| 309 | KMemoryPermission::None, KMemoryPermission::Read, time_addr, time_size, | 579 | KMemoryPermission::None, KMemoryPermission::Read, time_phys_addr, time_size, |
| 310 | "Time:SharedMemory"); | 580 | "Time:SharedMemory"); |
| 311 | 581 | ||
| 312 | // Allocate slab heaps | 582 | // Allocate slab heaps |
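
To make the misc-region sizing above concrete: with the Switch's four CPU cores and 4 KiB pages, the stack budget is 4 * 3 * (0x1000 + 0x1000) = 0x18000 bytes (a stack page plus a guard page for each of the Main/Idle/Exception stacks), which even after tripling for guard space sits far below the 32 MB minimum. A compile-time sketch under those assumptions; the 2 MiB MiscRegionAlign stands in for KernelAslrAlignment, whose exact value is not shown in this diff, and the auto-mapped device contribution is omitted.

    #include <algorithm>
    #include <cstddef>

    constexpr std::size_t PageSize = 0x1000;            // 4 KiB
    constexpr std::size_t NumCpuCores = 4;              // Core::Hardware::NUM_CPU_CORES
    constexpr std::size_t Size_32_MB = 32ull << 20;     // MiscRegionMinimumSize
    constexpr std::size_t MiscRegionAlign = 2ull << 20; // assumed KernelAslrAlignment

    constexpr std::size_t AlignUp(std::size_t value, std::size_t align) {
        return (value + align - 1) / align * align;
    }

    // One stack page plus one guard page for each of the three stack types.
    constexpr std::size_t stack_needed = NumCpuCores * (3 * (PageSize + PageSize));

    // (Auto-mapped device regions would be added to the total here.)

    // Triple for guard space, then round up to the minimum region size.
    constexpr std::size_t misc_region_size =
        AlignUp(std::max(stack_needed * 3, Size_32_MB), MiscRegionAlign);

    static_assert(stack_needed == 0x18000);        // 96 KiB
    static_assert(misc_region_size == Size_32_MB); // the minimum dominates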
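
The linear mapping itself reduces to one constant: a single virtual base is reserved for the whole 1 GiB-aligned physical span, and every linear-mapped region's pair address is derived by adding linear_region_phys_to_virt_diff. A sketch of that bookkeeping, with assumed names:

    #include <cstdint>

    using PAddr = std::uint64_t;
    using VAddr = std::uint64_t;

    // One constant offset converts between physical and virtual addresses
    // for every region under the linear mapping.
    struct LinearMapSketch {
        PAddr aligned_phys_start; // linear extents aligned down to 1 GiB
        VAddr virt_start;         // randomly placed, same 1 GiB alignment

        VAddr ToVirtual(PAddr phys) const {
            return phys + (virt_start - aligned_phys_start);
        }
        PAddr ToPhysical(VAddr virt) const {
            return virt - (virt_start - aligned_phys_start);
        }
    };

Because both ends share the 1 GiB alignment, the offset preserves page alignment throughout, which is what lets SetPairAddress record a single address per region in each tree.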
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 56906f2da..a500e63bc 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // Copyright 2014 Citra Emulator Project / PPSSPP Project | 1 | // Copyright 2021 yuzu Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||