Diffstat (limited to 'src')
57 files changed, 3463 insertions, 964 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 113e663b5..f6e082c36 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
| @@ -190,11 +190,13 @@ add_library(core STATIC | |||
| 190 | hle/kernel/k_code_memory.h | 190 | hle/kernel/k_code_memory.h |
| 191 | hle/kernel/k_condition_variable.cpp | 191 | hle/kernel/k_condition_variable.cpp |
| 192 | hle/kernel/k_condition_variable.h | 192 | hle/kernel/k_condition_variable.h |
| 193 | hle/kernel/k_debug.h | ||
| 193 | hle/kernel/k_dynamic_page_manager.h | 194 | hle/kernel/k_dynamic_page_manager.h |
| 194 | hle/kernel/k_dynamic_resource_manager.h | 195 | hle/kernel/k_dynamic_resource_manager.h |
| 195 | hle/kernel/k_dynamic_slab_heap.h | 196 | hle/kernel/k_dynamic_slab_heap.h |
| 196 | hle/kernel/k_event.cpp | 197 | hle/kernel/k_event.cpp |
| 197 | hle/kernel/k_event.h | 198 | hle/kernel/k_event.h |
| 199 | hle/kernel/k_event_info.h | ||
| 198 | hle/kernel/k_handle_table.cpp | 200 | hle/kernel/k_handle_table.cpp |
| 199 | hle/kernel/k_handle_table.h | 201 | hle/kernel/k_handle_table.h |
| 200 | hle/kernel/k_interrupt_manager.cpp | 202 | hle/kernel/k_interrupt_manager.cpp |
| @@ -222,6 +224,8 @@ add_library(core STATIC | |||
| 222 | hle/kernel/k_page_group.h | 224 | hle/kernel/k_page_group.h |
| 223 | hle/kernel/k_page_table.cpp | 225 | hle/kernel/k_page_table.cpp |
| 224 | hle/kernel/k_page_table.h | 226 | hle/kernel/k_page_table.h |
| 227 | hle/kernel/k_page_table_manager.h | ||
| 228 | hle/kernel/k_page_table_slab_heap.h | ||
| 225 | hle/kernel/k_port.cpp | 229 | hle/kernel/k_port.cpp |
| 226 | hle/kernel/k_port.h | 230 | hle/kernel/k_port.h |
| 227 | hle/kernel/k_priority_queue.h | 231 | hle/kernel/k_priority_queue.h |
| @@ -254,6 +258,8 @@ add_library(core STATIC | |||
| 254 | hle/kernel/k_synchronization_object.cpp | 258 | hle/kernel/k_synchronization_object.cpp |
| 255 | hle/kernel/k_synchronization_object.h | 259 | hle/kernel/k_synchronization_object.h |
| 256 | hle/kernel/k_system_control.h | 260 | hle/kernel/k_system_control.h |
| 261 | hle/kernel/k_system_resource.cpp | ||
| 262 | hle/kernel/k_system_resource.h | ||
| 257 | hle/kernel/k_thread.cpp | 263 | hle/kernel/k_thread.cpp |
| 258 | hle/kernel/k_thread.h | 264 | hle/kernel/k_thread.h |
| 259 | hle/kernel/k_thread_local_page.cpp | 265 | hle/kernel/k_thread_local_page.cpp |
diff --git a/src/core/hle/ipc_helpers.h b/src/core/hle/ipc_helpers.h
index 3bb111748..a86bec252 100644
--- a/src/core/hle/ipc_helpers.h
+++ b/src/core/hle/ipc_helpers.h
| @@ -149,7 +149,7 @@ public: | |||
| 149 | context->AddDomainObject(std::move(iface)); | 149 | context->AddDomainObject(std::move(iface)); |
| 150 | } else { | 150 | } else { |
| 151 | kernel.CurrentProcess()->GetResourceLimit()->Reserve( | 151 | kernel.CurrentProcess()->GetResourceLimit()->Reserve( |
| 152 | Kernel::LimitableResource::Sessions, 1); | 152 | Kernel::LimitableResource::SessionCountMax, 1); |
| 153 | 153 | ||
| 154 | auto* session = Kernel::KSession::Create(kernel); | 154 | auto* session = Kernel::KSession::Create(kernel); |
| 155 | session->Initialize(nullptr, iface->GetServiceName()); | 155 | session->Initialize(nullptr, iface->GetServiceName()); |
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
index fe375769e..4b717d091 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
| @@ -9,6 +9,10 @@ namespace Kernel::Board::Nintendo::Nx { | |||
| 9 | 9 | ||
| 10 | class KSystemControl { | 10 | class KSystemControl { |
| 11 | public: | 11 | public: |
| 12 | // This can be overridden as needed. | ||
| 13 | static constexpr size_t SecureAppletMemorySize = 4 * 1024 * 1024; // 4_MB | ||
| 14 | |||
| 15 | public: | ||
| 12 | class Init { | 16 | class Init { |
| 13 | public: | 17 | public: |
| 14 | // Initialization. | 18 | // Initialization. |
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index fd354d484..06010b8d1 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
| @@ -27,16 +27,12 @@ namespace Kernel { | |||
| 27 | 27 | ||
| 28 | SessionRequestHandler::SessionRequestHandler(KernelCore& kernel_, const char* service_name_, | 28 | SessionRequestHandler::SessionRequestHandler(KernelCore& kernel_, const char* service_name_, |
| 29 | ServiceThreadType thread_type) | 29 | ServiceThreadType thread_type) |
| 30 | : kernel{kernel_} { | 30 | : kernel{kernel_}, service_thread{thread_type == ServiceThreadType::CreateNew |
| 31 | if (thread_type == ServiceThreadType::CreateNew) { | 31 | ? kernel.CreateServiceThread(service_name_) |
| 32 | service_thread = kernel.CreateServiceThread(service_name_); | 32 | : kernel.GetDefaultServiceThread()} {} |
| 33 | } else { | ||
| 34 | service_thread = kernel.GetDefaultServiceThread(); | ||
| 35 | } | ||
| 36 | } | ||
| 37 | 33 | ||
| 38 | SessionRequestHandler::~SessionRequestHandler() { | 34 | SessionRequestHandler::~SessionRequestHandler() { |
| 39 | kernel.ReleaseServiceThread(service_thread.lock()); | 35 | kernel.ReleaseServiceThread(service_thread); |
| 40 | } | 36 | } |
| 41 | 37 | ||
| 42 | void SessionRequestHandler::AcceptSession(KServerPort* server_port) { | 38 | void SessionRequestHandler::AcceptSession(KServerPort* server_port) { |
| @@ -49,7 +45,7 @@ void SessionRequestHandler::AcceptSession(KServerPort* server_port) { | |||
| 49 | void SessionRequestHandler::RegisterSession(KServerSession* server_session, | 45 | void SessionRequestHandler::RegisterSession(KServerSession* server_session, |
| 50 | std::shared_ptr<SessionRequestManager> manager) { | 46 | std::shared_ptr<SessionRequestManager> manager) { |
| 51 | manager->SetSessionHandler(shared_from_this()); | 47 | manager->SetSessionHandler(shared_from_this()); |
| 52 | service_thread.lock()->RegisterServerSession(server_session, manager); | 48 | service_thread.RegisterServerSession(server_session, manager); |
| 53 | server_session->Close(); | 49 | server_session->Close(); |
| 54 | } | 50 | } |
| 55 | 51 | ||
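Note on the constructor change above: `service_thread` is now a plain reference member instead of a `std::weak_ptr`, so it can no longer be assigned inside the constructor body; the create-new-or-use-default decision has to move into the member-initializer list as a conditional expression. A minimal standalone sketch of that pattern follows (all names here, Worker, Pool, Handler, are invented for illustration and are not the real yuzu types):

    #include <string>

    // Stand-ins for KernelCore's service-thread machinery.
    struct Worker {
        std::string name;
    };

    struct Pool {
        Worker default_worker{"default"};
        Worker dedicated{"dedicated"};

        Worker& CreateDedicated(const char* /*name*/) { return dedicated; }
        Worker& GetDefault() { return default_worker; }
    };

    class Handler {
    public:
        // A reference member must be bound at initialization time, so the
        // choice between the two workers is folded into one conditional.
        Handler(Pool& pool, bool create_new, const char* name)
            : m_worker{create_new ? pool.CreateDedicated(name) : pool.GetDefault()} {}

    private:
        Worker& m_worker;
    };

    int main() {
        Pool pool;
        Handler a{pool, true, "audio"};   // binds to a dedicated worker
        Handler b{pool, false, nullptr};  // binds to the shared default worker
    }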
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index 67da8e7e1..d87be72d6 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
| @@ -82,13 +82,13 @@ public: | |||
| 82 | void RegisterSession(KServerSession* server_session, | 82 | void RegisterSession(KServerSession* server_session, |
| 83 | std::shared_ptr<SessionRequestManager> manager); | 83 | std::shared_ptr<SessionRequestManager> manager); |
| 84 | 84 | ||
| 85 | std::weak_ptr<ServiceThread> GetServiceThread() const { | 85 | ServiceThread& GetServiceThread() const { |
| 86 | return service_thread; | 86 | return service_thread; |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | protected: | 89 | protected: |
| 90 | KernelCore& kernel; | 90 | KernelCore& kernel; |
| 91 | std::weak_ptr<ServiceThread> service_thread; | 91 | ServiceThread& service_thread; |
| 92 | }; | 92 | }; |
| 93 | 93 | ||
| 94 | using SessionRequestHandlerWeakPtr = std::weak_ptr<SessionRequestHandler>; | 94 | using SessionRequestHandlerWeakPtr = std::weak_ptr<SessionRequestHandler>; |
| @@ -154,7 +154,7 @@ public: | |||
| 154 | session_handler = std::move(handler); | 154 | session_handler = std::move(handler); |
| 155 | } | 155 | } |
| 156 | 156 | ||
| 157 | std::weak_ptr<ServiceThread> GetServiceThread() const { | 157 | ServiceThread& GetServiceThread() const { |
| 158 | return session_handler->GetServiceThread(); | 158 | return session_handler->GetServiceThread(); |
| 159 | } | 159 | } |
| 160 | 160 | ||
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 477e4e407..bda098511 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
| @@ -10,7 +10,9 @@ | |||
| 10 | #include "core/hardware_properties.h" | 10 | #include "core/hardware_properties.h" |
| 11 | #include "core/hle/kernel/init/init_slab_setup.h" | 11 | #include "core/hle/kernel/init/init_slab_setup.h" |
| 12 | #include "core/hle/kernel/k_code_memory.h" | 12 | #include "core/hle/kernel/k_code_memory.h" |
| 13 | #include "core/hle/kernel/k_debug.h" | ||
| 13 | #include "core/hle/kernel/k_event.h" | 14 | #include "core/hle/kernel/k_event.h" |
| 15 | #include "core/hle/kernel/k_event_info.h" | ||
| 14 | #include "core/hle/kernel/k_memory_layout.h" | 16 | #include "core/hle/kernel/k_memory_layout.h" |
| 15 | #include "core/hle/kernel/k_memory_manager.h" | 17 | #include "core/hle/kernel/k_memory_manager.h" |
| 16 | #include "core/hle/kernel/k_page_buffer.h" | 18 | #include "core/hle/kernel/k_page_buffer.h" |
| @@ -22,6 +24,7 @@ | |||
| 22 | #include "core/hle/kernel/k_shared_memory.h" | 24 | #include "core/hle/kernel/k_shared_memory.h" |
| 23 | #include "core/hle/kernel/k_shared_memory_info.h" | 25 | #include "core/hle/kernel/k_shared_memory_info.h" |
| 24 | #include "core/hle/kernel/k_system_control.h" | 26 | #include "core/hle/kernel/k_system_control.h" |
| 27 | #include "core/hle/kernel/k_system_resource.h" | ||
| 25 | #include "core/hle/kernel/k_thread.h" | 28 | #include "core/hle/kernel/k_thread.h" |
| 26 | #include "core/hle/kernel/k_thread_local_page.h" | 29 | #include "core/hle/kernel/k_thread_local_page.h" |
| 27 | #include "core/hle/kernel/k_transfer_memory.h" | 30 | #include "core/hle/kernel/k_transfer_memory.h" |
| @@ -44,7 +47,10 @@ namespace Kernel::Init { | |||
| 44 | HANDLER(KThreadLocalPage, \ | 47 | HANDLER(KThreadLocalPage, \ |
| 45 | (SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \ | 48 | (SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \ |
| 46 | ##__VA_ARGS__) \ | 49 | ##__VA_ARGS__) \ |
| 47 | HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) | 50 | HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) \ |
| 51 | HANDLER(KEventInfo, (SLAB_COUNT(KThread) + SLAB_COUNT(KDebug)), ##__VA_ARGS__) \ | ||
| 52 | HANDLER(KDebug, (SLAB_COUNT(KDebug)), ##__VA_ARGS__) \ | ||
| 53 | HANDLER(KSecureSystemResource, (SLAB_COUNT(KProcess)), ##__VA_ARGS__) | ||
| 48 | 54 | ||
| 49 | namespace { | 55 | namespace { |
| 50 | 56 | ||
| @@ -73,8 +79,20 @@ constexpr size_t SlabCountKResourceLimit = 5; | |||
| 73 | constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES; | 79 | constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES; |
| 74 | constexpr size_t SlabCountKIoPool = 1; | 80 | constexpr size_t SlabCountKIoPool = 1; |
| 75 | constexpr size_t SlabCountKIoRegion = 6; | 81 | constexpr size_t SlabCountKIoRegion = 6; |
| 82 | constexpr size_t SlabcountKSessionRequestMappings = 40; | ||
| 76 | 83 | ||
| 77 | constexpr size_t SlabCountExtraKThread = 160; | 84 | constexpr size_t SlabCountExtraKThread = (1024 + 256 + 256) - SlabCountKThread; |
| 85 | |||
| 86 | namespace test { | ||
| 87 | |||
| 88 | static_assert(KernelPageBufferHeapSize == | ||
| 89 | 2 * PageSize + (SlabCountKProcess + SlabCountKThread + | ||
| 90 | (SlabCountKProcess + SlabCountKThread) / 8) * | ||
| 91 | PageSize); | ||
| 92 | static_assert(KernelPageBufferAdditionalSize == | ||
| 93 | (SlabCountExtraKThread + (SlabCountExtraKThread / 8)) * PageSize); | ||
| 94 | |||
| 95 | } // namespace test | ||
| 78 | 96 | ||
| 79 | /// Helper function to translate from the slab virtual address to the reserved location in physical | 97 | /// Helper function to translate from the slab virtual address to the reserved location in physical |
| 80 | /// memory. | 98 | /// memory. |
| @@ -109,7 +127,7 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd | |||
| 109 | } | 127 | } |
| 110 | 128 | ||
| 111 | size_t CalculateSlabHeapGapSize() { | 129 | size_t CalculateSlabHeapGapSize() { |
| 112 | constexpr size_t KernelSlabHeapGapSize = 2_MiB - 296_KiB; | 130 | constexpr size_t KernelSlabHeapGapSize = 2_MiB - 320_KiB; |
| 113 | static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax); | 131 | static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax); |
| 114 | return KernelSlabHeapGapSize; | 132 | return KernelSlabHeapGapSize; |
| 115 | } | 133 | } |
| @@ -134,6 +152,7 @@ KSlabResourceCounts KSlabResourceCounts::CreateDefault() { | |||
| 134 | .num_KDebug = SlabCountKDebug, | 152 | .num_KDebug = SlabCountKDebug, |
| 135 | .num_KIoPool = SlabCountKIoPool, | 153 | .num_KIoPool = SlabCountKIoPool, |
| 136 | .num_KIoRegion = SlabCountKIoRegion, | 154 | .num_KIoRegion = SlabCountKIoRegion, |
| 155 | .num_KSessionRequestMappings = SlabcountKSessionRequestMappings, | ||
| 137 | }; | 156 | }; |
| 138 | } | 157 | } |
| 139 | 158 | ||
| @@ -164,29 +183,6 @@ size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) { | |||
| 164 | return size; | 183 | return size; |
| 165 | } | 184 | } |
| 166 | 185 | ||
| 167 | void InitializeKPageBufferSlabHeap(Core::System& system) { | ||
| 168 | auto& kernel = system.Kernel(); | ||
| 169 | |||
| 170 | const auto& counts = kernel.SlabResourceCounts(); | ||
| 171 | const size_t num_pages = | ||
| 172 | counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8; | ||
| 173 | const size_t slab_size = num_pages * PageSize; | ||
| 174 | |||
| 175 | // Reserve memory from the system resource limit. | ||
| 176 | ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size)); | ||
| 177 | |||
| 178 | // Allocate memory for the slab. | ||
| 179 | constexpr auto AllocateOption = KMemoryManager::EncodeOption( | ||
| 180 | KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront); | ||
| 181 | const PAddr slab_address = | ||
| 182 | kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption); | ||
| 183 | ASSERT(slab_address != 0); | ||
| 184 | |||
| 185 | // Initialize the slabheap. | ||
| 186 | KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address), | ||
| 187 | slab_size); | ||
| 188 | } | ||
| 189 | |||
| 190 | void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { | 186 | void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { |
| 191 | auto& kernel = system.Kernel(); | 187 | auto& kernel = system.Kernel(); |
| 192 | 188 | ||
| @@ -258,3 +254,30 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { | |||
| 258 | } | 254 | } |
| 259 | 255 | ||
| 260 | } // namespace Kernel::Init | 256 | } // namespace Kernel::Init |
| 257 | |||
| 258 | namespace Kernel { | ||
| 259 | |||
| 260 | void KPageBufferSlabHeap::Initialize(Core::System& system) { | ||
| 261 | auto& kernel = system.Kernel(); | ||
| 262 | const auto& counts = kernel.SlabResourceCounts(); | ||
| 263 | const size_t num_pages = | ||
| 264 | counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8; | ||
| 265 | const size_t slab_size = num_pages * PageSize; | ||
| 266 | |||
| 267 | // Reserve memory from the system resource limit. | ||
| 268 | ASSERT( | ||
| 269 | kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemoryMax, slab_size)); | ||
| 270 | |||
| 271 | // Allocate memory for the slab. | ||
| 272 | constexpr auto AllocateOption = KMemoryManager::EncodeOption( | ||
| 273 | KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront); | ||
| 274 | const PAddr slab_address = | ||
| 275 | kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption); | ||
| 276 | ASSERT(slab_address != 0); | ||
| 277 | |||
| 278 | // Initialize the slabheap. | ||
| 279 | KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address), | ||
| 280 | slab_size); | ||
| 281 | } | ||
| 282 | |||
| 283 | } // namespace Kernel | ||
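The relocated `KPageBufferSlabHeap::Initialize` and the new `test` static_asserts above both rely on the same sizing rule: one page per process, one per thread, plus a 1/8 allowance on top. A small standalone calculation of that formula is shown below; the concrete slab counts (80 processes, 800 threads) are assumptions for illustration only, since SlabCountKProcess and SlabCountKThread are defined outside the excerpted hunks.

    #include <cstddef>
    #include <cstdio>

    int main() {
        // Assumed counts; the real values come from KSlabResourceCounts.
        constexpr std::size_t PageSize = 0x1000;
        constexpr std::size_t num_process = 80;
        constexpr std::size_t num_thread = 800;

        // Same formula as KPageBufferSlabHeap::Initialize: one page per process,
        // one per thread, plus a 1/8 overhead allowance.
        constexpr std::size_t num_pages =
            num_process + num_thread + (num_process + num_thread) / 8;
        constexpr std::size_t slab_size = num_pages * PageSize;

        // With the assumed counts: 80 + 800 + 110 = 990 pages -> 4055040 bytes.
        std::printf("pages = %zu, slab bytes = %zu (%zu KiB)\n", num_pages, slab_size,
                    slab_size / 1024);
    }

The `KernelPageBufferHeapSize` static_assert adds a further two pages on top of this count, and `KernelPageBufferAdditionalSize` applies the same +1/8 rule to the extra-thread count.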
diff --git a/src/core/hle/kernel/init/init_slab_setup.h b/src/core/hle/kernel/init/init_slab_setup.h
index 13be63c87..5e22821bc 100644
--- a/src/core/hle/kernel/init/init_slab_setup.h
+++ b/src/core/hle/kernel/init/init_slab_setup.h
| @@ -33,11 +33,11 @@ struct KSlabResourceCounts { | |||
| 33 | size_t num_KDebug; | 33 | size_t num_KDebug; |
| 34 | size_t num_KIoPool; | 34 | size_t num_KIoPool; |
| 35 | size_t num_KIoRegion; | 35 | size_t num_KIoRegion; |
| 36 | size_t num_KSessionRequestMappings; | ||
| 36 | }; | 37 | }; |
| 37 | 38 | ||
| 38 | void InitializeSlabResourceCounts(KernelCore& kernel); | 39 | void InitializeSlabResourceCounts(KernelCore& kernel); |
| 39 | size_t CalculateTotalSlabHeapSize(const KernelCore& kernel); | 40 | size_t CalculateTotalSlabHeapSize(const KernelCore& kernel); |
| 40 | void InitializeKPageBufferSlabHeap(Core::System& system); | ||
| 41 | void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout); | 41 | void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout); |
| 42 | 42 | ||
| 43 | } // namespace Kernel::Init | 43 | } // namespace Kernel::Init |
diff --git a/src/core/hle/kernel/k_class_token.cpp b/src/core/hle/kernel/k_class_token.cpp
index 10265c23c..a850db3c4 100644
--- a/src/core/hle/kernel/k_class_token.cpp
+++ b/src/core/hle/kernel/k_class_token.cpp
| @@ -16,6 +16,7 @@ | |||
| 16 | #include "core/hle/kernel/k_session.h" | 16 | #include "core/hle/kernel/k_session.h" |
| 17 | #include "core/hle/kernel/k_shared_memory.h" | 17 | #include "core/hle/kernel/k_shared_memory.h" |
| 18 | #include "core/hle/kernel/k_synchronization_object.h" | 18 | #include "core/hle/kernel/k_synchronization_object.h" |
| 19 | #include "core/hle/kernel/k_system_resource.h" | ||
| 19 | #include "core/hle/kernel/k_thread.h" | 20 | #include "core/hle/kernel/k_thread.h" |
| 20 | #include "core/hle/kernel/k_transfer_memory.h" | 21 | #include "core/hle/kernel/k_transfer_memory.h" |
| 21 | 22 | ||
| @@ -119,4 +120,6 @@ static_assert(std::is_final_v<KTransferMemory> && std::is_base_of_v<KAutoObject, | |||
| 119 | // static_assert(std::is_final_v<KCodeMemory> && | 120 | // static_assert(std::is_final_v<KCodeMemory> && |
| 120 | // std::is_base_of_v<KAutoObject, KCodeMemory>); | 121 | // std::is_base_of_v<KAutoObject, KCodeMemory>); |
| 121 | 122 | ||
| 123 | static_assert(std::is_base_of_v<KAutoObject, KSystemResource>); | ||
| 124 | |||
| 122 | } // namespace Kernel | 125 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/k_class_token.h b/src/core/hle/kernel/k_class_token.h
index ab20e00ff..e75b1c035 100644
--- a/src/core/hle/kernel/k_class_token.h
+++ b/src/core/hle/kernel/k_class_token.h
| @@ -10,6 +10,8 @@ namespace Kernel { | |||
| 10 | 10 | ||
| 11 | class KAutoObject; | 11 | class KAutoObject; |
| 12 | 12 | ||
| 13 | class KSystemResource; | ||
| 14 | |||
| 13 | class KClassTokenGenerator { | 15 | class KClassTokenGenerator { |
| 14 | public: | 16 | public: |
| 15 | using TokenBaseType = u16; | 17 | using TokenBaseType = u16; |
| @@ -58,7 +60,7 @@ private: | |||
| 58 | if constexpr (std::is_same<T, KAutoObject>::value) { | 60 | if constexpr (std::is_same<T, KAutoObject>::value) { |
| 59 | static_assert(T::ObjectType == ObjectType::KAutoObject); | 61 | static_assert(T::ObjectType == ObjectType::KAutoObject); |
| 60 | return 0; | 62 | return 0; |
| 61 | } else if constexpr (!std::is_final<T>::value) { | 63 | } else if constexpr (!std::is_final<T>::value && !std::same_as<T, KSystemResource>) { |
| 62 | static_assert(ObjectType::BaseClassesStart <= T::ObjectType && | 64 | static_assert(ObjectType::BaseClassesStart <= T::ObjectType && |
| 63 | T::ObjectType < ObjectType::BaseClassesEnd); | 65 | T::ObjectType < ObjectType::BaseClassesEnd); |
| 64 | constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) - | 66 | constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) - |
| @@ -108,6 +110,8 @@ public: | |||
| 108 | KSessionRequest, | 110 | KSessionRequest, |
| 109 | KCodeMemory, | 111 | KCodeMemory, |
| 110 | 112 | ||
| 113 | KSystemResource, | ||
| 114 | |||
| 111 | // NOTE: True order for these has not been determined yet. | 115 | // NOTE: True order for these has not been determined yet. |
| 112 | KAlpha, | 116 | KAlpha, |
| 113 | KBeta, | 117 | KBeta, |
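The change above carves `KSystemResource` out of the "non-final base class" branch of the token generator: it is a base class in the new hierarchy, but it is not supposed to occupy one of the base-class token slots. A minimal, self-contained sketch of that compile-time branching pattern (std::is_final combined with std::same_as) is below; the toy class names stand in for the kernel types and are not part of the real code.

    #include <concepts>
    #include <type_traits>

    struct Object {};                     // stands in for KAutoObject
    struct SpecialBase : Object {};       // stands in for KSystemResource
    struct OrdinaryBase : Object {};      // a regular non-final base class
    struct Leaf final : OrdinaryBase {};  // a final derived class

    template <typename T>
    constexpr int ClassifyForToken() {
        if constexpr (std::is_same_v<T, Object>) {
            return 0;  // the root of the hierarchy gets token 0
        } else if constexpr (!std::is_final_v<T> && !std::same_as<T, SpecialBase>) {
            return 1;  // ordinary base classes get a dedicated token bit
        } else {
            return 2;  // final classes, and the carved-out SpecialBase, fall through here
        }
    }

    static_assert(ClassifyForToken<Object>() == 0);
    static_assert(ClassifyForToken<OrdinaryBase>() == 1);
    static_assert(ClassifyForToken<Leaf>() == 2);
    static_assert(ClassifyForToken<SpecialBase>() == 2);  // excluded by the std::same_as check

    int main() {}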
diff --git a/src/core/hle/kernel/k_client_port.cpp b/src/core/hle/kernel/k_client_port.cpp
index eaa2e094c..2ec623a58 100644
--- a/src/core/hle/kernel/k_client_port.cpp
+++ b/src/core/hle/kernel/k_client_port.cpp
| @@ -61,7 +61,7 @@ bool KClientPort::IsSignaled() const { | |||
| 61 | Result KClientPort::CreateSession(KClientSession** out) { | 61 | Result KClientPort::CreateSession(KClientSession** out) { |
| 62 | // Reserve a new session from the resource limit. | 62 | // Reserve a new session from the resource limit. |
| 63 | KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(), | 63 | KScopedResourceReservation session_reservation(kernel.CurrentProcess()->GetResourceLimit(), |
| 64 | LimitableResource::Sessions); | 64 | LimitableResource::SessionCountMax); |
| 65 | R_UNLESS(session_reservation.Succeeded(), ResultLimitReached); | 65 | R_UNLESS(session_reservation.Succeeded(), ResultLimitReached); |
| 66 | 66 | ||
| 67 | // Update the session counts. | 67 | // Update the session counts. |
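This hunk only renames the resource enumerator, but the surrounding pattern is worth spelling out: `KScopedResourceReservation` is a scope guard that tries to reserve a limitable resource in its constructor, reports the outcome via `Succeeded()`, and releases the reservation on destruction unless it is committed. Below is a simplified standalone sketch of such a guard; the real class also takes the resource kind, counts and timeouts, which are omitted here.

    #include <cstddef>

    // Toy resource limit: tracks how many units are in use versus allowed.
    class ResourceLimit {
    public:
        explicit ResourceLimit(std::size_t max) : m_max{max} {}
        bool Reserve(std::size_t count) {
            if (m_used + count > m_max) {
                return false;
            }
            m_used += count;
            return true;
        }
        void Release(std::size_t count) { m_used -= count; }

    private:
        std::size_t m_max;
        std::size_t m_used{};
    };

    // Simplified scope guard in the spirit of KScopedResourceReservation.
    class ScopedReservation {
    public:
        ScopedReservation(ResourceLimit& limit, std::size_t count)
            : m_limit{limit}, m_count{count}, m_succeeded{limit.Reserve(count)} {}
        ~ScopedReservation() {
            if (m_succeeded && !m_committed) {
                m_limit.Release(m_count);  // roll back if the caller never committed
            }
        }
        bool Succeeded() const { return m_succeeded; }
        void Commit() { m_committed = true; }

    private:
        ResourceLimit& m_limit;
        std::size_t m_count;
        bool m_succeeded;
        bool m_committed{};
    };

    bool CreateSession(ResourceLimit& limit) {
        ScopedReservation reservation(limit, 1);
        if (!reservation.Succeeded()) {
            return false;  // analogous to R_UNLESS(..., ResultLimitReached)
        }
        // ... create and register the session; on success keep the reservation.
        reservation.Commit();
        return true;
    }

    int main() {
        ResourceLimit limit{2};
        CreateSession(limit);
    }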
diff --git a/src/core/hle/kernel/k_debug.h b/src/core/hle/kernel/k_debug.h
new file mode 100644
index 000000000..e3a0689c8
--- /dev/null
+++ b/src/core/hle/kernel/k_debug.h
| @@ -0,0 +1,20 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include "core/hle/kernel/k_auto_object.h" | ||
| 7 | #include "core/hle/kernel/slab_helpers.h" | ||
| 8 | |||
| 9 | namespace Kernel { | ||
| 10 | |||
| 11 | class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KAutoObjectWithList> { | ||
| 12 | KERNEL_AUTOOBJECT_TRAITS(KDebug, KAutoObject); | ||
| 13 | |||
| 14 | public: | ||
| 15 | explicit KDebug(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {} | ||
| 16 | |||
| 17 | static void PostDestroy([[maybe_unused]] uintptr_t arg) {} | ||
| 18 | }; | ||
| 19 | |||
| 20 | } // namespace Kernel | ||
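`KDebug` follows the same shape as the other kernel objects added in this change: it derives from `KAutoObjectWithSlabHeapAndContainer<KDebug, KAutoObjectWithList>`, a CRTP base that gives each derived type its own slab allocator and container. A heavily stripped-down sketch of that per-type slab idea is below; the names and mechanics are toy stand-ins, and the real base additionally handles reference counting, class tokens and PostDestroy.

    #include <vector>

    // Toy CRTP base: each Derived instantiation gets its own free list ("slab").
    template <typename Derived>
    class SlabAllocated {
    public:
        static Derived* Allocate() {
            if (!free_list.empty()) {
                Derived* obj = free_list.back();
                free_list.pop_back();
                return obj;
            }
            return new Derived();
        }
        static void Free(Derived* obj) { free_list.push_back(obj); }

    private:
        // One list per Derived type -- a KDebug-like type and a KEventInfo-like
        // type would not share storage.
        static inline std::vector<Derived*> free_list;
    };

    class Debug final : public SlabAllocated<Debug> {
    public:
        void PostDestroy() {}  // mirrors KDebug::PostDestroy being a no-op
    };

    int main() {
        Debug* d = Debug::Allocate();
        Debug::Free(d);  // returned to the type's free list, not deleted
    }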
diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h
index 9076c8fa3..ac80d60a1 100644
--- a/src/core/hle/kernel/k_dynamic_page_manager.h
+++ b/src/core/hle/kernel/k_dynamic_page_manager.h
| @@ -3,6 +3,8 @@ | |||
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include <vector> | ||
| 7 | |||
| 6 | #include "common/alignment.h" | 8 | #include "common/alignment.h" |
| 7 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 8 | #include "core/hle/kernel/k_page_bitmap.h" | 10 | #include "core/hle/kernel/k_page_bitmap.h" |
| @@ -33,28 +35,36 @@ public: | |||
| 33 | return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address)); | 35 | return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address)); |
| 34 | } | 36 | } |
| 35 | 37 | ||
| 36 | Result Initialize(VAddr addr, size_t sz) { | 38 | Result Initialize(VAddr memory, size_t size, size_t align) { |
| 37 | // We need to have positive size. | 39 | // We need to have positive size. |
| 38 | R_UNLESS(sz > 0, ResultOutOfMemory); | 40 | R_UNLESS(size > 0, ResultOutOfMemory); |
| 39 | m_backing_memory.resize(sz); | 41 | m_backing_memory.resize(size); |
| 42 | |||
| 43 | // Set addresses. | ||
| 44 | m_address = memory; | ||
| 45 | m_aligned_address = Common::AlignDown(memory, align); | ||
| 40 | 46 | ||
| 41 | // Calculate management overhead. | 47 | // Calculate extents. |
| 42 | const size_t management_size = | 48 | const size_t managed_size = m_address + size - m_aligned_address; |
| 43 | KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer)); | 49 | const size_t overhead_size = Common::AlignUp( |
| 44 | const size_t allocatable_size = sz - management_size; | 50 | KPageBitmap::CalculateManagementOverheadSize(managed_size / sizeof(PageBuffer)), |
| 51 | sizeof(PageBuffer)); | ||
| 52 | R_UNLESS(overhead_size < size, ResultOutOfMemory); | ||
| 45 | 53 | ||
| 46 | // Set tracking fields. | 54 | // Set tracking fields. |
| 47 | m_address = addr; | 55 | m_size = Common::AlignDown(size - overhead_size, sizeof(PageBuffer)); |
| 48 | m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer)); | 56 | m_count = m_size / sizeof(PageBuffer); |
| 49 | m_count = allocatable_size / sizeof(PageBuffer); | ||
| 50 | R_UNLESS(m_count > 0, ResultOutOfMemory); | ||
| 51 | 57 | ||
| 52 | // Clear the management region. | 58 | // Clear the management region. |
| 53 | u64* management_ptr = GetPointer<u64>(m_address + allocatable_size); | 59 | u64* management_ptr = GetPointer<u64>(m_address + size - overhead_size); |
| 54 | std::memset(management_ptr, 0, management_size); | 60 | std::memset(management_ptr, 0, overhead_size); |
| 55 | 61 | ||
| 56 | // Initialize the bitmap. | 62 | // Initialize the bitmap. |
| 57 | m_page_bitmap.Initialize(management_ptr, m_count); | 63 | const size_t allocatable_region_size = |
| 64 | (m_address + size - overhead_size) - m_aligned_address; | ||
| 65 | ASSERT(allocatable_region_size >= sizeof(PageBuffer)); | ||
| 66 | |||
| 67 | m_page_bitmap.Initialize(management_ptr, allocatable_region_size / sizeof(PageBuffer)); | ||
| 58 | 68 | ||
| 59 | // Free the pages to the bitmap. | 69 | // Free the pages to the bitmap. |
| 60 | for (size_t i = 0; i < m_count; i++) { | 70 | for (size_t i = 0; i < m_count; i++) { |
| @@ -62,7 +72,8 @@ public: | |||
| 62 | std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize); | 72 | std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize); |
| 63 | 73 | ||
| 64 | // Set the bit for the free page. | 74 | // Set the bit for the free page. |
| 65 | m_page_bitmap.SetBit(i); | 75 | m_page_bitmap.SetBit((m_address + (i * sizeof(PageBuffer)) - m_aligned_address) / |
| 76 | sizeof(PageBuffer)); | ||
| 66 | } | 77 | } |
| 67 | 78 | ||
| 68 | R_SUCCEED(); | 79 | R_SUCCEED(); |
| @@ -101,7 +112,28 @@ public: | |||
| 101 | m_page_bitmap.ClearBit(offset); | 112 | m_page_bitmap.ClearBit(offset); |
| 102 | m_peak = std::max(m_peak, (++m_used)); | 113 | m_peak = std::max(m_peak, (++m_used)); |
| 103 | 114 | ||
| 104 | return GetPointer<PageBuffer>(m_address) + offset; | 115 | return GetPointer<PageBuffer>(m_aligned_address) + offset; |
| 116 | } | ||
| 117 | |||
| 118 | PageBuffer* Allocate(size_t count) { | ||
| 119 | // Take the lock. | ||
| 120 | // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. | ||
| 121 | KScopedSpinLock lk(m_lock); | ||
| 122 | |||
| 123 | // Find a random free block. | ||
| 124 | s64 soffset = m_page_bitmap.FindFreeRange(count); | ||
| 125 | if (soffset < 0) [[likely]] { | ||
| 126 | return nullptr; | ||
| 127 | } | ||
| 128 | |||
| 129 | const size_t offset = static_cast<size_t>(soffset); | ||
| 130 | |||
| 131 | // Update our tracking. | ||
| 132 | m_page_bitmap.ClearRange(offset, count); | ||
| 133 | m_used += count; | ||
| 134 | m_peak = std::max(m_peak, m_used); | ||
| 135 | |||
| 136 | return GetPointer<PageBuffer>(m_aligned_address) + offset; | ||
| 105 | } | 137 | } |
| 106 | 138 | ||
| 107 | void Free(PageBuffer* pb) { | 139 | void Free(PageBuffer* pb) { |
| @@ -113,7 +145,7 @@ public: | |||
| 113 | KScopedSpinLock lk(m_lock); | 145 | KScopedSpinLock lk(m_lock); |
| 114 | 146 | ||
| 115 | // Set the bit for the free page. | 147 | // Set the bit for the free page. |
| 116 | size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_address) / sizeof(PageBuffer); | 148 | size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_aligned_address) / sizeof(PageBuffer); |
| 117 | m_page_bitmap.SetBit(offset); | 149 | m_page_bitmap.SetBit(offset); |
| 118 | 150 | ||
| 119 | // Decrement our used count. | 151 | // Decrement our used count. |
| @@ -127,6 +159,7 @@ private: | |||
| 127 | size_t m_peak{}; | 159 | size_t m_peak{}; |
| 128 | size_t m_count{}; | 160 | size_t m_count{}; |
| 129 | VAddr m_address{}; | 161 | VAddr m_address{}; |
| 162 | VAddr m_aligned_address{}; | ||
| 130 | size_t m_size{}; | 163 | size_t m_size{}; |
| 131 | 164 | ||
| 132 | // TODO(bunnei): Back by host memory until we emulate kernel virtual address space. | 165 | // TODO(bunnei): Back by host memory until we emulate kernel virtual address space. |
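The rework above introduces `m_aligned_address` so that bitmap bits are indexed relative to the aligned-down base rather than the raw start of the backing region; `Allocate()` and `Free()` must then use the same origin when converting between a page pointer and a bit index, which is exactly what the changed lines do. A small worked example of that index arithmetic follows; the addresses, the 64 KiB alignment and the 4 KiB page size are illustrative assumptions.

    #include <cassert>
    #include <cstdint>

    int main() {
        constexpr std::uint64_t PageSize = 0x1000;

        // Illustrative values: the managed region starts 8 KiB past an aligned boundary.
        constexpr std::uint64_t align = 0x10000;            // 64 KiB alignment
        constexpr std::uint64_t address = 0x80002000;       // start of backing memory
        constexpr std::uint64_t aligned_address = address & ~(align - 1);  // 0x80000000

        // A page at the very start of the backing memory:
        constexpr std::uint64_t page_addr = address;

        // Old scheme: bit index relative to m_address -> 0.
        constexpr std::uint64_t old_index = (page_addr - address) / PageSize;
        // New scheme: bit index relative to m_aligned_address -> 2.
        constexpr std::uint64_t new_index = (page_addr - aligned_address) / PageSize;

        static_assert(old_index == 0);
        static_assert(new_index == 2);

        // Converting back uses the same origin, so allocation and free stay consistent.
        assert(aligned_address + new_index * PageSize == page_addr);
    }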
diff --git a/src/core/hle/kernel/k_dynamic_resource_manager.h b/src/core/hle/kernel/k_dynamic_resource_manager.h
index 1ce517e8e..b6a27d648 100644
--- a/src/core/hle/kernel/k_dynamic_resource_manager.h
+++ b/src/core/hle/kernel/k_dynamic_resource_manager.h
| @@ -6,6 +6,7 @@ | |||
| 6 | #include "common/common_funcs.h" | 6 | #include "common/common_funcs.h" |
| 7 | #include "core/hle/kernel/k_dynamic_slab_heap.h" | 7 | #include "core/hle/kernel/k_dynamic_slab_heap.h" |
| 8 | #include "core/hle/kernel/k_memory_block.h" | 8 | #include "core/hle/kernel/k_memory_block.h" |
| 9 | #include "core/hle/kernel/k_page_group.h" | ||
| 9 | 10 | ||
| 10 | namespace Kernel { | 11 | namespace Kernel { |
| 11 | 12 | ||
| @@ -51,8 +52,10 @@ private: | |||
| 51 | DynamicSlabType* m_slab_heap{}; | 52 | DynamicSlabType* m_slab_heap{}; |
| 52 | }; | 53 | }; |
| 53 | 54 | ||
| 55 | class KBlockInfoManager : public KDynamicResourceManager<KBlockInfo> {}; | ||
| 54 | class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {}; | 56 | class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {}; |
| 55 | 57 | ||
| 58 | using KBlockInfoSlabHeap = typename KBlockInfoManager::DynamicSlabType; | ||
| 56 | using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType; | 59 | using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType; |
| 57 | 60 | ||
| 58 | } // namespace Kernel | 61 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp
index 78ca59463..27f70e5c5 100644
--- a/src/core/hle/kernel/k_event.cpp
+++ b/src/core/hle/kernel/k_event.cpp
| @@ -50,7 +50,7 @@ Result KEvent::Clear() { | |||
| 50 | void KEvent::PostDestroy(uintptr_t arg) { | 50 | void KEvent::PostDestroy(uintptr_t arg) { |
| 51 | // Release the event count resource the owner process holds. | 51 | // Release the event count resource the owner process holds. |
| 52 | KProcess* owner = reinterpret_cast<KProcess*>(arg); | 52 | KProcess* owner = reinterpret_cast<KProcess*>(arg); |
| 53 | owner->GetResourceLimit()->Release(LimitableResource::Events, 1); | 53 | owner->GetResourceLimit()->Release(LimitableResource::EventCountMax, 1); |
| 54 | owner->Close(); | 54 | owner->Close(); |
| 55 | } | 55 | } |
| 56 | 56 | ||
diff --git a/src/core/hle/kernel/k_event_info.h b/src/core/hle/kernel/k_event_info.h
new file mode 100644
index 000000000..25b3ff594
--- /dev/null
+++ b/src/core/hle/kernel/k_event_info.h
| @@ -0,0 +1,64 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <array> | ||
| 7 | |||
| 8 | #include <boost/intrusive/list.hpp> | ||
| 9 | |||
| 10 | #include "core/hle/kernel/slab_helpers.h" | ||
| 11 | #include "core/hle/kernel/svc_types.h" | ||
| 12 | |||
| 13 | namespace Kernel { | ||
| 14 | |||
| 15 | class KEventInfo : public KSlabAllocated<KEventInfo>, public boost::intrusive::list_base_hook<> { | ||
| 16 | public: | ||
| 17 | struct InfoCreateThread { | ||
| 18 | u32 thread_id{}; | ||
| 19 | uintptr_t tls_address{}; | ||
| 20 | }; | ||
| 21 | |||
| 22 | struct InfoExitProcess { | ||
| 23 | Svc::ProcessExitReason reason{}; | ||
| 24 | }; | ||
| 25 | |||
| 26 | struct InfoExitThread { | ||
| 27 | Svc::ThreadExitReason reason{}; | ||
| 28 | }; | ||
| 29 | |||
| 30 | struct InfoException { | ||
| 31 | Svc::DebugException exception_type{}; | ||
| 32 | s32 exception_data_count{}; | ||
| 33 | uintptr_t exception_address{}; | ||
| 34 | std::array<uintptr_t, 4> exception_data{}; | ||
| 35 | }; | ||
| 36 | |||
| 37 | struct InfoSystemCall { | ||
| 38 | s64 tick{}; | ||
| 39 | s32 id{}; | ||
| 40 | }; | ||
| 41 | |||
| 42 | public: | ||
| 43 | KEventInfo() = default; | ||
| 44 | ~KEventInfo() = default; | ||
| 45 | |||
| 46 | public: | ||
| 47 | Svc::DebugEvent event{}; | ||
| 48 | u32 thread_id{}; | ||
| 49 | u32 flags{}; | ||
| 50 | bool is_attached{}; | ||
| 51 | bool continue_flag{}; | ||
| 52 | bool ignore_continue{}; | ||
| 53 | bool close_once{}; | ||
| 54 | union { | ||
| 55 | InfoCreateThread create_thread; | ||
| 56 | InfoExitProcess exit_process; | ||
| 57 | InfoExitThread exit_thread; | ||
| 58 | InfoException exception; | ||
| 59 | InfoSystemCall system_call; | ||
| 60 | } info{}; | ||
| 61 | KThread* debug_thread{}; | ||
| 62 | }; | ||
| 63 | |||
| 64 | } // namespace Kernel | ||
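The new `KEventInfo` keeps its per-event payloads in an anonymous union, with the `event` field acting as the tag that says which union member is live. A small self-contained sketch of the same tagged-union idea is shown below; the enum values and the helper function are invented for illustration and are not part of the real header.

    #include <cstdint>
    #include <cstdio>

    enum class DebugEvent : std::uint32_t { CreateThread, ExitThread, Exception };

    struct EventInfo {
        DebugEvent event{};        // tag: selects which union member is valid
        std::uint32_t thread_id{};
        union {
            struct {
                std::uint64_t tls_address;
            } create_thread;
            struct {
                std::uint32_t reason;
            } exit_thread;
            struct {
                std::uint64_t exception_address;
            } exception;
        } info{};
    };

    // Only the member selected by `event` may be read back afterwards.
    EventInfo MakeCreateThreadEvent(std::uint32_t tid, std::uint64_t tls) {
        EventInfo out{};
        out.event = DebugEvent::CreateThread;
        out.thread_id = tid;
        out.info.create_thread.tls_address = tls;
        return out;
    }

    int main() {
        const EventInfo ev = MakeCreateThreadEvent(42, 0x70000000);
        if (ev.event == DebugEvent::CreateThread) {
            std::printf("thread %u tls=0x%llx\n", ev.thread_id,
                        static_cast<unsigned long long>(ev.info.create_thread.tls_address));
        }
    }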
diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp
index e830ca46e..1c7a766c8 100644
--- a/src/core/hle/kernel/k_handle_table.cpp
+++ b/src/core/hle/kernel/k_handle_table.cpp
| @@ -5,14 +5,11 @@ | |||
| 5 | 5 | ||
| 6 | namespace Kernel { | 6 | namespace Kernel { |
| 7 | 7 | ||
| 8 | KHandleTable::KHandleTable(KernelCore& kernel_) : kernel{kernel_} {} | ||
| 9 | KHandleTable::~KHandleTable() = default; | ||
| 10 | |||
| 11 | Result KHandleTable::Finalize() { | 8 | Result KHandleTable::Finalize() { |
| 12 | // Get the table and clear our record of it. | 9 | // Get the table and clear our record of it. |
| 13 | u16 saved_table_size = 0; | 10 | u16 saved_table_size = 0; |
| 14 | { | 11 | { |
| 15 | KScopedDisableDispatch dd(kernel); | 12 | KScopedDisableDispatch dd{m_kernel}; |
| 16 | KScopedSpinLock lk(m_lock); | 13 | KScopedSpinLock lk(m_lock); |
| 17 | 14 | ||
| 18 | std::swap(m_table_size, saved_table_size); | 15 | std::swap(m_table_size, saved_table_size); |
| @@ -25,28 +22,28 @@ Result KHandleTable::Finalize() { | |||
| 25 | } | 22 | } |
| 26 | } | 23 | } |
| 27 | 24 | ||
| 28 | return ResultSuccess; | 25 | R_SUCCEED(); |
| 29 | } | 26 | } |
| 30 | 27 | ||
| 31 | bool KHandleTable::Remove(Handle handle) { | 28 | bool KHandleTable::Remove(Handle handle) { |
| 32 | // Don't allow removal of a pseudo-handle. | 29 | // Don't allow removal of a pseudo-handle. |
| 33 | if (Svc::IsPseudoHandle(handle)) { | 30 | if (Svc::IsPseudoHandle(handle)) [[unlikely]] { |
| 34 | return false; | 31 | return false; |
| 35 | } | 32 | } |
| 36 | 33 | ||
| 37 | // Handles must not have reserved bits set. | 34 | // Handles must not have reserved bits set. |
| 38 | const auto handle_pack = HandlePack(handle); | 35 | const auto handle_pack = HandlePack(handle); |
| 39 | if (handle_pack.reserved != 0) { | 36 | if (handle_pack.reserved != 0) [[unlikely]] { |
| 40 | return false; | 37 | return false; |
| 41 | } | 38 | } |
| 42 | 39 | ||
| 43 | // Find the object and free the entry. | 40 | // Find the object and free the entry. |
| 44 | KAutoObject* obj = nullptr; | 41 | KAutoObject* obj = nullptr; |
| 45 | { | 42 | { |
| 46 | KScopedDisableDispatch dd(kernel); | 43 | KScopedDisableDispatch dd{m_kernel}; |
| 47 | KScopedSpinLock lk(m_lock); | 44 | KScopedSpinLock lk(m_lock); |
| 48 | 45 | ||
| 49 | if (this->IsValidHandle(handle)) { | 46 | if (this->IsValidHandle(handle)) [[likely]] { |
| 50 | const auto index = handle_pack.index; | 47 | const auto index = handle_pack.index; |
| 51 | 48 | ||
| 52 | obj = m_objects[index]; | 49 | obj = m_objects[index]; |
| @@ -57,13 +54,13 @@ bool KHandleTable::Remove(Handle handle) { | |||
| 57 | } | 54 | } |
| 58 | 55 | ||
| 59 | // Close the object. | 56 | // Close the object. |
| 60 | kernel.UnregisterInUseObject(obj); | 57 | m_kernel.UnregisterInUseObject(obj); |
| 61 | obj->Close(); | 58 | obj->Close(); |
| 62 | return true; | 59 | return true; |
| 63 | } | 60 | } |
| 64 | 61 | ||
| 65 | Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) { | 62 | Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) { |
| 66 | KScopedDisableDispatch dd(kernel); | 63 | KScopedDisableDispatch dd{m_kernel}; |
| 67 | KScopedSpinLock lk(m_lock); | 64 | KScopedSpinLock lk(m_lock); |
| 68 | 65 | ||
| 69 | // Never exceed our capacity. | 66 | // Never exceed our capacity. |
| @@ -82,22 +79,22 @@ Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) { | |||
| 82 | *out_handle = EncodeHandle(static_cast<u16>(index), linear_id); | 79 | *out_handle = EncodeHandle(static_cast<u16>(index), linear_id); |
| 83 | } | 80 | } |
| 84 | 81 | ||
| 85 | return ResultSuccess; | 82 | R_SUCCEED(); |
| 86 | } | 83 | } |
| 87 | 84 | ||
| 88 | Result KHandleTable::Reserve(Handle* out_handle) { | 85 | Result KHandleTable::Reserve(Handle* out_handle) { |
| 89 | KScopedDisableDispatch dd(kernel); | 86 | KScopedDisableDispatch dd{m_kernel}; |
| 90 | KScopedSpinLock lk(m_lock); | 87 | KScopedSpinLock lk(m_lock); |
| 91 | 88 | ||
| 92 | // Never exceed our capacity. | 89 | // Never exceed our capacity. |
| 93 | R_UNLESS(m_count < m_table_size, ResultOutOfHandles); | 90 | R_UNLESS(m_count < m_table_size, ResultOutOfHandles); |
| 94 | 91 | ||
| 95 | *out_handle = EncodeHandle(static_cast<u16>(this->AllocateEntry()), this->AllocateLinearId()); | 92 | *out_handle = EncodeHandle(static_cast<u16>(this->AllocateEntry()), this->AllocateLinearId()); |
| 96 | return ResultSuccess; | 93 | R_SUCCEED(); |
| 97 | } | 94 | } |
| 98 | 95 | ||
| 99 | void KHandleTable::Unreserve(Handle handle) { | 96 | void KHandleTable::Unreserve(Handle handle) { |
| 100 | KScopedDisableDispatch dd(kernel); | 97 | KScopedDisableDispatch dd{m_kernel}; |
| 101 | KScopedSpinLock lk(m_lock); | 98 | KScopedSpinLock lk(m_lock); |
| 102 | 99 | ||
| 103 | // Unpack the handle. | 100 | // Unpack the handle. |
| @@ -108,7 +105,7 @@ void KHandleTable::Unreserve(Handle handle) { | |||
| 108 | ASSERT(reserved == 0); | 105 | ASSERT(reserved == 0); |
| 109 | ASSERT(linear_id != 0); | 106 | ASSERT(linear_id != 0); |
| 110 | 107 | ||
| 111 | if (index < m_table_size) { | 108 | if (index < m_table_size) [[likely]] { |
| 112 | // NOTE: This code does not check the linear id. | 109 | // NOTE: This code does not check the linear id. |
| 113 | ASSERT(m_objects[index] == nullptr); | 110 | ASSERT(m_objects[index] == nullptr); |
| 114 | this->FreeEntry(index); | 111 | this->FreeEntry(index); |
| @@ -116,7 +113,7 @@ void KHandleTable::Unreserve(Handle handle) { | |||
| 116 | } | 113 | } |
| 117 | 114 | ||
| 118 | void KHandleTable::Register(Handle handle, KAutoObject* obj) { | 115 | void KHandleTable::Register(Handle handle, KAutoObject* obj) { |
| 119 | KScopedDisableDispatch dd(kernel); | 116 | KScopedDisableDispatch dd{m_kernel}; |
| 120 | KScopedSpinLock lk(m_lock); | 117 | KScopedSpinLock lk(m_lock); |
| 121 | 118 | ||
| 122 | // Unpack the handle. | 119 | // Unpack the handle. |
| @@ -127,7 +124,7 @@ void KHandleTable::Register(Handle handle, KAutoObject* obj) { | |||
| 127 | ASSERT(reserved == 0); | 124 | ASSERT(reserved == 0); |
| 128 | ASSERT(linear_id != 0); | 125 | ASSERT(linear_id != 0); |
| 129 | 126 | ||
| 130 | if (index < m_table_size) { | 127 | if (index < m_table_size) [[likely]] { |
| 131 | // Set the entry. | 128 | // Set the entry. |
| 132 | ASSERT(m_objects[index] == nullptr); | 129 | ASSERT(m_objects[index] == nullptr); |
| 133 | 130 | ||
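Several returns in this file change from `return ResultSuccess;` to `R_SUCCEED();`, matching the `R_UNLESS(...)` checks already used here. These macros implement an early-return result-code idiom; a rough standalone approximation is sketched below. This is only an approximation of their behavior: the real macros in yuzu's result helpers also integrate with result-scoping constructs that are not reproduced here.

    #include <cstdio>

    // Minimal stand-in for the kernel's result type.
    struct Result {
        int value;
    };
    constexpr Result ResultSuccess{0};
    constexpr Result ResultOutOfHandles{1};

    // Approximate behavior: early-return on a failed condition, explicit success return.
    #define R_UNLESS(cond, res)                                                                    \
        do {                                                                                       \
            if (!(cond)) {                                                                         \
                return (res);                                                                      \
            }                                                                                      \
        } while (0)
    #define R_SUCCEED() return ResultSuccess

    Result ReserveHandle(int count, int table_size) {
        // Never exceed capacity -- mirrors the check in KHandleTable::Reserve.
        R_UNLESS(count < table_size, ResultOutOfHandles);
        R_SUCCEED();
    }

    int main() {
        std::printf("%d\n", ReserveHandle(3, 4).value);  // 0 (success)
        std::printf("%d\n", ReserveHandle(4, 4).value);  // 1 (out of handles)
    }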
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index 0864a737c..65cae3b27 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
| @@ -21,33 +21,38 @@ namespace Kernel { | |||
| 21 | class KernelCore; | 21 | class KernelCore; |
| 22 | 22 | ||
| 23 | class KHandleTable { | 23 | class KHandleTable { |
| 24 | public: | ||
| 25 | YUZU_NON_COPYABLE(KHandleTable); | 24 | YUZU_NON_COPYABLE(KHandleTable); |
| 26 | YUZU_NON_MOVEABLE(KHandleTable); | 25 | YUZU_NON_MOVEABLE(KHandleTable); |
| 27 | 26 | ||
| 27 | public: | ||
| 28 | static constexpr size_t MaxTableSize = 1024; | 28 | static constexpr size_t MaxTableSize = 1024; |
| 29 | 29 | ||
| 30 | explicit KHandleTable(KernelCore& kernel_); | 30 | public: |
| 31 | ~KHandleTable(); | 31 | explicit KHandleTable(KernelCore& kernel) : m_kernel(kernel) {} |
| 32 | 32 | ||
| 33 | Result Initialize(s32 size) { | 33 | Result Initialize(s32 size) { |
| 34 | // Check that the table size is valid. | ||
| 34 | R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory); | 35 | R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory); |
| 35 | 36 | ||
| 37 | // Lock. | ||
| 38 | KScopedDisableDispatch dd{m_kernel}; | ||
| 39 | KScopedSpinLock lk(m_lock); | ||
| 40 | |||
| 36 | // Initialize all fields. | 41 | // Initialize all fields. |
| 37 | m_max_count = 0; | 42 | m_max_count = 0; |
| 38 | m_table_size = static_cast<u16>((size <= 0) ? MaxTableSize : size); | 43 | m_table_size = static_cast<s16>((size <= 0) ? MaxTableSize : size); |
| 39 | m_next_linear_id = MinLinearId; | 44 | m_next_linear_id = MinLinearId; |
| 40 | m_count = 0; | 45 | m_count = 0; |
| 41 | m_free_head_index = -1; | 46 | m_free_head_index = -1; |
| 42 | 47 | ||
| 43 | // Free all entries. | 48 | // Free all entries. |
| 44 | for (s16 i = 0; i < static_cast<s16>(m_table_size); ++i) { | 49 | for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) { |
| 45 | m_objects[i] = nullptr; | 50 | m_objects[i] = nullptr; |
| 46 | m_entry_infos[i].next_free_index = i - 1; | 51 | m_entry_infos[i].next_free_index = static_cast<s16>(i - 1); |
| 47 | m_free_head_index = i; | 52 | m_free_head_index = i; |
| 48 | } | 53 | } |
| 49 | 54 | ||
| 50 | return ResultSuccess; | 55 | R_SUCCEED(); |
| 51 | } | 56 | } |
| 52 | 57 | ||
| 53 | size_t GetTableSize() const { | 58 | size_t GetTableSize() const { |
| @@ -66,13 +71,13 @@ public: | |||
| 66 | template <typename T = KAutoObject> | 71 | template <typename T = KAutoObject> |
| 67 | KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const { | 72 | KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const { |
| 68 | // Lock and look up in table. | 73 | // Lock and look up in table. |
| 69 | KScopedDisableDispatch dd(kernel); | 74 | KScopedDisableDispatch dd{m_kernel}; |
| 70 | KScopedSpinLock lk(m_lock); | 75 | KScopedSpinLock lk(m_lock); |
| 71 | 76 | ||
| 72 | if constexpr (std::is_same_v<T, KAutoObject>) { | 77 | if constexpr (std::is_same_v<T, KAutoObject>) { |
| 73 | return this->GetObjectImpl(handle); | 78 | return this->GetObjectImpl(handle); |
| 74 | } else { | 79 | } else { |
| 75 | if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) { | 80 | if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) [[likely]] { |
| 76 | return obj->DynamicCast<T*>(); | 81 | return obj->DynamicCast<T*>(); |
| 77 | } else { | 82 | } else { |
| 78 | return nullptr; | 83 | return nullptr; |
| @@ -85,13 +90,13 @@ public: | |||
| 85 | // Handle pseudo-handles. | 90 | // Handle pseudo-handles. |
| 86 | if constexpr (std::derived_from<KProcess, T>) { | 91 | if constexpr (std::derived_from<KProcess, T>) { |
| 87 | if (handle == Svc::PseudoHandle::CurrentProcess) { | 92 | if (handle == Svc::PseudoHandle::CurrentProcess) { |
| 88 | auto* const cur_process = kernel.CurrentProcess(); | 93 | auto* const cur_process = m_kernel.CurrentProcess(); |
| 89 | ASSERT(cur_process != nullptr); | 94 | ASSERT(cur_process != nullptr); |
| 90 | return cur_process; | 95 | return cur_process; |
| 91 | } | 96 | } |
| 92 | } else if constexpr (std::derived_from<KThread, T>) { | 97 | } else if constexpr (std::derived_from<KThread, T>) { |
| 93 | if (handle == Svc::PseudoHandle::CurrentThread) { | 98 | if (handle == Svc::PseudoHandle::CurrentThread) { |
| 94 | auto* const cur_thread = GetCurrentThreadPointer(kernel); | 99 | auto* const cur_thread = GetCurrentThreadPointer(m_kernel); |
| 95 | ASSERT(cur_thread != nullptr); | 100 | ASSERT(cur_thread != nullptr); |
| 96 | return cur_thread; | 101 | return cur_thread; |
| 97 | } | 102 | } |
| @@ -100,6 +105,37 @@ public: | |||
| 100 | return this->template GetObjectWithoutPseudoHandle<T>(handle); | 105 | return this->template GetObjectWithoutPseudoHandle<T>(handle); |
| 101 | } | 106 | } |
| 102 | 107 | ||
| 108 | KScopedAutoObject<KAutoObject> GetObjectForIpcWithoutPseudoHandle(Handle handle) const { | ||
| 109 | // Lock and look up in table. | ||
| 110 | KScopedDisableDispatch dd{m_kernel}; | ||
| 111 | KScopedSpinLock lk(m_lock); | ||
| 112 | |||
| 113 | return this->GetObjectImpl(handle); | ||
| 114 | } | ||
| 115 | |||
| 116 | KScopedAutoObject<KAutoObject> GetObjectForIpc(Handle handle, KThread* cur_thread) const { | ||
| 117 | // Handle pseudo-handles. | ||
| 118 | ASSERT(cur_thread != nullptr); | ||
| 119 | if (handle == Svc::PseudoHandle::CurrentProcess) { | ||
| 120 | auto* const cur_process = | ||
| 121 | static_cast<KAutoObject*>(static_cast<void*>(cur_thread->GetOwnerProcess())); | ||
| 122 | ASSERT(cur_process != nullptr); | ||
| 123 | return cur_process; | ||
| 124 | } | ||
| 125 | if (handle == Svc::PseudoHandle::CurrentThread) { | ||
| 126 | return static_cast<KAutoObject*>(cur_thread); | ||
| 127 | } | ||
| 128 | |||
| 129 | return GetObjectForIpcWithoutPseudoHandle(handle); | ||
| 130 | } | ||
| 131 | |||
| 132 | KScopedAutoObject<KAutoObject> GetObjectByIndex(Handle* out_handle, size_t index) const { | ||
| 133 | KScopedDisableDispatch dd{m_kernel}; | ||
| 134 | KScopedSpinLock lk(m_lock); | ||
| 135 | |||
| 136 | return this->GetObjectByIndexImpl(out_handle, index); | ||
| 137 | } | ||
| 138 | |||
| 103 | Result Reserve(Handle* out_handle); | 139 | Result Reserve(Handle* out_handle); |
| 104 | void Unreserve(Handle handle); | 140 | void Unreserve(Handle handle); |
| 105 | 141 | ||
| @@ -112,7 +148,7 @@ public: | |||
| 112 | size_t num_opened; | 148 | size_t num_opened; |
| 113 | { | 149 | { |
| 114 | // Lock the table. | 150 | // Lock the table. |
| 115 | KScopedDisableDispatch dd(kernel); | 151 | KScopedDisableDispatch dd{m_kernel}; |
| 116 | KScopedSpinLock lk(m_lock); | 152 | KScopedSpinLock lk(m_lock); |
| 117 | for (num_opened = 0; num_opened < num_handles; num_opened++) { | 153 | for (num_opened = 0; num_opened < num_handles; num_opened++) { |
| 118 | // Get the current handle. | 154 | // Get the current handle. |
| @@ -120,13 +156,13 @@ public: | |||
| 120 | 156 | ||
| 121 | // Get the object for the current handle. | 157 | // Get the object for the current handle. |
| 122 | KAutoObject* cur_object = this->GetObjectImpl(cur_handle); | 158 | KAutoObject* cur_object = this->GetObjectImpl(cur_handle); |
| 123 | if (cur_object == nullptr) { | 159 | if (cur_object == nullptr) [[unlikely]] { |
| 124 | break; | 160 | break; |
| 125 | } | 161 | } |
| 126 | 162 | ||
| 127 | // Cast the current object to the desired type. | 163 | // Cast the current object to the desired type. |
| 128 | T* cur_t = cur_object->DynamicCast<T*>(); | 164 | T* cur_t = cur_object->DynamicCast<T*>(); |
| 129 | if (cur_t == nullptr) { | 165 | if (cur_t == nullptr) [[unlikely]] { |
| 130 | break; | 166 | break; |
| 131 | } | 167 | } |
| 132 | 168 | ||
| @@ -137,7 +173,7 @@ public: | |||
| 137 | } | 173 | } |
| 138 | 174 | ||
| 139 | // If we converted every object, succeed. | 175 | // If we converted every object, succeed. |
| 140 | if (num_opened == num_handles) { | 176 | if (num_opened == num_handles) [[likely]] { |
| 141 | return true; | 177 | return true; |
| 142 | } | 178 | } |
| 143 | 179 | ||
| @@ -191,21 +227,21 @@ private: | |||
| 191 | ASSERT(reserved == 0); | 227 | ASSERT(reserved == 0); |
| 192 | 228 | ||
| 193 | // Validate our indexing information. | 229 | // Validate our indexing information. |
| 194 | if (raw_value == 0) { | 230 | if (raw_value == 0) [[unlikely]] { |
| 195 | return false; | 231 | return false; |
| 196 | } | 232 | } |
| 197 | if (linear_id == 0) { | 233 | if (linear_id == 0) [[unlikely]] { |
| 198 | return false; | 234 | return false; |
| 199 | } | 235 | } |
| 200 | if (index >= m_table_size) { | 236 | if (index >= m_table_size) [[unlikely]] { |
| 201 | return false; | 237 | return false; |
| 202 | } | 238 | } |
| 203 | 239 | ||
| 204 | // Check that there's an object, and our serial id is correct. | 240 | // Check that there's an object, and our serial id is correct. |
| 205 | if (m_objects[index] == nullptr) { | 241 | if (m_objects[index] == nullptr) [[unlikely]] { |
| 206 | return false; | 242 | return false; |
| 207 | } | 243 | } |
| 208 | if (m_entry_infos[index].GetLinearId() != linear_id) { | 244 | if (m_entry_infos[index].GetLinearId() != linear_id) [[unlikely]] { |
| 209 | return false; | 245 | return false; |
| 210 | } | 246 | } |
| 211 | 247 | ||
| @@ -215,11 +251,11 @@ private: | |||
| 215 | KAutoObject* GetObjectImpl(Handle handle) const { | 251 | KAutoObject* GetObjectImpl(Handle handle) const { |
| 216 | // Handles must not have reserved bits set. | 252 | // Handles must not have reserved bits set. |
| 217 | const auto handle_pack = HandlePack(handle); | 253 | const auto handle_pack = HandlePack(handle); |
| 218 | if (handle_pack.reserved != 0) { | 254 | if (handle_pack.reserved != 0) [[unlikely]] { |
| 219 | return nullptr; | 255 | return nullptr; |
| 220 | } | 256 | } |
| 221 | 257 | ||
| 222 | if (this->IsValidHandle(handle)) { | 258 | if (this->IsValidHandle(handle)) [[likely]] { |
| 223 | return m_objects[handle_pack.index]; | 259 | return m_objects[handle_pack.index]; |
| 224 | } else { | 260 | } else { |
| 225 | return nullptr; | 261 | return nullptr; |
| @@ -227,9 +263,8 @@ private: | |||
| 227 | } | 263 | } |
| 228 | 264 | ||
| 229 | KAutoObject* GetObjectByIndexImpl(Handle* out_handle, size_t index) const { | 265 | KAutoObject* GetObjectByIndexImpl(Handle* out_handle, size_t index) const { |
| 230 | |||
| 231 | // Index must be in bounds. | 266 | // Index must be in bounds. |
| 232 | if (index >= m_table_size) { | 267 | if (index >= m_table_size) [[unlikely]] { |
| 233 | return nullptr; | 268 | return nullptr; |
| 234 | } | 269 | } |
| 235 | 270 | ||
| @@ -244,18 +279,15 @@ private: | |||
| 244 | 279 | ||
| 245 | private: | 280 | private: |
| 246 | union HandlePack { | 281 | union HandlePack { |
| 247 | HandlePack() = default; | 282 | constexpr HandlePack() = default; |
| 248 | HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {} | 283 | constexpr HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {} |
| 249 | 284 | ||
| 250 | u32 raw; | 285 | u32 raw{}; |
| 251 | BitField<0, 15, u32> index; | 286 | BitField<0, 15, u32> index; |
| 252 | BitField<15, 15, u32> linear_id; | 287 | BitField<15, 15, u32> linear_id; |
| 253 | BitField<30, 2, u32> reserved; | 288 | BitField<30, 2, u32> reserved; |
| 254 | }; | 289 | }; |
| 255 | 290 | ||
| 256 | static constexpr u16 MinLinearId = 1; | ||
| 257 | static constexpr u16 MaxLinearId = 0x7FFF; | ||
| 258 | |||
| 259 | static constexpr Handle EncodeHandle(u16 index, u16 linear_id) { | 291 | static constexpr Handle EncodeHandle(u16 index, u16 linear_id) { |
| 260 | HandlePack handle{}; | 292 | HandlePack handle{}; |
| 261 | handle.index.Assign(index); | 293 | handle.index.Assign(index); |
| @@ -264,6 +296,10 @@ private: | |||
| 264 | return handle.raw; | 296 | return handle.raw; |
| 265 | } | 297 | } |
| 266 | 298 | ||
| 299 | private: | ||
| 300 | static constexpr u16 MinLinearId = 1; | ||
| 301 | static constexpr u16 MaxLinearId = 0x7FFF; | ||
| 302 | |||
| 267 | union EntryInfo { | 303 | union EntryInfo { |
| 268 | u16 linear_id; | 304 | u16 linear_id; |
| 269 | s16 next_free_index; | 305 | s16 next_free_index; |
| @@ -271,21 +307,21 @@ private: | |||
| 271 | constexpr u16 GetLinearId() const { | 307 | constexpr u16 GetLinearId() const { |
| 272 | return linear_id; | 308 | return linear_id; |
| 273 | } | 309 | } |
| 274 | constexpr s16 GetNextFreeIndex() const { | 310 | constexpr s32 GetNextFreeIndex() const { |
| 275 | return next_free_index; | 311 | return next_free_index; |
| 276 | } | 312 | } |
| 277 | }; | 313 | }; |
| 278 | 314 | ||
| 279 | private: | 315 | private: |
| 316 | KernelCore& m_kernel; | ||
| 280 | std::array<EntryInfo, MaxTableSize> m_entry_infos{}; | 317 | std::array<EntryInfo, MaxTableSize> m_entry_infos{}; |
| 281 | std::array<KAutoObject*, MaxTableSize> m_objects{}; | 318 | std::array<KAutoObject*, MaxTableSize> m_objects{}; |
| 282 | s32 m_free_head_index{-1}; | 319 | mutable KSpinLock m_lock; |
| 320 | s32 m_free_head_index{}; | ||
| 283 | u16 m_table_size{}; | 321 | u16 m_table_size{}; |
| 284 | u16 m_max_count{}; | 322 | u16 m_max_count{}; |
| 285 | u16 m_next_linear_id{MinLinearId}; | 323 | u16 m_next_linear_id{}; |
| 286 | u16 m_count{}; | 324 | u16 m_count{}; |
| 287 | mutable KSpinLock m_lock; | ||
| 288 | KernelCore& kernel; | ||
| 289 | }; | 325 | }; |
| 290 | 326 | ||
| 291 | } // namespace Kernel | 327 | } // namespace Kernel |
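The `HandlePack` union above packs a 15-bit table index, a 15-bit linear id, and 2 reserved bits into one 32-bit handle; `EncodeHandle` fills the two fields and `IsValidHandle` later checks them. The same packing can be written with plain shifts and masks, which makes the layout explicit. The sketch below illustrates the bit layout only; it is not the project's BitField implementation.

    #include <cstdint>
    #include <cstdio>

    constexpr std::uint32_t IndexBits = 15;
    constexpr std::uint32_t LinearIdBits = 15;

    constexpr std::uint32_t EncodeHandle(std::uint16_t index, std::uint16_t linear_id) {
        // bits [0,15): index, bits [15,30): linear id, bits [30,32): reserved (zero)
        return static_cast<std::uint32_t>(index) |
               (static_cast<std::uint32_t>(linear_id) << IndexBits);
    }

    constexpr std::uint32_t GetIndex(std::uint32_t handle) {
        return handle & ((1u << IndexBits) - 1);
    }
    constexpr std::uint32_t GetLinearId(std::uint32_t handle) {
        return (handle >> IndexBits) & ((1u << LinearIdBits) - 1);
    }
    constexpr std::uint32_t GetReserved(std::uint32_t handle) {
        return handle >> (IndexBits + LinearIdBits);
    }

    // Example: table index 5 with linear id 1 (the minimum valid id).
    static_assert(EncodeHandle(5, 1) == 0x00008005);
    static_assert(GetIndex(0x00008005) == 5);
    static_assert(GetLinearId(0x00008005) == 1);
    static_assert(GetReserved(0x00008005) == 0);

    int main() {
        std::printf("0x%08x\n", static_cast<unsigned>(EncodeHandle(5, 1)));
    }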
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index 9444f6bd2..3b6e7baff 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
| @@ -35,26 +35,32 @@ enum class KMemoryState : u32 { | |||
| 35 | FlagCanMapProcess = (1 << 23), | 35 | FlagCanMapProcess = (1 << 23), |
| 36 | FlagCanChangeAttribute = (1 << 24), | 36 | FlagCanChangeAttribute = (1 << 24), |
| 37 | FlagCanCodeMemory = (1 << 25), | 37 | FlagCanCodeMemory = (1 << 25), |
| 38 | FlagLinearMapped = (1 << 26), | ||
| 38 | 39 | ||
| 39 | FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | | 40 | FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | |
| 40 | FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical | | 41 | FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical | |
| 41 | FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer | | 42 | FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer | |
| 42 | FlagReferenceCounted | FlagCanChangeAttribute, | 43 | FlagReferenceCounted | FlagCanChangeAttribute | FlagLinearMapped, |
| 43 | 44 | ||
| 44 | FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | | 45 | FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | |
| 45 | FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap | | 46 | FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap | |
| 46 | FlagCanAlignedDeviceMap | FlagReferenceCounted, | 47 | FlagCanAlignedDeviceMap | FlagReferenceCounted | FlagLinearMapped, |
| 47 | 48 | ||
| 48 | FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap, | 49 | FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap | |
| 50 | FlagLinearMapped, | ||
| 49 | 51 | ||
| 50 | Free = static_cast<u32>(Svc::MemoryState::Free), | 52 | Free = static_cast<u32>(Svc::MemoryState::Free), |
| 51 | Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped, | 53 | Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap | |
| 54 | FlagCanAlignedDeviceMap, | ||
| 52 | Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical, | 55 | Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical, |
| 53 | Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess, | 56 | Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess, |
| 54 | CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess | | 57 | CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess | |
| 55 | FlagCanCodeMemory, | 58 | FlagCanCodeMemory, |
| 56 | Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted, | ||
| 57 | Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory, | 59 | Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory, |
| 60 | Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted | | ||
| 61 | FlagLinearMapped, | ||
| 62 | |||
| 63 | // Alias was removed after 1.0.0. | ||
| 58 | 64 | ||
| 59 | AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess | | 65 | AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess | |
| 60 | FlagCanCodeAlias, | 66 | FlagCanCodeAlias, |
| @@ -67,18 +73,18 @@ enum class KMemoryState : u32 { | |||
| 67 | Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap | | 73 | Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap | |
| 68 | FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, | 74 | FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, |
| 69 | 75 | ||
| 70 | ThreadLocal = | 76 | ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagLinearMapped, |
| 71 | static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted, | ||
| 72 | 77 | ||
| 73 | Transfered = static_cast<u32>(Svc::MemoryState::Transferred) | FlagsMisc | | 78 | Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc | |
| 74 | FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc | | 79 | FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc | |
| 75 | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, | 80 | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, |
| 76 | 81 | ||
| 77 | SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransferred) | FlagsMisc | | 82 | SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc | |
| 78 | FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, | 83 | FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, |
| 79 | 84 | ||
| 80 | SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped | | 85 | SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped | |
| 81 | FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, | 86 | FlagReferenceCounted | FlagLinearMapped | FlagCanUseNonSecureIpc | |
| 87 | FlagCanUseNonDeviceIpc, | ||
| 82 | 88 | ||
| 83 | Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible), | 89 | Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible), |
| 84 | 90 | ||
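Each KMemoryState value in these hunks packs the raw Svc::MemoryState id into its low byte and the capability flags above it; the substantive change is that FlagLinearMapped (1 << 26) is now folded into every state backed by linearly mapped DRAM. A minimal composition check, assuming the flag bit positions defined earlier in this header (FlagMapped = 1 << 13, FlagReferenceCounted = 1 << 22; only FlagCanCodeMemory and FlagLinearMapped are visible in the diff itself):

    // KMemoryState::Shared after this change:
    //   Svc::MemoryState::Shared            = 0x06
    //   FlagMapped           (1 << 13)      = 0x00002000
    //   FlagReferenceCounted (1 << 22)      = 0x00400000
    //   FlagLinearMapped     (1 << 26)      = 0x04000000
    static_assert((0x06u | (1u << 13) | (1u << 22) | (1u << 26)) == 0x04402006u);

The result matches the updated static_assert for KMemoryState::Shared in the next hunk.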
| @@ -91,69 +97,69 @@ enum class KMemoryState : u32 { | |||
| 91 | Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped, | 97 | Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped, |
| 92 | 98 | ||
| 93 | GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped | | 99 | GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped | |
| 94 | FlagReferenceCounted | FlagCanDebug, | 100 | FlagReferenceCounted | FlagCanDebug | FlagLinearMapped, |
| 95 | CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted, | 101 | CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted | |
| 102 | FlagLinearMapped, | ||
| 96 | 103 | ||
| 97 | Coverage = static_cast<u32>(Svc::MemoryState::Coverage) | FlagMapped, | 104 | Coverage = static_cast<u32>(Svc::MemoryState::Coverage) | FlagMapped, |
| 105 | |||
| 106 | Insecure = static_cast<u32>(Svc::MemoryState::Insecure) | FlagMapped | FlagReferenceCounted | | ||
| 107 | FlagLinearMapped | FlagCanChangeAttribute | FlagCanDeviceMap | | ||
| 108 | FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, | ||
| 98 | }; | 109 | }; |
| 99 | DECLARE_ENUM_FLAG_OPERATORS(KMemoryState); | 110 | DECLARE_ENUM_FLAG_OPERATORS(KMemoryState); |
| 100 | 111 | ||
| 101 | static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000); | 112 | static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000); |
| 102 | static_assert(static_cast<u32>(KMemoryState::Io) == 0x00002001); | 113 | static_assert(static_cast<u32>(KMemoryState::Io) == 0x00182001); |
| 103 | static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002); | 114 | static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002); |
| 104 | static_assert(static_cast<u32>(KMemoryState::Code) == 0x00DC7E03); | 115 | static_assert(static_cast<u32>(KMemoryState::Code) == 0x04DC7E03); |
| 105 | static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x03FEBD04); | 116 | static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x07FEBD04); |
| 106 | static_assert(static_cast<u32>(KMemoryState::Normal) == 0x037EBD05); | 117 | static_assert(static_cast<u32>(KMemoryState::Normal) == 0x077EBD05); |
| 107 | static_assert(static_cast<u32>(KMemoryState::Shared) == 0x00402006); | 118 | static_assert(static_cast<u32>(KMemoryState::Shared) == 0x04402006); |
| 108 | static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x00DD7E08); | 119 | |
| 109 | static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x03FFBD09); | 120 | static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x04DD7E08); |
| 110 | static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x005C3C0A); | 121 | static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x07FFBD09); |
| 111 | static_assert(static_cast<u32>(KMemoryState::Stack) == 0x005C3C0B); | 122 | static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x045C3C0A); |
| 112 | static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0040200C); | 123 | static_assert(static_cast<u32>(KMemoryState::Stack) == 0x045C3C0B); |
| 113 | static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x015C3C0D); | 124 | static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400200C); |
| 114 | static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x005C380E); | 125 | static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x055C3C0D); |
| 115 | static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0040380F); | 126 | static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x045C380E); |
| 127 | static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0440380F); | ||
| 116 | static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010); | 128 | static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010); |
| 117 | static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x005C3811); | 129 | static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x045C3811); |
| 118 | static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x004C2812); | 130 | static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x044C2812); |
| 119 | static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013); | 131 | static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013); |
| 120 | static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x00402214); | 132 | static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x04402214); |
| 121 | static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x00402015); | 133 | static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x04402015); |
| 122 | static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016); | 134 | static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016); |
| 135 | static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x05583817); | ||
| 123 | 136 | ||
| 124 | enum class KMemoryPermission : u8 { | 137 | enum class KMemoryPermission : u8 { |
| 125 | None = 0, | 138 | None = 0, |
| 126 | All = static_cast<u8>(~None), | 139 | All = static_cast<u8>(~None), |
| 127 | 140 | ||
| 128 | Read = 1 << 0, | ||
| 129 | Write = 1 << 1, | ||
| 130 | Execute = 1 << 2, | ||
| 131 | |||
| 132 | ReadAndWrite = Read | Write, | ||
| 133 | ReadAndExecute = Read | Execute, | ||
| 134 | |||
| 135 | UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write | | ||
| 136 | Svc::MemoryPermission::Execute), | ||
| 137 | |||
| 138 | KernelShift = 3, | 141 | KernelShift = 3, |
| 139 | 142 | ||
| 140 | KernelRead = Read << KernelShift, | 143 | KernelRead = static_cast<u8>(Svc::MemoryPermission::Read) << KernelShift, |
| 141 | KernelWrite = Write << KernelShift, | 144 | KernelWrite = static_cast<u8>(Svc::MemoryPermission::Write) << KernelShift, |
| 142 | KernelExecute = Execute << KernelShift, | 145 | KernelExecute = static_cast<u8>(Svc::MemoryPermission::Execute) << KernelShift, |
| 143 | 146 | ||
| 144 | NotMapped = (1 << (2 * KernelShift)), | 147 | NotMapped = (1 << (2 * KernelShift)), |
| 145 | 148 | ||
| 146 | KernelReadWrite = KernelRead | KernelWrite, | 149 | KernelReadWrite = KernelRead | KernelWrite, |
| 147 | KernelReadExecute = KernelRead | KernelExecute, | 150 | KernelReadExecute = KernelRead | KernelExecute, |
| 148 | 151 | ||
| 149 | UserRead = Read | KernelRead, | 152 | UserRead = static_cast<u8>(Svc::MemoryPermission::Read) | KernelRead, |
| 150 | UserWrite = Write | KernelWrite, | 153 | UserWrite = static_cast<u8>(Svc::MemoryPermission::Write) | KernelWrite, |
| 151 | UserExecute = Execute, | 154 | UserExecute = static_cast<u8>(Svc::MemoryPermission::Execute), |
| 152 | 155 | ||
| 153 | UserReadWrite = UserRead | UserWrite, | 156 | UserReadWrite = UserRead | UserWrite, |
| 154 | UserReadExecute = UserRead | UserExecute, | 157 | UserReadExecute = UserRead | UserExecute, |
| 155 | 158 | ||
| 156 | IpcLockChangeMask = NotMapped | UserReadWrite | 159 | UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write | |
| 160 | Svc::MemoryPermission::Execute), | ||
| 161 | |||
| 162 | IpcLockChangeMask = NotMapped | UserReadWrite, | ||
| 157 | }; | 163 | }; |
| 158 | DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission); | 164 | DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission); |
| 159 | 165 | ||
| @@ -210,13 +216,15 @@ struct KMemoryInfo { | |||
| 210 | 216 | ||
| 211 | constexpr Svc::MemoryInfo GetSvcMemoryInfo() const { | 217 | constexpr Svc::MemoryInfo GetSvcMemoryInfo() const { |
| 212 | return { | 218 | return { |
| 213 | .addr = m_address, | 219 | .base_address = m_address, |
| 214 | .size = m_size, | 220 | .size = m_size, |
| 215 | .state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask), | 221 | .state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask), |
| 216 | .attr = static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask), | 222 | .attribute = |
| 217 | .perm = static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask), | 223 | static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask), |
| 218 | .ipc_refcount = m_ipc_lock_count, | 224 | .permission = |
| 219 | .device_refcount = m_device_use_count, | 225 | static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask), |
| 226 | .ipc_count = m_ipc_lock_count, | ||
| 227 | .device_count = m_device_use_count, | ||
| 220 | .padding = {}, | 228 | .padding = {}, |
| 221 | }; | 229 | }; |
| 222 | } | 230 | } |
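The designated initializer above fixes the renamed Svc::MemoryInfo field names (base_address, attribute, permission, ipc_count, device_count). A sketch of the struct those names imply; the field types and ordering are assumptions here, only the names are confirmed by the hunk:

    struct MemoryInfo {
        u64 base_address;
        u64 size;
        MemoryState state;
        MemoryAttribute attribute;
        MemoryPermission permission;
        u32 ipc_count;
        u32 device_count;
        u32 padding;
    };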
| @@ -468,6 +476,7 @@ public: | |||
| 468 | 476 | ||
| 469 | constexpr void UpdateDeviceDisableMergeStateForShareLeft( | 477 | constexpr void UpdateDeviceDisableMergeStateForShareLeft( |
| 470 | [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { | 478 | [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { |
| 479 | // New permission/right aren't used. | ||
| 471 | if (left) { | 480 | if (left) { |
| 472 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( | 481 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( |
| 473 | m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft); | 482 | m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft); |
| @@ -478,6 +487,7 @@ public: | |||
| 478 | 487 | ||
| 479 | constexpr void UpdateDeviceDisableMergeStateForShareRight( | 488 | constexpr void UpdateDeviceDisableMergeStateForShareRight( |
| 480 | [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { | 489 | [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { |
| 490 | // New permission/left aren't used. | ||
| 481 | if (right) { | 491 | if (right) { |
| 482 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( | 492 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( |
| 483 | m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight); | 493 | m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight); |
| @@ -494,6 +504,8 @@ public: | |||
| 494 | 504 | ||
| 495 | constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, | 505 | constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, |
| 496 | bool right) { | 506 | bool right) { |
| 507 | // New permission isn't used. | ||
| 508 | |||
| 497 | // We must either be shared or have a zero lock count. | 509 | // We must either be shared or have a zero lock count. |
| 498 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared || | 510 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared || |
| 499 | m_device_use_count == 0); | 511 | m_device_use_count == 0); |
| @@ -509,6 +521,7 @@ public: | |||
| 509 | 521 | ||
| 510 | constexpr void UpdateDeviceDisableMergeStateForUnshareLeft( | 522 | constexpr void UpdateDeviceDisableMergeStateForUnshareLeft( |
| 511 | [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { | 523 | [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { |
| 524 | // New permission/right aren't used. | ||
| 512 | 525 | ||
| 513 | if (left) { | 526 | if (left) { |
| 514 | if (!m_device_disable_merge_left_count) { | 527 | if (!m_device_disable_merge_left_count) { |
| @@ -528,6 +541,8 @@ public: | |||
| 528 | 541 | ||
| 529 | constexpr void UpdateDeviceDisableMergeStateForUnshareRight( | 542 | constexpr void UpdateDeviceDisableMergeStateForUnshareRight( |
| 530 | [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { | 543 | [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { |
| 544 | // New permission/left aren't used. | ||
| 545 | |||
| 531 | if (right) { | 546 | if (right) { |
| 532 | const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--; | 547 | const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--; |
| 533 | ASSERT(old_device_disable_merge_right_count > 0); | 548 | ASSERT(old_device_disable_merge_right_count > 0); |
| @@ -546,6 +561,8 @@ public: | |||
| 546 | 561 | ||
| 547 | constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, | 562 | constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, |
| 548 | bool right) { | 563 | bool right) { |
| 564 | // New permission isn't used. | ||
| 565 | |||
| 549 | // We must be shared. | 566 | // We must be shared. |
| 550 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); | 567 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); |
| 551 | 568 | ||
| @@ -563,6 +580,7 @@ public: | |||
| 563 | 580 | ||
| 564 | constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left, | 581 | constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left, |
| 565 | bool right) { | 582 | bool right) { |
| 583 | // New permission isn't used. | ||
| 566 | 584 | ||
| 567 | // We must be shared. | 585 | // We must be shared. |
| 568 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); | 586 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); |
| @@ -613,6 +631,8 @@ public: | |||
| 613 | 631 | ||
| 614 | constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left, | 632 | constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left, |
| 615 | [[maybe_unused]] bool right) { | 633 | [[maybe_unused]] bool right) { |
| 634 | // New permission isn't used. | ||
| 635 | |||
| 616 | // We must be locked. | 636 | // We must be locked. |
| 617 | ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked); | 637 | ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked); |
| 618 | 638 | ||
diff --git a/src/core/hle/kernel/k_memory_layout.cpp b/src/core/hle/kernel/k_memory_layout.cpp index 55dc296d0..72c3ee4b7 100644 --- a/src/core/hle/kernel/k_memory_layout.cpp +++ b/src/core/hle/kernel/k_memory_layout.cpp | |||
| @@ -153,13 +153,9 @@ void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_ | |||
| 153 | } | 153 | } |
| 154 | } | 154 | } |
| 155 | 155 | ||
| 156 | size_t KMemoryLayout::GetResourceRegionSizeForInit() { | 156 | size_t KMemoryLayout::GetResourceRegionSizeForInit(bool use_extra_resource) { |
| 157 | // Calculate resource region size based on whether we allow extra threads. | 157 | return KernelResourceSize + KSystemControl::SecureAppletMemorySize + |
| 158 | const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit(); | 158 | (use_extra_resource ? KernelSlabHeapAdditionalSize + KernelPageBufferAdditionalSize : 0); |
| 159 | size_t resource_region_size = | ||
| 160 | KernelResourceSize + (use_extra_resources ? KernelSlabHeapAdditionalSize : 0); | ||
| 161 | |||
| 162 | return resource_region_size; | ||
| 163 | } | 159 | } |
| 164 | 160 | ||
| 165 | } // namespace Kernel | 161 | } // namespace Kernel |
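The behavioural change here is that GetResourceRegionSizeForInit no longer queries ShouldIncreaseThreadResourceLimit itself; the caller now passes the decision in. A call-site sketch under that assumption (the real call site lives in the kernel init code, which is not part of this diff):

    const bool use_extra_resource = KSystemControl::Init::ShouldIncreaseThreadResourceLimit();
    const size_t resource_region_size =
        KMemoryLayout::GetResourceRegionSizeForInit(use_extra_resource);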
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h index 884fc623a..fd6e1d3e6 100644 --- a/src/core/hle/kernel/k_memory_layout.h +++ b/src/core/hle/kernel/k_memory_layout.h | |||
| @@ -60,10 +60,12 @@ constexpr std::size_t KernelSlabHeapGapsSizeMax = 2_MiB - 64_KiB; | |||
| 60 | constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax; | 60 | constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax; |
| 61 | 61 | ||
| 62 | // NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860. | 62 | // NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860. |
| 63 | constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000; | 63 | constexpr size_t KernelPageBufferHeapSize = 0x3E0000; |
| 64 | constexpr size_t KernelSlabHeapAdditionalSize = 0x148000; | ||
| 65 | constexpr size_t KernelPageBufferAdditionalSize = 0x33C000; | ||
| 64 | 66 | ||
| 65 | constexpr std::size_t KernelResourceSize = | 67 | constexpr std::size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize + |
| 66 | KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize; | 68 | KernelSlabHeapSize + KernelPageBufferHeapSize; |
| 67 | 69 | ||
| 68 | constexpr bool IsKernelAddressKey(VAddr key) { | 70 | constexpr bool IsKernelAddressKey(VAddr key) { |
| 69 | return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast; | 71 | return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast; |
| @@ -168,6 +170,11 @@ public: | |||
| 168 | KMemoryRegionType_VirtualDramKernelTraceBuffer)); | 170 | KMemoryRegionType_VirtualDramKernelTraceBuffer)); |
| 169 | } | 171 | } |
| 170 | 172 | ||
| 173 | const KMemoryRegion& GetSecureAppletMemoryRegion() { | ||
| 174 | return Dereference(GetVirtualMemoryRegionTree().FindByType( | ||
| 175 | KMemoryRegionType_VirtualDramKernelSecureAppletMemory)); | ||
| 176 | } | ||
| 177 | |||
| 171 | const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const { | 178 | const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const { |
| 172 | return Dereference(FindVirtualLinear(address)); | 179 | return Dereference(FindVirtualLinear(address)); |
| 173 | } | 180 | } |
| @@ -229,7 +236,7 @@ public: | |||
| 229 | 236 | ||
| 230 | void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start, | 237 | void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start, |
| 231 | VAddr linear_virtual_start); | 238 | VAddr linear_virtual_start); |
| 232 | static size_t GetResourceRegionSizeForInit(); | 239 | static size_t GetResourceRegionSizeForInit(bool use_extra_resource); |
| 233 | 240 | ||
| 234 | auto GetKernelRegionExtents() const { | 241 | auto GetKernelRegionExtents() const { |
| 235 | return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel); | 242 | return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel); |
| @@ -279,6 +286,10 @@ public: | |||
| 279 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | 286 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( |
| 280 | KMemoryRegionType_DramKernelSlab); | 287 | KMemoryRegionType_DramKernelSlab); |
| 281 | } | 288 | } |
| 289 | auto GetKernelSecureAppletMemoryRegionPhysicalExtents() { | ||
| 290 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | ||
| 291 | KMemoryRegionType_DramKernelSecureAppletMemory); | ||
| 292 | } | ||
| 282 | auto GetKernelPageTableHeapRegionPhysicalExtents() const { | 293 | auto GetKernelPageTableHeapRegionPhysicalExtents() const { |
| 283 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | 294 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( |
| 284 | KMemoryRegionType_DramKernelPtHeap); | 295 | KMemoryRegionType_DramKernelPtHeap); |
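Together with the k_memory_layout.cpp change above, the resource region now always reserves the page buffer heap and the secure applet memory, and only optionally the two "additional" chunks. A quick arithmetic check of the optional part, using the constants introduced in this hunk:

    //   KernelSlabHeapAdditionalSize   = 0x148000
    //   KernelPageBufferAdditionalSize = 0x33C000
    static_assert(0x148000 + 0x33C000 == 0x484000); // ~4.5 MiB of extra resource

KernelPageBufferHeapSize (0x3E0000) is part of KernelResourceSize itself, so it is reserved regardless of the flag; KSystemControl::SecureAppletMemorySize is defined elsewhere and its value is not visible in this diff.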
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp index 646711505..c4bf306e8 100644 --- a/src/core/hle/kernel/k_memory_manager.cpp +++ b/src/core/hle/kernel/k_memory_manager.cpp | |||
| @@ -29,43 +29,44 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) { | |||
| 29 | } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) { | 29 | } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) { |
| 30 | return KMemoryManager::Pool::SystemNonSecure; | 30 | return KMemoryManager::Pool::SystemNonSecure; |
| 31 | } else { | 31 | } else { |
| 32 | ASSERT_MSG(false, "InvalidMemoryRegionType for conversion to Pool"); | 32 | UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool"); |
| 33 | return {}; | ||
| 34 | } | 33 | } |
| 35 | } | 34 | } |
| 36 | 35 | ||
| 37 | } // namespace | 36 | } // namespace |
| 38 | 37 | ||
| 39 | KMemoryManager::KMemoryManager(Core::System& system_) | 38 | KMemoryManager::KMemoryManager(Core::System& system) |
| 40 | : system{system_}, pool_locks{ | 39 | : m_system{system}, m_memory_layout{system.Kernel().MemoryLayout()}, |
| 41 | KLightLock{system_.Kernel()}, | 40 | m_pool_locks{ |
| 42 | KLightLock{system_.Kernel()}, | 41 | KLightLock{system.Kernel()}, |
| 43 | KLightLock{system_.Kernel()}, | 42 | KLightLock{system.Kernel()}, |
| 44 | KLightLock{system_.Kernel()}, | 43 | KLightLock{system.Kernel()}, |
| 45 | } {} | 44 | KLightLock{system.Kernel()}, |
| 45 | } {} | ||
| 46 | 46 | ||
| 47 | void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) { | 47 | void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) { |
| 48 | 48 | ||
| 49 | // Clear the management region to zero. | 49 | // Clear the management region to zero. |
| 50 | const VAddr management_region_end = management_region + management_region_size; | 50 | const VAddr management_region_end = management_region + management_region_size; |
| 51 | // std::memset(GetVoidPointer(management_region), 0, management_region_size); | ||
| 51 | 52 | ||
| 52 | // Reset our manager count. | 53 | // Reset our manager count. |
| 53 | num_managers = 0; | 54 | m_num_managers = 0; |
| 54 | 55 | ||
| 55 | // Traverse the virtual memory layout tree, initializing each manager as appropriate. | 56 | // Traverse the virtual memory layout tree, initializing each manager as appropriate. |
| 56 | while (num_managers != MaxManagerCount) { | 57 | while (m_num_managers != MaxManagerCount) { |
| 57 | // Locate the region that should initialize the current manager. | 58 | // Locate the region that should initialize the current manager. |
| 58 | PAddr region_address = 0; | 59 | PAddr region_address = 0; |
| 59 | size_t region_size = 0; | 60 | size_t region_size = 0; |
| 60 | Pool region_pool = Pool::Count; | 61 | Pool region_pool = Pool::Count; |
| 61 | for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { | 62 | for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { |
| 62 | // We only care about regions that we need to create managers for. | 63 | // We only care about regions that we need to create managers for. |
| 63 | if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { | 64 | if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { |
| 64 | continue; | 65 | continue; |
| 65 | } | 66 | } |
| 66 | 67 | ||
| 67 | // We want to initialize the managers in order. | 68 | // We want to initialize the managers in order. |
| 68 | if (it.GetAttributes() != num_managers) { | 69 | if (it.GetAttributes() != m_num_managers) { |
| 69 | continue; | 70 | continue; |
| 70 | } | 71 | } |
| 71 | 72 | ||
| @@ -97,8 +98,8 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio | |||
| 97 | } | 98 | } |
| 98 | 99 | ||
| 99 | // Initialize a new manager for the region. | 100 | // Initialize a new manager for the region. |
| 100 | Impl* manager = std::addressof(managers[num_managers++]); | 101 | Impl* manager = std::addressof(m_managers[m_num_managers++]); |
| 101 | ASSERT(num_managers <= managers.size()); | 102 | ASSERT(m_num_managers <= m_managers.size()); |
| 102 | 103 | ||
| 103 | const size_t cur_size = manager->Initialize(region_address, region_size, management_region, | 104 | const size_t cur_size = manager->Initialize(region_address, region_size, management_region, |
| 104 | management_region_end, region_pool); | 105 | management_region_end, region_pool); |
| @@ -107,13 +108,13 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio | |||
| 107 | 108 | ||
| 108 | // Insert the manager into the pool list. | 109 | // Insert the manager into the pool list. |
| 109 | const auto region_pool_index = static_cast<u32>(region_pool); | 110 | const auto region_pool_index = static_cast<u32>(region_pool); |
| 110 | if (pool_managers_tail[region_pool_index] == nullptr) { | 111 | if (m_pool_managers_tail[region_pool_index] == nullptr) { |
| 111 | pool_managers_head[region_pool_index] = manager; | 112 | m_pool_managers_head[region_pool_index] = manager; |
| 112 | } else { | 113 | } else { |
| 113 | pool_managers_tail[region_pool_index]->SetNext(manager); | 114 | m_pool_managers_tail[region_pool_index]->SetNext(manager); |
| 114 | manager->SetPrev(pool_managers_tail[region_pool_index]); | 115 | manager->SetPrev(m_pool_managers_tail[region_pool_index]); |
| 115 | } | 116 | } |
| 116 | pool_managers_tail[region_pool_index] = manager; | 117 | m_pool_managers_tail[region_pool_index] = manager; |
| 117 | } | 118 | } |
| 118 | 119 | ||
| 119 | // Free each region to its corresponding heap. | 120 | // Free each region to its corresponding heap. |
| @@ -121,11 +122,10 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio | |||
| 121 | const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress(); | 122 | const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress(); |
| 122 | const PAddr ini_end = ini_start + InitialProcessBinarySizeMax; | 123 | const PAddr ini_end = ini_start + InitialProcessBinarySizeMax; |
| 123 | const PAddr ini_last = ini_end - 1; | 124 | const PAddr ini_last = ini_end - 1; |
| 124 | for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { | 125 | for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { |
| 125 | if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { | 126 | if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { |
| 126 | // Get the manager for the region. | 127 | // Get the manager for the region. |
| 127 | auto index = it.GetAttributes(); | 128 | auto& manager = m_managers[it.GetAttributes()]; |
| 128 | auto& manager = managers[index]; | ||
| 129 | 129 | ||
| 130 | const PAddr cur_start = it.GetAddress(); | 130 | const PAddr cur_start = it.GetAddress(); |
| 131 | const PAddr cur_last = it.GetLastAddress(); | 131 | const PAddr cur_last = it.GetLastAddress(); |
| @@ -162,11 +162,19 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio | |||
| 162 | } | 162 | } |
| 163 | 163 | ||
| 164 | // Update the used size for all managers. | 164 | // Update the used size for all managers. |
| 165 | for (size_t i = 0; i < num_managers; ++i) { | 165 | for (size_t i = 0; i < m_num_managers; ++i) { |
| 166 | managers[i].SetInitialUsedHeapSize(reserved_sizes[i]); | 166 | m_managers[i].SetInitialUsedHeapSize(reserved_sizes[i]); |
| 167 | } | 167 | } |
| 168 | } | 168 | } |
| 169 | 169 | ||
| 170 | Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) { | ||
| 171 | UNREACHABLE(); | ||
| 172 | } | ||
| 173 | |||
| 174 | void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) { | ||
| 175 | UNREACHABLE(); | ||
| 176 | } | ||
| 177 | |||
| 170 | PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) { | 178 | PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) { |
| 171 | // Early return if we're allocating no pages. | 179 | // Early return if we're allocating no pages. |
| 172 | if (num_pages == 0) { | 180 | if (num_pages == 0) { |
| @@ -175,7 +183,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p | |||
| 175 | 183 | ||
| 176 | // Lock the pool that we're allocating from. | 184 | // Lock the pool that we're allocating from. |
| 177 | const auto [pool, dir] = DecodeOption(option); | 185 | const auto [pool, dir] = DecodeOption(option); |
| 178 | KScopedLightLock lk(pool_locks[static_cast<std::size_t>(pool)]); | 186 | KScopedLightLock lk(m_pool_locks[static_cast<std::size_t>(pool)]); |
| 179 | 187 | ||
| 180 | // Choose a heap based on our page size request. | 188 | // Choose a heap based on our page size request. |
| 181 | const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages); | 189 | const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages); |
| @@ -185,7 +193,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p | |||
| 185 | PAddr allocated_block = 0; | 193 | PAddr allocated_block = 0; |
| 186 | for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; | 194 | for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; |
| 187 | chosen_manager = this->GetNextManager(chosen_manager, dir)) { | 195 | chosen_manager = this->GetNextManager(chosen_manager, dir)) { |
| 188 | allocated_block = chosen_manager->AllocateBlock(heap_index, true); | 196 | allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages); |
| 189 | if (allocated_block != 0) { | 197 | if (allocated_block != 0) { |
| 190 | break; | 198 | break; |
| 191 | } | 199 | } |
| @@ -196,10 +204,9 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p | |||
| 196 | return 0; | 204 | return 0; |
| 197 | } | 205 | } |
| 198 | 206 | ||
| 199 | // If we allocated more than we need, free some. | 207 | // Maintain the optimized memory bitmap, if we should. |
| 200 | const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index); | 208 | if (m_has_optimized_process[static_cast<size_t>(pool)]) { |
| 201 | if (allocated_pages > num_pages) { | 209 | UNIMPLEMENTED(); |
| 202 | chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages); | ||
| 203 | } | 210 | } |
| 204 | 211 | ||
| 205 | // Open the first reference to the pages. | 212 | // Open the first reference to the pages. |
| @@ -209,20 +216,21 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p | |||
| 209 | } | 216 | } |
| 210 | 217 | ||
| 211 | Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, | 218 | Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, |
| 212 | Direction dir, bool random) { | 219 | Direction dir, bool unoptimized, bool random) { |
| 213 | // Choose a heap based on our page size request. | 220 | // Choose a heap based on our page size request. |
| 214 | const s32 heap_index = KPageHeap::GetBlockIndex(num_pages); | 221 | const s32 heap_index = KPageHeap::GetBlockIndex(num_pages); |
| 215 | R_UNLESS(0 <= heap_index, ResultOutOfMemory); | 222 | R_UNLESS(0 <= heap_index, ResultOutOfMemory); |
| 216 | 223 | ||
| 217 | // Ensure that we don't leave anything un-freed. | 224 | // Ensure that we don't leave anything un-freed. |
| 218 | auto group_guard = SCOPE_GUARD({ | 225 | ON_RESULT_FAILURE { |
| 219 | for (const auto& it : out->Nodes()) { | 226 | for (const auto& it : out->Nodes()) { |
| 220 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), it.GetAddress()); | 227 | auto& manager = this->GetManager(it.GetAddress()); |
| 221 | const size_t num_pages_to_free = | 228 | const size_t node_num_pages = |
| 222 | std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize); | 229 | std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize); |
| 223 | manager.Free(it.GetAddress(), num_pages_to_free); | 230 | manager.Free(it.GetAddress(), node_num_pages); |
| 224 | } | 231 | } |
| 225 | }); | 232 | out->Finalize(); |
| 233 | }; | ||
| 226 | 234 | ||
| 227 | // Keep allocating until we've allocated all our pages. | 235 | // Keep allocating until we've allocated all our pages. |
| 228 | for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) { | 236 | for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) { |
| @@ -236,12 +244,17 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, | |||
| 236 | break; | 244 | break; |
| 237 | } | 245 | } |
| 238 | 246 | ||
| 239 | // Safely add it to our group. | 247 | // Ensure we don't leak the block if we fail. |
| 240 | { | 248 | ON_RESULT_FAILURE_2 { |
| 241 | auto block_guard = | 249 | cur_manager->Free(allocated_block, pages_per_alloc); |
| 242 | SCOPE_GUARD({ cur_manager->Free(allocated_block, pages_per_alloc); }); | 250 | }; |
| 243 | R_TRY(out->AddBlock(allocated_block, pages_per_alloc)); | 251 | |
| 244 | block_guard.Cancel(); | 252 | // Add the block to our group. |
| 253 | R_TRY(out->AddBlock(allocated_block, pages_per_alloc)); | ||
| 254 | |||
| 255 | // Maintain the optimized memory bitmap, if we should. | ||
| 256 | if (unoptimized) { | ||
| 257 | UNIMPLEMENTED(); | ||
| 245 | } | 258 | } |
| 246 | 259 | ||
| 247 | num_pages -= pages_per_alloc; | 260 | num_pages -= pages_per_alloc; |
| @@ -253,8 +266,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, | |||
| 253 | R_UNLESS(num_pages == 0, ResultOutOfMemory); | 266 | R_UNLESS(num_pages == 0, ResultOutOfMemory); |
| 254 | 267 | ||
| 255 | // We succeeded! | 268 | // We succeeded! |
| 256 | group_guard.Cancel(); | 269 | R_SUCCEED(); |
| 257 | return ResultSuccess; | ||
| 258 | } | 270 | } |
| 259 | 271 | ||
| 260 | Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) { | 272 | Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) { |
| @@ -266,10 +278,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op | |||
| 266 | 278 | ||
| 267 | // Lock the pool that we're allocating from. | 279 | // Lock the pool that we're allocating from. |
| 268 | const auto [pool, dir] = DecodeOption(option); | 280 | const auto [pool, dir] = DecodeOption(option); |
| 269 | KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]); | 281 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]); |
| 270 | 282 | ||
| 271 | // Allocate the page group. | 283 | // Allocate the page group. |
| 272 | R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false)); | 284 | R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, |
| 285 | m_has_optimized_process[static_cast<size_t>(pool)], true)); | ||
| 273 | 286 | ||
| 274 | // Open the first reference to the pages. | 287 | // Open the first reference to the pages. |
| 275 | for (const auto& block : out->Nodes()) { | 288 | for (const auto& block : out->Nodes()) { |
| @@ -277,7 +290,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op | |||
| 277 | size_t remaining_pages = block.GetNumPages(); | 290 | size_t remaining_pages = block.GetNumPages(); |
| 278 | while (remaining_pages > 0) { | 291 | while (remaining_pages > 0) { |
| 279 | // Get the manager for the current address. | 292 | // Get the manager for the current address. |
| 280 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address); | 293 | auto& manager = this->GetManager(cur_address); |
| 281 | 294 | ||
| 282 | // Process part or all of the block. | 295 | // Process part or all of the block. |
| 283 | const size_t cur_pages = | 296 | const size_t cur_pages = |
| @@ -290,11 +303,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op | |||
| 290 | } | 303 | } |
| 291 | } | 304 | } |
| 292 | 305 | ||
| 293 | return ResultSuccess; | 306 | R_SUCCEED(); |
| 294 | } | 307 | } |
| 295 | 308 | ||
| 296 | Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, | 309 | Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, |
| 297 | u64 process_id, u8 fill_pattern) { | 310 | u64 process_id, u8 fill_pattern) { |
| 298 | ASSERT(out != nullptr); | 311 | ASSERT(out != nullptr); |
| 299 | ASSERT(out->GetNumPages() == 0); | 312 | ASSERT(out->GetNumPages() == 0); |
| 300 | 313 | ||
| @@ -302,83 +315,89 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag | |||
| 302 | const auto [pool, dir] = DecodeOption(option); | 315 | const auto [pool, dir] = DecodeOption(option); |
| 303 | 316 | ||
| 304 | // Allocate the memory. | 317 | // Allocate the memory. |
| 318 | bool optimized; | ||
| 305 | { | 319 | { |
| 306 | // Lock the pool that we're allocating from. | 320 | // Lock the pool that we're allocating from. |
| 307 | KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]); | 321 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]); |
| 322 | |||
| 323 | // Check if we have an optimized process. | ||
| 324 | const bool has_optimized = m_has_optimized_process[static_cast<size_t>(pool)]; | ||
| 325 | const bool is_optimized = m_optimized_process_ids[static_cast<size_t>(pool)] == process_id; | ||
| 308 | 326 | ||
| 309 | // Allocate the page group. | 327 | // Allocate the page group. |
| 310 | R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false)); | 328 | R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized, |
| 329 | false)); | ||
| 311 | 330 | ||
| 312 | // Open the first reference to the pages. | 331 | // Set whether we should optimize. |
| 313 | for (const auto& block : out->Nodes()) { | 332 | optimized = has_optimized && is_optimized; |
| 314 | PAddr cur_address = block.GetAddress(); | ||
| 315 | size_t remaining_pages = block.GetNumPages(); | ||
| 316 | while (remaining_pages > 0) { | ||
| 317 | // Get the manager for the current address. | ||
| 318 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address); | ||
| 319 | |||
| 320 | // Process part or all of the block. | ||
| 321 | const size_t cur_pages = | ||
| 322 | std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); | ||
| 323 | manager.OpenFirst(cur_address, cur_pages); | ||
| 324 | |||
| 325 | // Advance. | ||
| 326 | cur_address += cur_pages * PageSize; | ||
| 327 | remaining_pages -= cur_pages; | ||
| 328 | } | ||
| 329 | } | ||
| 330 | } | 333 | } |
| 331 | 334 | ||
| 332 | // Set all the allocated memory. | 335 | // Perform optimized memory tracking, if we should. |
| 333 | for (const auto& block : out->Nodes()) { | 336 | if (optimized) { |
| 334 | std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern, | 337 | // Iterate over the allocated blocks. |
| 335 | block.GetSize()); | 338 | for (const auto& block : out->Nodes()) { |
| 336 | } | 339 | // Get the block extents. |
| 340 | const PAddr block_address = block.GetAddress(); | ||
| 341 | const size_t block_pages = block.GetNumPages(); | ||
| 337 | 342 | ||
| 338 | return ResultSuccess; | 343 | // If it has no pages, we don't need to do anything. |
| 339 | } | 344 | if (block_pages == 0) { |
| 345 | continue; | ||
| 346 | } | ||
| 340 | 347 | ||
| 341 | void KMemoryManager::Open(PAddr address, size_t num_pages) { | 348 | // Fill all the pages that we need to fill. |
| 342 | // Repeatedly open references until we've done so for all pages. | 349 | bool any_new = false; |
| 343 | while (num_pages) { | 350 | { |
| 344 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address); | 351 | PAddr cur_address = block_address; |
| 345 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); | 352 | size_t remaining_pages = block_pages; |
| 353 | while (remaining_pages > 0) { | ||
| 354 | // Get the manager for the current address. | ||
| 355 | auto& manager = this->GetManager(cur_address); | ||
| 356 | |||
| 357 | // Process part or all of the block. | ||
| 358 | const size_t cur_pages = | ||
| 359 | std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); | ||
| 360 | any_new = | ||
| 361 | manager.ProcessOptimizedAllocation(cur_address, cur_pages, fill_pattern); | ||
| 362 | |||
| 363 | // Advance. | ||
| 364 | cur_address += cur_pages * PageSize; | ||
| 365 | remaining_pages -= cur_pages; | ||
| 366 | } | ||
| 367 | } | ||
| 346 | 368 | ||
| 347 | { | 369 | // If there are new pages, update tracking for the allocation. |
| 348 | KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]); | 370 | if (any_new) { |
| 349 | manager.Open(address, cur_pages); | 371 | // Update tracking for the allocation. |
| 372 | PAddr cur_address = block_address; | ||
| 373 | size_t remaining_pages = block_pages; | ||
| 374 | while (remaining_pages > 0) { | ||
| 375 | // Get the manager for the current address. | ||
| 376 | auto& manager = this->GetManager(cur_address); | ||
| 377 | |||
| 378 | // Lock the pool for the manager. | ||
| 379 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]); | ||
| 380 | |||
| 381 | // Track some or all of the current pages. | ||
| 382 | const size_t cur_pages = | ||
| 383 | std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); | ||
| 384 | manager.TrackOptimizedAllocation(cur_address, cur_pages); | ||
| 385 | |||
| 386 | // Advance. | ||
| 387 | cur_address += cur_pages * PageSize; | ||
| 388 | remaining_pages -= cur_pages; | ||
| 389 | } | ||
| 390 | } | ||
| 350 | } | 391 | } |
| 351 | 392 | } else { | |
| 352 | num_pages -= cur_pages; | 393 | // Set all the allocated memory. |
| 353 | address += cur_pages * PageSize; | 394 | for (const auto& block : out->Nodes()) { |
| 354 | } | 395 | std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern, |
| 355 | } | 396 | block.GetSize()); |
| 356 | |||
| 357 | void KMemoryManager::Close(PAddr address, size_t num_pages) { | ||
| 358 | // Repeatedly close references until we've done so for all pages. | ||
| 359 | while (num_pages) { | ||
| 360 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address); | ||
| 361 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); | ||
| 362 | |||
| 363 | { | ||
| 364 | KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]); | ||
| 365 | manager.Close(address, cur_pages); | ||
| 366 | } | 397 | } |
| 367 | |||
| 368 | num_pages -= cur_pages; | ||
| 369 | address += cur_pages * PageSize; | ||
| 370 | } | 398 | } |
| 371 | } | ||
| 372 | 399 | ||
| 373 | void KMemoryManager::Close(const KPageGroup& pg) { | 400 | R_SUCCEED(); |
| 374 | for (const auto& node : pg.Nodes()) { | ||
| 375 | Close(node.GetAddress(), node.GetNumPages()); | ||
| 376 | } | ||
| 377 | } | ||
| 378 | void KMemoryManager::Open(const KPageGroup& pg) { | ||
| 379 | for (const auto& node : pg.Nodes()) { | ||
| 380 | Open(node.GetAddress(), node.GetNumPages()); | ||
| 381 | } | ||
| 382 | } | 401 | } |
| 383 | 402 | ||
| 384 | size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management, | 403 | size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management, |
| @@ -394,18 +413,31 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr manage | |||
| 394 | ASSERT(Common::IsAligned(total_management_size, PageSize)); | 413 | ASSERT(Common::IsAligned(total_management_size, PageSize)); |
| 395 | 414 | ||
| 396 | // Setup region. | 415 | // Setup region. |
| 397 | pool = p; | 416 | m_pool = p; |
| 398 | management_region = management; | 417 | m_management_region = management; |
| 399 | page_reference_counts.resize( | 418 | m_page_reference_counts.resize( |
| 400 | Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize); | 419 | Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize); |
| 401 | ASSERT(Common::IsAligned(management_region, PageSize)); | 420 | ASSERT(Common::IsAligned(m_management_region, PageSize)); |
| 402 | 421 | ||
| 403 | // Initialize the manager's KPageHeap. | 422 | // Initialize the manager's KPageHeap. |
| 404 | heap.Initialize(address, size, management + manager_size, page_heap_size); | 423 | m_heap.Initialize(address, size, management + manager_size, page_heap_size); |
| 405 | 424 | ||
| 406 | return total_management_size; | 425 | return total_management_size; |
| 407 | } | 426 | } |
| 408 | 427 | ||
| 428 | void KMemoryManager::Impl::TrackUnoptimizedAllocation(PAddr block, size_t num_pages) { | ||
| 429 | UNREACHABLE(); | ||
| 430 | } | ||
| 431 | |||
| 432 | void KMemoryManager::Impl::TrackOptimizedAllocation(PAddr block, size_t num_pages) { | ||
| 433 | UNREACHABLE(); | ||
| 434 | } | ||
| 435 | |||
| 436 | bool KMemoryManager::Impl::ProcessOptimizedAllocation(PAddr block, size_t num_pages, | ||
| 437 | u8 fill_pattern) { | ||
| 438 | UNREACHABLE(); | ||
| 439 | } | ||
| 440 | |||
| 409 | size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) { | 441 | size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) { |
| 410 | const size_t ref_count_size = (region_size / PageSize) * sizeof(u16); | 442 | const size_t ref_count_size = (region_size / PageSize) * sizeof(u16); |
| 411 | const size_t optimize_map_size = | 443 | const size_t optimize_map_size = |
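The reworked AllocateForProcess branches on whether the requesting process is the pool's registered optimized process. A standalone sketch of just that decision, using only information from the hunks above (the helper names here are hypothetical, not part of the patch):

    #include <cstdint>

    // True when the allocation should use the optimized fill/tracking path:
    // the requester is itself the pool's optimized process.
    bool ShouldUseOptimizedFill(bool has_optimized_process, std::uint64_t optimized_process_id,
                                std::uint64_t requesting_process_id) {
        return has_optimized_process && optimized_process_id == requesting_process_id;
    }

    // True when the pages must be recorded as "unoptimized": some other process
    // owns the pool's optimized-memory bitmap (the new bool passed to
    // AllocatePageGroupImpl).
    bool MustTrackAsUnoptimized(bool has_optimized_process, std::uint64_t optimized_process_id,
                                std::uint64_t requesting_process_id) {
        return has_optimized_process && optimized_process_id != requesting_process_id;
    }

In this port the optimized-memory plumbing itself is still stubbed (InitializeOptimizedMemory, TrackOptimizedAllocation and ProcessOptimizedAllocation hit UNREACHABLE/UNIMPLEMENTED), so the plain memset fill path is the one expected to run in practice.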
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h index dcb9b6348..401d4e644 100644 --- a/src/core/hle/kernel/k_memory_manager.h +++ b/src/core/hle/kernel/k_memory_manager.h | |||
| @@ -21,11 +21,8 @@ namespace Kernel { | |||
| 21 | 21 | ||
| 22 | class KPageGroup; | 22 | class KPageGroup; |
| 23 | 23 | ||
| 24 | class KMemoryManager final { | 24 | class KMemoryManager { |
| 25 | public: | 25 | public: |
| 26 | YUZU_NON_COPYABLE(KMemoryManager); | ||
| 27 | YUZU_NON_MOVEABLE(KMemoryManager); | ||
| 28 | |||
| 29 | enum class Pool : u32 { | 26 | enum class Pool : u32 { |
| 30 | Application = 0, | 27 | Application = 0, |
| 31 | Applet = 1, | 28 | Applet = 1, |
| @@ -45,16 +42,85 @@ public: | |||
| 45 | enum class Direction : u32 { | 42 | enum class Direction : u32 { |
| 46 | FromFront = 0, | 43 | FromFront = 0, |
| 47 | FromBack = 1, | 44 | FromBack = 1, |
| 48 | |||
| 49 | Shift = 0, | 45 | Shift = 0, |
| 50 | Mask = (0xF << Shift), | 46 | Mask = (0xF << Shift), |
| 51 | }; | 47 | }; |
| 52 | 48 | ||
| 53 | explicit KMemoryManager(Core::System& system_); | 49 | static constexpr size_t MaxManagerCount = 10; |
| 50 | |||
| 51 | explicit KMemoryManager(Core::System& system); | ||
| 54 | 52 | ||
| 55 | void Initialize(VAddr management_region, size_t management_region_size); | 53 | void Initialize(VAddr management_region, size_t management_region_size); |
| 56 | 54 | ||
| 57 | constexpr size_t GetSize(Pool pool) const { | 55 | Result InitializeOptimizedMemory(u64 process_id, Pool pool); |
| 56 | void FinalizeOptimizedMemory(u64 process_id, Pool pool); | ||
| 57 | |||
| 58 | PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option); | ||
| 59 | Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option); | ||
| 60 | Result AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id, | ||
| 61 | u8 fill_pattern); | ||
| 62 | |||
| 63 | Pool GetPool(PAddr address) const { | ||
| 64 | return this->GetManager(address).GetPool(); | ||
| 65 | } | ||
| 66 | |||
| 67 | void Open(PAddr address, size_t num_pages) { | ||
| 68 | // Repeatedly open references until we've done so for all pages. | ||
| 69 | while (num_pages) { | ||
| 70 | auto& manager = this->GetManager(address); | ||
| 71 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); | ||
| 72 | |||
| 73 | { | ||
| 74 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]); | ||
| 75 | manager.Open(address, cur_pages); | ||
| 76 | } | ||
| 77 | |||
| 78 | num_pages -= cur_pages; | ||
| 79 | address += cur_pages * PageSize; | ||
| 80 | } | ||
| 81 | } | ||
| 82 | |||
| 83 | void OpenFirst(PAddr address, size_t num_pages) { | ||
| 84 | // Repeatedly open references until we've done so for all pages. | ||
| 85 | while (num_pages) { | ||
| 86 | auto& manager = this->GetManager(address); | ||
| 87 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); | ||
| 88 | |||
| 89 | { | ||
| 90 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]); | ||
| 91 | manager.OpenFirst(address, cur_pages); | ||
| 92 | } | ||
| 93 | |||
| 94 | num_pages -= cur_pages; | ||
| 95 | address += cur_pages * PageSize; | ||
| 96 | } | ||
| 97 | } | ||
| 98 | |||
| 99 | void Close(PAddr address, size_t num_pages) { | ||
| 100 | // Repeatedly close references until we've done so for all pages. | ||
| 101 | while (num_pages) { | ||
| 102 | auto& manager = this->GetManager(address); | ||
| 103 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); | ||
| 104 | |||
| 105 | { | ||
| 106 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]); | ||
| 107 | manager.Close(address, cur_pages); | ||
| 108 | } | ||
| 109 | |||
| 110 | num_pages -= cur_pages; | ||
| 111 | address += cur_pages * PageSize; | ||
| 112 | } | ||
| 113 | } | ||
| 114 | |||
| 115 | size_t GetSize() { | ||
| 116 | size_t total = 0; | ||
| 117 | for (size_t i = 0; i < m_num_managers; i++) { | ||
| 118 | total += m_managers[i].GetSize(); | ||
| 119 | } | ||
| 120 | return total; | ||
| 121 | } | ||
| 122 | |||
| 123 | size_t GetSize(Pool pool) { | ||
| 58 | constexpr Direction GetSizeDirection = Direction::FromFront; | 124 | constexpr Direction GetSizeDirection = Direction::FromFront; |
| 59 | size_t total = 0; | 125 | size_t total = 0; |
| 60 | for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr; | 126 | for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr; |
| @@ -64,18 +130,36 @@ public: | |||
| 64 | return total; | 130 | return total; |
| 65 | } | 131 | } |
| 66 | 132 | ||
| 67 | PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option); | 133 | size_t GetFreeSize() { |
| 68 | Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option); | 134 | size_t total = 0; |
| 69 | Result AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id, | 135 | for (size_t i = 0; i < m_num_managers; i++) { |
| 70 | u8 fill_pattern); | 136 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(m_managers[i].GetPool())]); |
| 137 | total += m_managers[i].GetFreeSize(); | ||
| 138 | } | ||
| 139 | return total; | ||
| 140 | } | ||
| 71 | 141 | ||
| 72 | static constexpr size_t MaxManagerCount = 10; | 142 | size_t GetFreeSize(Pool pool) { |
| 143 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]); | ||
| 144 | |||
| 145 | constexpr Direction GetSizeDirection = Direction::FromFront; | ||
| 146 | size_t total = 0; | ||
| 147 | for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr; | ||
| 148 | manager = this->GetNextManager(manager, GetSizeDirection)) { | ||
| 149 | total += manager->GetFreeSize(); | ||
| 150 | } | ||
| 151 | return total; | ||
| 152 | } | ||
| 73 | 153 | ||
| 74 | void Close(PAddr address, size_t num_pages); | 154 | void DumpFreeList(Pool pool) { |
| 75 | void Close(const KPageGroup& pg); | 155 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]); |
| 76 | 156 | ||
| 77 | void Open(PAddr address, size_t num_pages); | 157 | constexpr Direction DumpDirection = Direction::FromFront; |
| 78 | void Open(const KPageGroup& pg); | 158 | for (auto* manager = this->GetFirstManager(pool, DumpDirection); manager != nullptr; |
| 159 | manager = this->GetNextManager(manager, DumpDirection)) { | ||
| 160 | manager->DumpFreeList(); | ||
| 161 | } | ||
| 162 | } | ||
| 79 | 163 | ||
| 80 | public: | 164 | public: |
| 81 | static size_t CalculateManagementOverheadSize(size_t region_size) { | 165 | static size_t CalculateManagementOverheadSize(size_t region_size) { |
| @@ -88,14 +172,13 @@ public: | |||
| 88 | } | 172 | } |
| 89 | 173 | ||
| 90 | static constexpr Pool GetPool(u32 option) { | 174 | static constexpr Pool GetPool(u32 option) { |
| 91 | return static_cast<Pool>((static_cast<u32>(option) & static_cast<u32>(Pool::Mask)) >> | 175 | return static_cast<Pool>((option & static_cast<u32>(Pool::Mask)) >> |
| 92 | static_cast<u32>(Pool::Shift)); | 176 | static_cast<u32>(Pool::Shift)); |
| 93 | } | 177 | } |
| 94 | 178 | ||
| 95 | static constexpr Direction GetDirection(u32 option) { | 179 | static constexpr Direction GetDirection(u32 option) { |
| 96 | return static_cast<Direction>( | 180 | return static_cast<Direction>((option & static_cast<u32>(Direction::Mask)) >> |
| 97 | (static_cast<u32>(option) & static_cast<u32>(Direction::Mask)) >> | 181 | static_cast<u32>(Direction::Shift)); |
| 98 | static_cast<u32>(Direction::Shift)); | ||
| 99 | } | 182 | } |
| 100 | 183 | ||
| 101 | static constexpr std::tuple<Pool, Direction> DecodeOption(u32 option) { | 184 | static constexpr std::tuple<Pool, Direction> DecodeOption(u32 option) { |
| @@ -103,74 +186,88 @@ public: | |||
| 103 | } | 186 | } |
| 104 | 187 | ||
| 105 | private: | 188 | private: |
| 106 | class Impl final { | 189 | class Impl { |
| 107 | public: | 190 | public: |
| 108 | YUZU_NON_COPYABLE(Impl); | 191 | static size_t CalculateManagementOverheadSize(size_t region_size); |
| 109 | YUZU_NON_MOVEABLE(Impl); | 192 | |
| 193 | static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) { | ||
| 194 | return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / | ||
| 195 | Common::BitSize<u64>()) * | ||
| 196 | sizeof(u64); | ||
| 197 | } | ||
| 110 | 198 | ||
| 199 | public: | ||
| 111 | Impl() = default; | 200 | Impl() = default; |
| 112 | ~Impl() = default; | ||
| 113 | 201 | ||
| 114 | size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end, | 202 | size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end, |
| 115 | Pool p); | 203 | Pool p); |
| 116 | 204 | ||
| 117 | VAddr AllocateBlock(s32 index, bool random) { | 205 | PAddr AllocateBlock(s32 index, bool random) { |
| 118 | return heap.AllocateBlock(index, random); | 206 | return m_heap.AllocateBlock(index, random); |
| 119 | } | 207 | } |
| 120 | 208 | PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) { | |
| 121 | void Free(VAddr addr, size_t num_pages) { | 209 | return m_heap.AllocateAligned(index, num_pages, align_pages); |
| 122 | heap.Free(addr, num_pages); | 210 | } |
| 211 | void Free(PAddr addr, size_t num_pages) { | ||
| 212 | m_heap.Free(addr, num_pages); | ||
| 123 | } | 213 | } |
| 124 | 214 | ||
| 125 | void SetInitialUsedHeapSize(size_t reserved_size) { | 215 | void SetInitialUsedHeapSize(size_t reserved_size) { |
| 126 | heap.SetInitialUsedSize(reserved_size); | 216 | m_heap.SetInitialUsedSize(reserved_size); |
| 127 | } | 217 | } |
| 128 | 218 | ||
| 129 | constexpr Pool GetPool() const { | 219 | void InitializeOptimizedMemory() { |
| 130 | return pool; | 220 | UNIMPLEMENTED(); |
| 131 | } | 221 | } |
| 132 | 222 | ||
| 223 | void TrackUnoptimizedAllocation(PAddr block, size_t num_pages); | ||
| 224 | void TrackOptimizedAllocation(PAddr block, size_t num_pages); | ||
| 225 | |||
| 226 | bool ProcessOptimizedAllocation(PAddr block, size_t num_pages, u8 fill_pattern); | ||
| 227 | |||
| 228 | constexpr Pool GetPool() const { | ||
| 229 | return m_pool; | ||
| 230 | } | ||
| 133 | constexpr size_t GetSize() const { | 231 | constexpr size_t GetSize() const { |
| 134 | return heap.GetSize(); | 232 | return m_heap.GetSize(); |
| 233 | } | ||
| 234 | constexpr PAddr GetEndAddress() const { | ||
| 235 | return m_heap.GetEndAddress(); | ||
| 135 | } | 236 | } |
| 136 | 237 | ||
| 137 | constexpr VAddr GetAddress() const { | 238 | size_t GetFreeSize() const { |
| 138 | return heap.GetAddress(); | 239 | return m_heap.GetFreeSize(); |
| 139 | } | 240 | } |
| 140 | 241 | ||
| 141 | constexpr VAddr GetEndAddress() const { | 242 | void DumpFreeList() const { |
| 142 | return heap.GetEndAddress(); | 243 | UNIMPLEMENTED(); |
| 143 | } | 244 | } |
| 144 | 245 | ||
| 145 | constexpr size_t GetPageOffset(PAddr address) const { | 246 | constexpr size_t GetPageOffset(PAddr address) const { |
| 146 | return heap.GetPageOffset(address); | 247 | return m_heap.GetPageOffset(address); |
| 147 | } | 248 | } |
| 148 | |||
| 149 | constexpr size_t GetPageOffsetToEnd(PAddr address) const { | 249 | constexpr size_t GetPageOffsetToEnd(PAddr address) const { |
| 150 | return heap.GetPageOffsetToEnd(address); | 250 | return m_heap.GetPageOffsetToEnd(address); |
| 151 | } | 251 | } |
| 152 | 252 | ||
| 153 | constexpr void SetNext(Impl* n) { | 253 | constexpr void SetNext(Impl* n) { |
| 154 | next = n; | 254 | m_next = n; |
| 155 | } | 255 | } |
| 156 | |||
| 157 | constexpr void SetPrev(Impl* n) { | 256 | constexpr void SetPrev(Impl* n) { |
| 158 | prev = n; | 257 | m_prev = n; |
| 159 | } | 258 | } |
| 160 | |||
| 161 | constexpr Impl* GetNext() const { | 259 | constexpr Impl* GetNext() const { |
| 162 | return next; | 260 | return m_next; |
| 163 | } | 261 | } |
| 164 | |||
| 165 | constexpr Impl* GetPrev() const { | 262 | constexpr Impl* GetPrev() const { |
| 166 | return prev; | 263 | return m_prev; |
| 167 | } | 264 | } |
| 168 | 265 | ||
| 169 | void OpenFirst(PAddr address, size_t num_pages) { | 266 | void OpenFirst(PAddr address, size_t num_pages) { |
| 170 | size_t index = this->GetPageOffset(address); | 267 | size_t index = this->GetPageOffset(address); |
| 171 | const size_t end = index + num_pages; | 268 | const size_t end = index + num_pages; |
| 172 | while (index < end) { | 269 | while (index < end) { |
| 173 | const RefCount ref_count = (++page_reference_counts[index]); | 270 | const RefCount ref_count = (++m_page_reference_counts[index]); |
| 174 | ASSERT(ref_count == 1); | 271 | ASSERT(ref_count == 1); |
| 175 | 272 | ||
| 176 | index++; | 273 | index++; |
| @@ -181,7 +278,7 @@ private: | |||
| 181 | size_t index = this->GetPageOffset(address); | 278 | size_t index = this->GetPageOffset(address); |
| 182 | const size_t end = index + num_pages; | 279 | const size_t end = index + num_pages; |
| 183 | while (index < end) { | 280 | while (index < end) { |
| 184 | const RefCount ref_count = (++page_reference_counts[index]); | 281 | const RefCount ref_count = (++m_page_reference_counts[index]); |
| 185 | ASSERT(ref_count > 1); | 282 | ASSERT(ref_count > 1); |
| 186 | 283 | ||
| 187 | index++; | 284 | index++; |
| @@ -195,8 +292,8 @@ private: | |||
| 195 | size_t free_start = 0; | 292 | size_t free_start = 0; |
| 196 | size_t free_count = 0; | 293 | size_t free_count = 0; |
| 197 | while (index < end) { | 294 | while (index < end) { |
| 198 | ASSERT(page_reference_counts[index] > 0); | 295 | ASSERT(m_page_reference_counts[index] > 0); |
| 199 | const RefCount ref_count = (--page_reference_counts[index]); | 296 | const RefCount ref_count = (--m_page_reference_counts[index]); |
| 200 | 297 | ||
| 201 | // Keep track of how many zero refcounts we see in a row, to minimize calls to free. | 298 | // Keep track of how many zero refcounts we see in a row, to minimize calls to free. |
| 202 | if (ref_count == 0) { | 299 | if (ref_count == 0) { |
| @@ -208,7 +305,7 @@ private: | |||
| 208 | } | 305 | } |
| 209 | } else { | 306 | } else { |
| 210 | if (free_count > 0) { | 307 | if (free_count > 0) { |
| 211 | this->Free(heap.GetAddress() + free_start * PageSize, free_count); | 308 | this->Free(m_heap.GetAddress() + free_start * PageSize, free_count); |
| 212 | free_count = 0; | 309 | free_count = 0; |
| 213 | } | 310 | } |
| 214 | } | 311 | } |
| @@ -217,44 +314,36 @@ private: | |||
| 217 | } | 314 | } |
| 218 | 315 | ||
| 219 | if (free_count > 0) { | 316 | if (free_count > 0) { |
| 220 | this->Free(heap.GetAddress() + free_start * PageSize, free_count); | 317 | this->Free(m_heap.GetAddress() + free_start * PageSize, free_count); |
| 221 | } | 318 | } |
| 222 | } | 319 | } |
| 223 | 320 | ||
| 224 | static size_t CalculateManagementOverheadSize(size_t region_size); | ||
| 225 | |||
| 226 | static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) { | ||
| 227 | return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / | ||
| 228 | Common::BitSize<u64>()) * | ||
| 229 | sizeof(u64); | ||
| 230 | } | ||
| 231 | |||
| 232 | private: | 321 | private: |
| 233 | using RefCount = u16; | 322 | using RefCount = u16; |
| 234 | 323 | ||
| 235 | KPageHeap heap; | 324 | KPageHeap m_heap; |
| 236 | std::vector<RefCount> page_reference_counts; | 325 | std::vector<RefCount> m_page_reference_counts; |
| 237 | VAddr management_region{}; | 326 | VAddr m_management_region{}; |
| 238 | Pool pool{}; | 327 | Pool m_pool{}; |
| 239 | Impl* next{}; | 328 | Impl* m_next{}; |
| 240 | Impl* prev{}; | 329 | Impl* m_prev{}; |
| 241 | }; | 330 | }; |
| 242 | 331 | ||
| 243 | private: | 332 | private: |
| 244 | Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) { | 333 | Impl& GetManager(PAddr address) { |
| 245 | return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; | 334 | return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; |
| 246 | } | 335 | } |
| 247 | 336 | ||
| 248 | const Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) const { | 337 | const Impl& GetManager(PAddr address) const { |
| 249 | return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; | 338 | return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; |
| 250 | } | 339 | } |
| 251 | 340 | ||
| 252 | constexpr Impl* GetFirstManager(Pool pool, Direction dir) const { | 341 | constexpr Impl* GetFirstManager(Pool pool, Direction dir) { |
| 253 | return dir == Direction::FromBack ? pool_managers_tail[static_cast<size_t>(pool)] | 342 | return dir == Direction::FromBack ? m_pool_managers_tail[static_cast<size_t>(pool)] |
| 254 | : pool_managers_head[static_cast<size_t>(pool)]; | 343 | : m_pool_managers_head[static_cast<size_t>(pool)]; |
| 255 | } | 344 | } |
| 256 | 345 | ||
| 257 | constexpr Impl* GetNextManager(Impl* cur, Direction dir) const { | 346 | constexpr Impl* GetNextManager(Impl* cur, Direction dir) { |
| 258 | if (dir == Direction::FromBack) { | 347 | if (dir == Direction::FromBack) { |
| 259 | return cur->GetPrev(); | 348 | return cur->GetPrev(); |
| 260 | } else { | 349 | } else { |
| @@ -263,15 +352,21 @@ private: | |||
| 263 | } | 352 | } |
| 264 | 353 | ||
| 265 | Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir, | 354 | Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir, |
| 266 | bool random); | 355 | bool unoptimized, bool random); |
| 267 | 356 | ||
| 268 | private: | 357 | private: |
| 269 | Core::System& system; | 358 | template <typename T> |
| 270 | std::array<KLightLock, static_cast<size_t>(Pool::Count)> pool_locks; | 359 | using PoolArray = std::array<T, static_cast<size_t>(Pool::Count)>; |
| 271 | std::array<Impl*, MaxManagerCount> pool_managers_head{}; | 360 | |
| 272 | std::array<Impl*, MaxManagerCount> pool_managers_tail{}; | 361 | Core::System& m_system; |
| 273 | std::array<Impl, MaxManagerCount> managers; | 362 | const KMemoryLayout& m_memory_layout; |
| 274 | size_t num_managers{}; | 363 | PoolArray<KLightLock> m_pool_locks; |
| 364 | std::array<Impl*, MaxManagerCount> m_pool_managers_head{}; | ||
| 365 | std::array<Impl*, MaxManagerCount> m_pool_managers_tail{}; | ||
| 366 | std::array<Impl, MaxManagerCount> m_managers; | ||
| 367 | size_t m_num_managers{}; | ||
| 368 | PoolArray<u64> m_optimized_process_ids{}; | ||
| 369 | PoolArray<bool> m_has_optimized_process{}; | ||
| 275 | }; | 370 | }; |
| 276 | 371 | ||
| 277 | } // namespace Kernel | 372 | } // namespace Kernel |
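Note on the GetPool/GetDirection helpers touched above: they decode a packed allocation-option word by masking and shifting enum-encoded fields, and the diff merely drops a redundant static_cast on the already-u32 option argument. A minimal standalone sketch of that decode idiom follows; the field layout and enumerator values here are illustrative assumptions, not the kernel's actual constants.

    #include <cstdint>
    #include <tuple>

    // Assumed layout for illustration only: bits 0-3 carry the pool, bits 4-5 the direction.
    enum class Pool : std::uint32_t { Application = 0, Applet = 1, System = 2, Mask = 0xF, Shift = 0 };
    enum class Direction : std::uint32_t { FromFront = 0, FromBack = 1, Mask = 0x30, Shift = 4 };

    constexpr Pool GetPool(std::uint32_t option) {
        return static_cast<Pool>((option & static_cast<std::uint32_t>(Pool::Mask)) >>
                                 static_cast<std::uint32_t>(Pool::Shift));
    }

    constexpr Direction GetDirection(std::uint32_t option) {
        return static_cast<Direction>((option & static_cast<std::uint32_t>(Direction::Mask)) >>
                                      static_cast<std::uint32_t>(Direction::Shift));
    }

    constexpr std::tuple<Pool, Direction> DecodeOption(std::uint32_t option) {
        return {GetPool(option), GetDirection(option)};
    }

    // 0x12 == 0b01'0010: the low nibble selects Pool::System, bits 4-5 select Direction::FromBack.
    static_assert(GetPool(0x12) == Pool::System);
    static_assert(GetDirection(0x12) == Direction::FromBack);
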
diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h index 7e2fcccdc..e5630c1ac 100644 --- a/src/core/hle/kernel/k_memory_region_type.h +++ b/src/core/hle/kernel/k_memory_region_type.h | |||
| @@ -142,32 +142,38 @@ private: | |||
| 142 | 142 | ||
| 143 | } // namespace impl | 143 | } // namespace impl |
| 144 | 144 | ||
| 145 | constexpr auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue(); | 145 | constexpr inline auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue(); |
| 146 | constexpr auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2); | 146 | |
| 147 | constexpr auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2); | 147 | constexpr inline auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2); |
| 148 | constexpr inline auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2); | ||
| 148 | static_assert(KMemoryRegionType_Kernel.GetValue() == 0x1); | 149 | static_assert(KMemoryRegionType_Kernel.GetValue() == 0x1); |
| 149 | static_assert(KMemoryRegionType_Dram.GetValue() == 0x2); | 150 | static_assert(KMemoryRegionType_Dram.GetValue() == 0x2); |
| 150 | 151 | ||
| 151 | constexpr auto KMemoryRegionType_DramKernelBase = | 152 | // constexpr inline auto KMemoryRegionType_CoreLocalRegion = |
| 153 | // KMemoryRegionType_None.DeriveInitial(2).Finalize(); | ||
| 154 | // static_assert(KMemoryRegionType_CoreLocalRegion.GetValue() == 0x4); | ||
| 155 | |||
| 156 | constexpr inline auto KMemoryRegionType_DramKernelBase = | ||
| 152 | KMemoryRegionType_Dram.DeriveSparse(0, 3, 0) | 157 | KMemoryRegionType_Dram.DeriveSparse(0, 3, 0) |
| 153 | .SetAttribute(KMemoryRegionAttr_NoUserMap) | 158 | .SetAttribute(KMemoryRegionAttr_NoUserMap) |
| 154 | .SetAttribute(KMemoryRegionAttr_CarveoutProtected); | 159 | .SetAttribute(KMemoryRegionAttr_CarveoutProtected); |
| 155 | constexpr auto KMemoryRegionType_DramReservedBase = KMemoryRegionType_Dram.DeriveSparse(0, 3, 1); | 160 | constexpr inline auto KMemoryRegionType_DramReservedBase = |
| 156 | constexpr auto KMemoryRegionType_DramHeapBase = | 161 | KMemoryRegionType_Dram.DeriveSparse(0, 3, 1); |
| 162 | constexpr inline auto KMemoryRegionType_DramHeapBase = | ||
| 157 | KMemoryRegionType_Dram.DeriveSparse(0, 3, 2).SetAttribute(KMemoryRegionAttr_LinearMapped); | 163 | KMemoryRegionType_Dram.DeriveSparse(0, 3, 2).SetAttribute(KMemoryRegionAttr_LinearMapped); |
| 158 | static_assert(KMemoryRegionType_DramKernelBase.GetValue() == | 164 | static_assert(KMemoryRegionType_DramKernelBase.GetValue() == |
| 159 | (0xE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap)); | 165 | (0xE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap)); |
| 160 | static_assert(KMemoryRegionType_DramReservedBase.GetValue() == (0x16)); | 166 | static_assert(KMemoryRegionType_DramReservedBase.GetValue() == (0x16)); |
| 161 | static_assert(KMemoryRegionType_DramHeapBase.GetValue() == (0x26 | KMemoryRegionAttr_LinearMapped)); | 167 | static_assert(KMemoryRegionType_DramHeapBase.GetValue() == (0x26 | KMemoryRegionAttr_LinearMapped)); |
| 162 | 168 | ||
| 163 | constexpr auto KMemoryRegionType_DramKernelCode = | 169 | constexpr inline auto KMemoryRegionType_DramKernelCode = |
| 164 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 0); | 170 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 0); |
| 165 | constexpr auto KMemoryRegionType_DramKernelSlab = | 171 | constexpr inline auto KMemoryRegionType_DramKernelSlab = |
| 166 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 1); | 172 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 1); |
| 167 | constexpr auto KMemoryRegionType_DramKernelPtHeap = | 173 | constexpr inline auto KMemoryRegionType_DramKernelPtHeap = |
| 168 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 2).SetAttribute( | 174 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 2).SetAttribute( |
| 169 | KMemoryRegionAttr_LinearMapped); | 175 | KMemoryRegionAttr_LinearMapped); |
| 170 | constexpr auto KMemoryRegionType_DramKernelInitPt = | 176 | constexpr inline auto KMemoryRegionType_DramKernelInitPt = |
| 171 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 3).SetAttribute( | 177 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 3).SetAttribute( |
| 172 | KMemoryRegionAttr_LinearMapped); | 178 | KMemoryRegionAttr_LinearMapped); |
| 173 | static_assert(KMemoryRegionType_DramKernelCode.GetValue() == | 179 | static_assert(KMemoryRegionType_DramKernelCode.GetValue() == |
| @@ -181,32 +187,40 @@ static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() == | |||
| 181 | (0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | | 187 | (0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | |
| 182 | KMemoryRegionAttr_LinearMapped)); | 188 | KMemoryRegionAttr_LinearMapped)); |
| 183 | 189 | ||
| 184 | constexpr auto KMemoryRegionType_DramReservedEarly = | 190 | constexpr inline auto KMemoryRegionType_DramKernelSecureAppletMemory = |
| 191 | KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute( | ||
| 192 | KMemoryRegionAttr_LinearMapped); | ||
| 193 | static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() == | ||
| 194 | (0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | | ||
| 195 | KMemoryRegionAttr_LinearMapped)); | ||
| 196 | |||
| 197 | constexpr inline auto KMemoryRegionType_DramReservedEarly = | ||
| 185 | KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap); | 198 | KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap); |
| 186 | static_assert(KMemoryRegionType_DramReservedEarly.GetValue() == | 199 | static_assert(KMemoryRegionType_DramReservedEarly.GetValue() == |
| 187 | (0x16 | KMemoryRegionAttr_NoUserMap)); | 200 | (0x16 | KMemoryRegionAttr_NoUserMap)); |
| 188 | 201 | ||
| 189 | constexpr auto KMemoryRegionType_KernelTraceBuffer = | 202 | constexpr inline auto KMemoryRegionType_KernelTraceBuffer = |
| 190 | KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 0) | 203 | KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 0) |
| 191 | .SetAttribute(KMemoryRegionAttr_LinearMapped) | 204 | .SetAttribute(KMemoryRegionAttr_LinearMapped) |
| 192 | .SetAttribute(KMemoryRegionAttr_UserReadOnly); | 205 | .SetAttribute(KMemoryRegionAttr_UserReadOnly); |
| 193 | constexpr auto KMemoryRegionType_OnMemoryBootImage = | 206 | constexpr inline auto KMemoryRegionType_OnMemoryBootImage = |
| 194 | KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 1); | 207 | KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 1); |
| 195 | constexpr auto KMemoryRegionType_DTB = KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2); | 208 | constexpr inline auto KMemoryRegionType_DTB = |
| 209 | KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2); | ||
| 196 | static_assert(KMemoryRegionType_KernelTraceBuffer.GetValue() == | 210 | static_assert(KMemoryRegionType_KernelTraceBuffer.GetValue() == |
| 197 | (0xD6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_UserReadOnly)); | 211 | (0xD6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_UserReadOnly)); |
| 198 | static_assert(KMemoryRegionType_OnMemoryBootImage.GetValue() == 0x156); | 212 | static_assert(KMemoryRegionType_OnMemoryBootImage.GetValue() == 0x156); |
| 199 | static_assert(KMemoryRegionType_DTB.GetValue() == 0x256); | 213 | static_assert(KMemoryRegionType_DTB.GetValue() == 0x256); |
| 200 | 214 | ||
| 201 | constexpr auto KMemoryRegionType_DramPoolPartition = | 215 | constexpr inline auto KMemoryRegionType_DramPoolPartition = |
| 202 | KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap); | 216 | KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap); |
| 203 | static_assert(KMemoryRegionType_DramPoolPartition.GetValue() == | 217 | static_assert(KMemoryRegionType_DramPoolPartition.GetValue() == |
| 204 | (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); | 218 | (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); |
| 205 | 219 | ||
| 206 | constexpr auto KMemoryRegionType_DramPoolManagement = | 220 | constexpr inline auto KMemoryRegionType_DramPoolManagement = |
| 207 | KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute( | 221 | KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute( |
| 208 | KMemoryRegionAttr_CarveoutProtected); | 222 | KMemoryRegionAttr_CarveoutProtected); |
| 209 | constexpr auto KMemoryRegionType_DramUserPool = | 223 | constexpr inline auto KMemoryRegionType_DramUserPool = |
| 210 | KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition(); | 224 | KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition(); |
| 211 | static_assert(KMemoryRegionType_DramPoolManagement.GetValue() == | 225 | static_assert(KMemoryRegionType_DramPoolManagement.GetValue() == |
| 212 | (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | | 226 | (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | |
| @@ -214,11 +228,13 @@ static_assert(KMemoryRegionType_DramPoolManagement.GetValue() == | |||
| 214 | static_assert(KMemoryRegionType_DramUserPool.GetValue() == | 228 | static_assert(KMemoryRegionType_DramUserPool.GetValue() == |
| 215 | (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); | 229 | (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); |
| 216 | 230 | ||
| 217 | constexpr auto KMemoryRegionType_DramApplicationPool = KMemoryRegionType_DramUserPool.Derive(4, 0); | 231 | constexpr inline auto KMemoryRegionType_DramApplicationPool = |
| 218 | constexpr auto KMemoryRegionType_DramAppletPool = KMemoryRegionType_DramUserPool.Derive(4, 1); | 232 | KMemoryRegionType_DramUserPool.Derive(4, 0); |
| 219 | constexpr auto KMemoryRegionType_DramSystemNonSecurePool = | 233 | constexpr inline auto KMemoryRegionType_DramAppletPool = |
| 234 | KMemoryRegionType_DramUserPool.Derive(4, 1); | ||
| 235 | constexpr inline auto KMemoryRegionType_DramSystemNonSecurePool = | ||
| 220 | KMemoryRegionType_DramUserPool.Derive(4, 2); | 236 | KMemoryRegionType_DramUserPool.Derive(4, 2); |
| 221 | constexpr auto KMemoryRegionType_DramSystemPool = | 237 | constexpr inline auto KMemoryRegionType_DramSystemPool = |
| 222 | KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected); | 238 | KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected); |
| 223 | static_assert(KMemoryRegionType_DramApplicationPool.GetValue() == | 239 | static_assert(KMemoryRegionType_DramApplicationPool.GetValue() == |
| 224 | (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); | 240 | (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); |
| @@ -230,50 +246,55 @@ static_assert(KMemoryRegionType_DramSystemPool.GetValue() == | |||
| 230 | (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | | 246 | (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | |
| 231 | KMemoryRegionAttr_CarveoutProtected)); | 247 | KMemoryRegionAttr_CarveoutProtected)); |
| 232 | 248 | ||
| 233 | constexpr auto KMemoryRegionType_VirtualDramHeapBase = KMemoryRegionType_Dram.DeriveSparse(1, 3, 0); | 249 | constexpr inline auto KMemoryRegionType_VirtualDramHeapBase = |
| 234 | constexpr auto KMemoryRegionType_VirtualDramKernelPtHeap = | 250 | KMemoryRegionType_Dram.DeriveSparse(1, 3, 0); |
| 251 | constexpr inline auto KMemoryRegionType_VirtualDramKernelPtHeap = | ||
| 235 | KMemoryRegionType_Dram.DeriveSparse(1, 3, 1); | 252 | KMemoryRegionType_Dram.DeriveSparse(1, 3, 1); |
| 236 | constexpr auto KMemoryRegionType_VirtualDramKernelTraceBuffer = | 253 | constexpr inline auto KMemoryRegionType_VirtualDramKernelTraceBuffer = |
| 237 | KMemoryRegionType_Dram.DeriveSparse(1, 3, 2); | 254 | KMemoryRegionType_Dram.DeriveSparse(1, 3, 2); |
| 238 | static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A); | 255 | static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A); |
| 239 | static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A); | 256 | static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A); |
| 240 | static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A); | 257 | static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A); |
| 241 | 258 | ||
| 242 | // UNUSED: .DeriveSparse(2, 2, 0); | 259 | // UNUSED: .DeriveSparse(2, 2, 0); |
| 243 | constexpr auto KMemoryRegionType_VirtualDramUnknownDebug = | 260 | constexpr inline auto KMemoryRegionType_VirtualDramUnknownDebug = |
| 244 | KMemoryRegionType_Dram.DeriveSparse(2, 2, 1); | 261 | KMemoryRegionType_Dram.DeriveSparse(2, 2, 1); |
| 245 | static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52)); | 262 | static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52)); |
| 246 | 263 | ||
| 247 | constexpr auto KMemoryRegionType_VirtualDramKernelInitPt = | 264 | constexpr inline auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory = |
| 265 | KMemoryRegionType_Dram.DeriveSparse(3, 1, 0); | ||
| 266 | static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62)); | ||
| 267 | |||
| 268 | constexpr inline auto KMemoryRegionType_VirtualDramKernelInitPt = | ||
| 248 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0); | 269 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0); |
| 249 | constexpr auto KMemoryRegionType_VirtualDramPoolManagement = | 270 | constexpr inline auto KMemoryRegionType_VirtualDramPoolManagement = |
| 250 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1); | 271 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1); |
| 251 | constexpr auto KMemoryRegionType_VirtualDramUserPool = | 272 | constexpr inline auto KMemoryRegionType_VirtualDramUserPool = |
| 252 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2); | 273 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2); |
| 253 | static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A); | 274 | static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A); |
| 254 | static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A); | 275 | static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A); |
| 255 | static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A); | 276 | static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A); |
| 256 | 277 | ||
| 257 | // NOTE: For unknown reason, the pools are derived out-of-order here. It's worth eventually trying | 278 | // NOTE: For unknown reason, the pools are derived out-of-order here. |
| 258 | // to understand why Nintendo made this choice. | 279 | // It's worth eventually trying to understand why Nintendo made this choice. |
| 259 | // UNUSED: .Derive(6, 0); | 280 | // UNUSED: .Derive(6, 0); |
| 260 | // UNUSED: .Derive(6, 1); | 281 | // UNUSED: .Derive(6, 1); |
| 261 | constexpr auto KMemoryRegionType_VirtualDramAppletPool = | 282 | constexpr inline auto KMemoryRegionType_VirtualDramAppletPool = |
| 262 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 2); | 283 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 2); |
| 263 | constexpr auto KMemoryRegionType_VirtualDramApplicationPool = | 284 | constexpr inline auto KMemoryRegionType_VirtualDramApplicationPool = |
| 264 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 3); | 285 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 3); |
| 265 | constexpr auto KMemoryRegionType_VirtualDramSystemNonSecurePool = | 286 | constexpr inline auto KMemoryRegionType_VirtualDramSystemNonSecurePool = |
| 266 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 4); | 287 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 4); |
| 267 | constexpr auto KMemoryRegionType_VirtualDramSystemPool = | 288 | constexpr inline auto KMemoryRegionType_VirtualDramSystemPool = |
| 268 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 5); | 289 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 5); |
| 269 | static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A); | 290 | static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A); |
| 270 | static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A); | 291 | static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A); |
| 271 | static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A); | 292 | static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A); |
| 272 | static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A); | 293 | static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A); |
| 273 | 294 | ||
| 274 | constexpr auto KMemoryRegionType_ArchDeviceBase = | 295 | constexpr inline auto KMemoryRegionType_ArchDeviceBase = |
| 275 | KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly(); | 296 | KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly(); |
| 276 | constexpr auto KMemoryRegionType_BoardDeviceBase = | 297 | constexpr inline auto KMemoryRegionType_BoardDeviceBase = |
| 277 | KMemoryRegionType_Kernel.DeriveTransition(0, 2).SetDenseOnly(); | 298 | KMemoryRegionType_Kernel.DeriveTransition(0, 2).SetDenseOnly(); |
| 278 | static_assert(KMemoryRegionType_ArchDeviceBase.GetValue() == 0x5); | 299 | static_assert(KMemoryRegionType_ArchDeviceBase.GetValue() == 0x5); |
| 279 | static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5); | 300 | static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5); |
| @@ -284,7 +305,7 @@ static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5); | |||
| 284 | #error "Unimplemented" | 305 | #error "Unimplemented" |
| 285 | #else | 306 | #else |
| 286 | // Default to no architecture devices. | 307 | // Default to no architecture devices. |
| 287 | constexpr auto NumArchitectureDeviceRegions = 0; | 308 | constexpr inline auto NumArchitectureDeviceRegions = 0; |
| 288 | #endif | 309 | #endif |
| 289 | static_assert(NumArchitectureDeviceRegions >= 0); | 310 | static_assert(NumArchitectureDeviceRegions >= 0); |
| 290 | 311 | ||
| @@ -292,34 +313,35 @@ static_assert(NumArchitectureDeviceRegions >= 0); | |||
| 292 | #include "core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc" | 313 | #include "core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc" |
| 293 | #else | 314 | #else |
| 294 | // Default to no board devices. | 315 | // Default to no board devices. |
| 295 | constexpr auto NumBoardDeviceRegions = 0; | 316 | constexpr inline auto NumBoardDeviceRegions = 0; |
| 296 | #endif | 317 | #endif |
| 297 | static_assert(NumBoardDeviceRegions >= 0); | 318 | static_assert(NumBoardDeviceRegions >= 0); |
| 298 | 319 | ||
| 299 | constexpr auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0); | 320 | constexpr inline auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0); |
| 300 | constexpr auto KMemoryRegionType_KernelStack = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1); | 321 | constexpr inline auto KMemoryRegionType_KernelStack = |
| 301 | constexpr auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2); | 322 | KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1); |
| 302 | constexpr auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3); | 323 | constexpr inline auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2); |
| 324 | constexpr inline auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3); | ||
| 303 | static_assert(KMemoryRegionType_KernelCode.GetValue() == 0x19); | 325 | static_assert(KMemoryRegionType_KernelCode.GetValue() == 0x19); |
| 304 | static_assert(KMemoryRegionType_KernelStack.GetValue() == 0x29); | 326 | static_assert(KMemoryRegionType_KernelStack.GetValue() == 0x29); |
| 305 | static_assert(KMemoryRegionType_KernelMisc.GetValue() == 0x49); | 327 | static_assert(KMemoryRegionType_KernelMisc.GetValue() == 0x49); |
| 306 | static_assert(KMemoryRegionType_KernelSlab.GetValue() == 0x89); | 328 | static_assert(KMemoryRegionType_KernelSlab.GetValue() == 0x89); |
| 307 | 329 | ||
| 308 | constexpr auto KMemoryRegionType_KernelMiscDerivedBase = | 330 | constexpr inline auto KMemoryRegionType_KernelMiscDerivedBase = |
| 309 | KMemoryRegionType_KernelMisc.DeriveTransition(); | 331 | KMemoryRegionType_KernelMisc.DeriveTransition(); |
| 310 | static_assert(KMemoryRegionType_KernelMiscDerivedBase.GetValue() == 0x149); | 332 | static_assert(KMemoryRegionType_KernelMiscDerivedBase.GetValue() == 0x149); |
| 311 | 333 | ||
| 312 | // UNUSED: .Derive(7, 0); | 334 | // UNUSED: .Derive(7, 0); |
| 313 | constexpr auto KMemoryRegionType_KernelMiscMainStack = | 335 | constexpr inline auto KMemoryRegionType_KernelMiscMainStack = |
| 314 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 1); | 336 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 1); |
| 315 | constexpr auto KMemoryRegionType_KernelMiscMappedDevice = | 337 | constexpr inline auto KMemoryRegionType_KernelMiscMappedDevice = |
| 316 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 2); | 338 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 2); |
| 317 | constexpr auto KMemoryRegionType_KernelMiscExceptionStack = | 339 | constexpr inline auto KMemoryRegionType_KernelMiscExceptionStack = |
| 318 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 3); | 340 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 3); |
| 319 | constexpr auto KMemoryRegionType_KernelMiscUnknownDebug = | 341 | constexpr inline auto KMemoryRegionType_KernelMiscUnknownDebug = |
| 320 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 4); | 342 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 4); |
| 321 | // UNUSED: .Derive(7, 5); | 343 | // UNUSED: .Derive(7, 5); |
| 322 | constexpr auto KMemoryRegionType_KernelMiscIdleStack = | 344 | constexpr inline auto KMemoryRegionType_KernelMiscIdleStack = |
| 323 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 6); | 345 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 6); |
| 324 | static_assert(KMemoryRegionType_KernelMiscMainStack.GetValue() == 0xB49); | 346 | static_assert(KMemoryRegionType_KernelMiscMainStack.GetValue() == 0xB49); |
| 325 | static_assert(KMemoryRegionType_KernelMiscMappedDevice.GetValue() == 0xD49); | 347 | static_assert(KMemoryRegionType_KernelMiscMappedDevice.GetValue() == 0xD49); |
| @@ -327,7 +349,8 @@ static_assert(KMemoryRegionType_KernelMiscExceptionStack.GetValue() == 0x1349); | |||
| 327 | static_assert(KMemoryRegionType_KernelMiscUnknownDebug.GetValue() == 0x1549); | 349 | static_assert(KMemoryRegionType_KernelMiscUnknownDebug.GetValue() == 0x1549); |
| 328 | static_assert(KMemoryRegionType_KernelMiscIdleStack.GetValue() == 0x2349); | 350 | static_assert(KMemoryRegionType_KernelMiscIdleStack.GetValue() == 0x2349); |
| 329 | 351 | ||
| 330 | constexpr auto KMemoryRegionType_KernelTemp = KMemoryRegionType_Kernel.Advance(2).Derive(2, 0); | 352 | constexpr inline auto KMemoryRegionType_KernelTemp = |
| 353 | KMemoryRegionType_Kernel.Advance(2).Derive(2, 0); | ||
| 331 | static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31); | 354 | static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31); |
| 332 | 355 | ||
| 333 | constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) { | 356 | constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) { |
| @@ -335,6 +358,8 @@ constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) { | |||
| 335 | return KMemoryRegionType_VirtualDramKernelTraceBuffer; | 358 | return KMemoryRegionType_VirtualDramKernelTraceBuffer; |
| 336 | } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) { | 359 | } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) { |
| 337 | return KMemoryRegionType_VirtualDramKernelPtHeap; | 360 | return KMemoryRegionType_VirtualDramKernelPtHeap; |
| 361 | } else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) { | ||
| 362 | return KMemoryRegionType_VirtualDramKernelSecureAppletMemory; | ||
| 338 | } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) { | 363 | } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) { |
| 339 | return KMemoryRegionType_VirtualDramUnknownDebug; | 364 | return KMemoryRegionType_VirtualDramUnknownDebug; |
| 340 | } else { | 365 | } else { |
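The GetTypeForVirtualLinearMapping branch added above relies on the property that a derived region type keeps every bit of its ancestor, so both IsAncestorOf and attribute tests reduce to a mask check of the form (value | bits) == value. A small sketch of that idiom, with attribute bit positions assumed purely for illustration:

    #include <cstdint>

    // Attribute bits OR'd into region type values. The positions here are assumptions for
    // this sketch; the real constants are defined alongside KMemoryRegionType.
    constexpr std::uint32_t Attr_LinearMapped = 1u << 28;
    constexpr std::uint32_t Attr_NoUserMap    = 1u << 29;

    // True when every bit in `bits` is already set in `value`.
    constexpr bool HasAllBits(std::uint32_t value, std::uint32_t bits) {
        return (value | bits) == value;
    }

    static_assert(HasAllBits(0x26 | Attr_LinearMapped, Attr_LinearMapped));
    static_assert(!HasAllBits(0x26, Attr_NoUserMap));
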
diff --git a/src/core/hle/kernel/k_page_bitmap.h b/src/core/hle/kernel/k_page_bitmap.h index c97b3dc0b..0ff987732 100644 --- a/src/core/hle/kernel/k_page_bitmap.h +++ b/src/core/hle/kernel/k_page_bitmap.h | |||
| @@ -16,107 +16,126 @@ | |||
| 16 | namespace Kernel { | 16 | namespace Kernel { |
| 17 | 17 | ||
| 18 | class KPageBitmap { | 18 | class KPageBitmap { |
| 19 | private: | 19 | public: |
| 20 | class RandomBitGenerator { | 20 | class RandomBitGenerator { |
| 21 | private: | 21 | public: |
| 22 | Common::TinyMT rng{}; | 22 | RandomBitGenerator() { |
| 23 | u32 entropy{}; | 23 | m_rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64())); |
| 24 | u32 bits_available{}; | 24 | } |
| 25 | |||
| 26 | u64 SelectRandomBit(u64 bitmap) { | ||
| 27 | u64 selected = 0; | ||
| 28 | |||
| 29 | for (size_t cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2; cur_num_bits != 0; | ||
| 30 | cur_num_bits /= 2) { | ||
| 31 | const u64 high = (bitmap >> cur_num_bits); | ||
| 32 | const u64 low = (bitmap & (~(UINT64_C(0xFFFFFFFFFFFFFFFF) << cur_num_bits))); | ||
| 33 | |||
| 34 | // Choose high if we have high and (don't have low or select high randomly). | ||
| 35 | if (high && (low == 0 || this->GenerateRandomBit())) { | ||
| 36 | bitmap = high; | ||
| 37 | selected += cur_num_bits; | ||
| 38 | } else { | ||
| 39 | bitmap = low; | ||
| 40 | selected += 0; | ||
| 41 | } | ||
| 42 | } | ||
| 43 | |||
| 44 | return selected; | ||
| 45 | } | ||
| 46 | |||
| 47 | u64 GenerateRandom(u64 max) { | ||
| 48 | // Determine the number of bits we need. | ||
| 49 | const u64 bits_needed = 1 + (Common::BitSize<decltype(max)>() - std::countl_zero(max)); | ||
| 50 | |||
| 51 | // Generate a random value of the desired bitwidth. | ||
| 52 | const u64 rnd = this->GenerateRandomBits(static_cast<u32>(bits_needed)); | ||
| 53 | |||
| 54 | // Adjust the value to be in range. | ||
| 55 | return rnd - ((rnd / max) * max); | ||
| 56 | } | ||
| 25 | 57 | ||
| 26 | private: | 58 | private: |
| 27 | void RefreshEntropy() { | 59 | void RefreshEntropy() { |
| 28 | entropy = rng.GenerateRandomU32(); | 60 | m_entropy = m_rng.GenerateRandomU32(); |
| 29 | bits_available = static_cast<u32>(Common::BitSize<decltype(entropy)>()); | 61 | m_bits_available = static_cast<u32>(Common::BitSize<decltype(m_entropy)>()); |
| 30 | } | 62 | } |
| 31 | 63 | ||
| 32 | bool GenerateRandomBit() { | 64 | bool GenerateRandomBit() { |
| 33 | if (bits_available == 0) { | 65 | if (m_bits_available == 0) { |
| 34 | this->RefreshEntropy(); | 66 | this->RefreshEntropy(); |
| 35 | } | 67 | } |
| 36 | 68 | ||
| 37 | const bool rnd_bit = (entropy & 1) != 0; | 69 | const bool rnd_bit = (m_entropy & 1) != 0; |
| 38 | entropy >>= 1; | 70 | m_entropy >>= 1; |
| 39 | --bits_available; | 71 | --m_bits_available; |
| 40 | return rnd_bit; | 72 | return rnd_bit; |
| 41 | } | 73 | } |
| 42 | 74 | ||
| 43 | public: | 75 | u64 GenerateRandomBits(u32 num_bits) { |
| 44 | RandomBitGenerator() { | 76 | u64 result = 0; |
| 45 | rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64())); | ||
| 46 | } | ||
| 47 | 77 | ||
| 48 | std::size_t SelectRandomBit(u64 bitmap) { | 78 | // Iteratively add random bits to our result. |
| 49 | u64 selected = 0; | 79 | while (num_bits > 0) { |
| 80 | // Ensure we have random bits to take from. | ||
| 81 | if (m_bits_available == 0) { | ||
| 82 | this->RefreshEntropy(); | ||
| 83 | } | ||
| 50 | 84 | ||
| 51 | u64 cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2; | 85 | // Determine how many bits to take this round. |
| 52 | u64 cur_mask = (1ULL << cur_num_bits) - 1; | 86 | const auto cur_bits = std::min(num_bits, m_bits_available); |
| 53 | 87 | ||
| 54 | while (cur_num_bits) { | 88 | // Generate mask for our current bits. |
| 55 | const u64 low = (bitmap >> 0) & cur_mask; | 89 | const u64 mask = (static_cast<u64>(1) << cur_bits) - 1; |
| 56 | const u64 high = (bitmap >> cur_num_bits) & cur_mask; | ||
| 57 | 90 | ||
| 58 | bool choose_low; | 91 | // Add bits to output from our entropy. |
| 59 | if (high == 0) { | 92 | result <<= cur_bits; |
| 60 | // If only low val is set, choose low. | 93 | result |= (m_entropy & mask); |
| 61 | choose_low = true; | ||
| 62 | } else if (low == 0) { | ||
| 63 | // If only high val is set, choose high. | ||
| 64 | choose_low = false; | ||
| 65 | } else { | ||
| 66 | // If both are set, choose random. | ||
| 67 | choose_low = this->GenerateRandomBit(); | ||
| 68 | } | ||
| 69 | 94 | ||
| 70 | // If we chose low, proceed with low. | 95 | // Remove bits from our entropy. |
| 71 | if (choose_low) { | 96 | m_entropy >>= cur_bits; |
| 72 | bitmap = low; | 97 | m_bits_available -= cur_bits; |
| 73 | selected += 0; | ||
| 74 | } else { | ||
| 75 | bitmap = high; | ||
| 76 | selected += cur_num_bits; | ||
| 77 | } | ||
| 78 | 98 | ||
| 79 | // Proceed. | 99 | // Advance. |
| 80 | cur_num_bits /= 2; | 100 | num_bits -= cur_bits; |
| 81 | cur_mask >>= cur_num_bits; | ||
| 82 | } | 101 | } |
| 83 | 102 | ||
| 84 | return selected; | 103 | return result; |
| 85 | } | 104 | } |
| 105 | |||
| 106 | private: | ||
| 107 | Common::TinyMT m_rng; | ||
| 108 | u32 m_entropy{}; | ||
| 109 | u32 m_bits_available{}; | ||
| 86 | }; | 110 | }; |
| 87 | 111 | ||
| 88 | public: | 112 | public: |
| 89 | static constexpr std::size_t MaxDepth = 4; | 113 | static constexpr size_t MaxDepth = 4; |
| 90 | |||
| 91 | private: | ||
| 92 | std::array<u64*, MaxDepth> bit_storages{}; | ||
| 93 | RandomBitGenerator rng{}; | ||
| 94 | std::size_t num_bits{}; | ||
| 95 | std::size_t used_depths{}; | ||
| 96 | 114 | ||
| 97 | public: | 115 | public: |
| 98 | KPageBitmap() = default; | 116 | KPageBitmap() = default; |
| 99 | 117 | ||
| 100 | constexpr std::size_t GetNumBits() const { | 118 | constexpr size_t GetNumBits() const { |
| 101 | return num_bits; | 119 | return m_num_bits; |
| 102 | } | 120 | } |
| 103 | constexpr s32 GetHighestDepthIndex() const { | 121 | constexpr s32 GetHighestDepthIndex() const { |
| 104 | return static_cast<s32>(used_depths) - 1; | 122 | return static_cast<s32>(m_used_depths) - 1; |
| 105 | } | 123 | } |
| 106 | 124 | ||
| 107 | u64* Initialize(u64* storage, std::size_t size) { | 125 | u64* Initialize(u64* storage, size_t size) { |
| 108 | // Initially, everything is un-set. | 126 | // Initially, everything is un-set. |
| 109 | num_bits = 0; | 127 | m_num_bits = 0; |
| 110 | 128 | ||
| 111 | // Calculate the needed bitmap depth. | 129 | // Calculate the needed bitmap depth. |
| 112 | used_depths = static_cast<std::size_t>(GetRequiredDepth(size)); | 130 | m_used_depths = static_cast<size_t>(GetRequiredDepth(size)); |
| 113 | ASSERT(used_depths <= MaxDepth); | 131 | ASSERT(m_used_depths <= MaxDepth); |
| 114 | 132 | ||
| 115 | // Set the bitmap pointers. | 133 | // Set the bitmap pointers. |
| 116 | for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) { | 134 | for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) { |
| 117 | bit_storages[depth] = storage; | 135 | m_bit_storages[depth] = storage; |
| 118 | size = Common::AlignUp(size, Common::BitSize<u64>()) / Common::BitSize<u64>(); | 136 | size = Common::AlignUp(size, Common::BitSize<u64>()) / Common::BitSize<u64>(); |
| 119 | storage += size; | 137 | storage += size; |
| 138 | m_end_storages[depth] = storage; | ||
| 120 | } | 139 | } |
| 121 | 140 | ||
| 122 | return storage; | 141 | return storage; |
| @@ -128,19 +147,19 @@ public: | |||
| 128 | 147 | ||
| 129 | if (random) { | 148 | if (random) { |
| 130 | do { | 149 | do { |
| 131 | const u64 v = bit_storages[depth][offset]; | 150 | const u64 v = m_bit_storages[depth][offset]; |
| 132 | if (v == 0) { | 151 | if (v == 0) { |
| 133 | // If depth is bigger than zero, then a previous level indicated a block was | 152 | // If depth is bigger than zero, then a previous level indicated a block was |
| 134 | // free. | 153 | // free. |
| 135 | ASSERT(depth == 0); | 154 | ASSERT(depth == 0); |
| 136 | return -1; | 155 | return -1; |
| 137 | } | 156 | } |
| 138 | offset = offset * Common::BitSize<u64>() + rng.SelectRandomBit(v); | 157 | offset = offset * Common::BitSize<u64>() + m_rng.SelectRandomBit(v); |
| 139 | ++depth; | 158 | ++depth; |
| 140 | } while (depth < static_cast<s32>(used_depths)); | 159 | } while (depth < static_cast<s32>(m_used_depths)); |
| 141 | } else { | 160 | } else { |
| 142 | do { | 161 | do { |
| 143 | const u64 v = bit_storages[depth][offset]; | 162 | const u64 v = m_bit_storages[depth][offset]; |
| 144 | if (v == 0) { | 163 | if (v == 0) { |
| 145 | // If depth is bigger than zero, then a previous level indicated a block was | 164 | // If depth is bigger than zero, then a previous level indicated a block was |
| 146 | // free. | 165 | // free. |
| @@ -149,28 +168,69 @@ public: | |||
| 149 | } | 168 | } |
| 150 | offset = offset * Common::BitSize<u64>() + std::countr_zero(v); | 169 | offset = offset * Common::BitSize<u64>() + std::countr_zero(v); |
| 151 | ++depth; | 170 | ++depth; |
| 152 | } while (depth < static_cast<s32>(used_depths)); | 171 | } while (depth < static_cast<s32>(m_used_depths)); |
| 153 | } | 172 | } |
| 154 | 173 | ||
| 155 | return static_cast<s64>(offset); | 174 | return static_cast<s64>(offset); |
| 156 | } | 175 | } |
| 157 | 176 | ||
| 158 | void SetBit(std::size_t offset) { | 177 | s64 FindFreeRange(size_t count) { |
| 178 | // Check that it is possible to find a range. | ||
| 179 | const u64* const storage_start = m_bit_storages[m_used_depths - 1]; | ||
| 180 | const u64* const storage_end = m_end_storages[m_used_depths - 1]; | ||
| 181 | |||
| 182 | // If we don't have a storage to iterate (or want more blocks than fit in a single storage), | ||
| 183 | // we can't find a free range. | ||
| 184 | if (!(storage_start < storage_end && count <= Common::BitSize<u64>())) { | ||
| 185 | return -1; | ||
| 186 | } | ||
| 187 | |||
| 188 | // Walk the storages to select a random free range. | ||
| 189 | const size_t options_per_storage = std::max<size_t>(Common::BitSize<u64>() / count, 1); | ||
| 190 | const size_t num_entries = std::max<size_t>(storage_end - storage_start, 1); | ||
| 191 | |||
| 192 | const u64 free_mask = (static_cast<u64>(1) << count) - 1; | ||
| 193 | |||
| 194 | size_t num_valid_options = 0; | ||
| 195 | s64 chosen_offset = -1; | ||
| 196 | for (size_t storage_index = 0; storage_index < num_entries; ++storage_index) { | ||
| 197 | u64 storage = storage_start[storage_index]; | ||
| 198 | for (size_t option = 0; option < options_per_storage; ++option) { | ||
| 199 | if ((storage & free_mask) == free_mask) { | ||
| 200 | // We've found a new valid option. | ||
| 201 | ++num_valid_options; | ||
| 202 | |||
| 203 | // Select the Kth valid option with probability 1/K. This leads to an overall | ||
| 204 | // uniform distribution. | ||
| 205 | if (num_valid_options == 1 || m_rng.GenerateRandom(num_valid_options) == 0) { | ||
| 206 | // Either this is our first option, or we rolled 0 and replace the previous choice. | ||
| 207 | chosen_offset = storage_index * Common::BitSize<u64>() + option * count; | ||
| 208 | } | ||
| 209 | } | ||
| 210 | storage >>= count; | ||
| 211 | } | ||
| 212 | } | ||
| 213 | |||
| 214 | // Return the random offset we chose. | ||
| 215 | return chosen_offset; | ||
| 216 | } | ||
| 217 | |||
| 218 | void SetBit(size_t offset) { | ||
| 159 | this->SetBit(this->GetHighestDepthIndex(), offset); | 219 | this->SetBit(this->GetHighestDepthIndex(), offset); |
| 160 | num_bits++; | 220 | m_num_bits++; |
| 161 | } | 221 | } |
| 162 | 222 | ||
| 163 | void ClearBit(std::size_t offset) { | 223 | void ClearBit(size_t offset) { |
| 164 | this->ClearBit(this->GetHighestDepthIndex(), offset); | 224 | this->ClearBit(this->GetHighestDepthIndex(), offset); |
| 165 | num_bits--; | 225 | m_num_bits--; |
| 166 | } | 226 | } |
| 167 | 227 | ||
| 168 | bool ClearRange(std::size_t offset, std::size_t count) { | 228 | bool ClearRange(size_t offset, size_t count) { |
| 169 | s32 depth = this->GetHighestDepthIndex(); | 229 | s32 depth = this->GetHighestDepthIndex(); |
| 170 | u64* bits = bit_storages[depth]; | 230 | u64* bits = m_bit_storages[depth]; |
| 171 | std::size_t bit_ind = offset / Common::BitSize<u64>(); | 231 | size_t bit_ind = offset / Common::BitSize<u64>(); |
| 172 | if (count < Common::BitSize<u64>()) { | 232 | if (count < Common::BitSize<u64>()) [[likely]] { |
| 173 | const std::size_t shift = offset % Common::BitSize<u64>(); | 233 | const size_t shift = offset % Common::BitSize<u64>(); |
| 174 | ASSERT(shift + count <= Common::BitSize<u64>()); | 234 | ASSERT(shift + count <= Common::BitSize<u64>()); |
| 175 | // Check that all the bits are set. | 235 | // Check that all the bits are set. |
| 176 | const u64 mask = ((u64(1) << count) - 1) << shift; | 236 | const u64 mask = ((u64(1) << count) - 1) << shift; |
| @@ -189,8 +249,8 @@ public: | |||
| 189 | ASSERT(offset % Common::BitSize<u64>() == 0); | 249 | ASSERT(offset % Common::BitSize<u64>() == 0); |
| 190 | ASSERT(count % Common::BitSize<u64>() == 0); | 250 | ASSERT(count % Common::BitSize<u64>() == 0); |
| 191 | // Check that all the bits are set. | 251 | // Check that all the bits are set. |
| 192 | std::size_t remaining = count; | 252 | size_t remaining = count; |
| 193 | std::size_t i = 0; | 253 | size_t i = 0; |
| 194 | do { | 254 | do { |
| 195 | if (bits[bit_ind + i++] != ~u64(0)) { | 255 | if (bits[bit_ind + i++] != ~u64(0)) { |
| 196 | return false; | 256 | return false; |
| @@ -209,18 +269,18 @@ public: | |||
| 209 | } while (remaining > 0); | 269 | } while (remaining > 0); |
| 210 | } | 270 | } |
| 211 | 271 | ||
| 212 | num_bits -= count; | 272 | m_num_bits -= count; |
| 213 | return true; | 273 | return true; |
| 214 | } | 274 | } |
| 215 | 275 | ||
| 216 | private: | 276 | private: |
| 217 | void SetBit(s32 depth, std::size_t offset) { | 277 | void SetBit(s32 depth, size_t offset) { |
| 218 | while (depth >= 0) { | 278 | while (depth >= 0) { |
| 219 | std::size_t ind = offset / Common::BitSize<u64>(); | 279 | size_t ind = offset / Common::BitSize<u64>(); |
| 220 | std::size_t which = offset % Common::BitSize<u64>(); | 280 | size_t which = offset % Common::BitSize<u64>(); |
| 221 | const u64 mask = u64(1) << which; | 281 | const u64 mask = u64(1) << which; |
| 222 | 282 | ||
| 223 | u64* bit = std::addressof(bit_storages[depth][ind]); | 283 | u64* bit = std::addressof(m_bit_storages[depth][ind]); |
| 224 | u64 v = *bit; | 284 | u64 v = *bit; |
| 225 | ASSERT((v & mask) == 0); | 285 | ASSERT((v & mask) == 0); |
| 226 | *bit = v | mask; | 286 | *bit = v | mask; |
| @@ -232,13 +292,13 @@ private: | |||
| 232 | } | 292 | } |
| 233 | } | 293 | } |
| 234 | 294 | ||
| 235 | void ClearBit(s32 depth, std::size_t offset) { | 295 | void ClearBit(s32 depth, size_t offset) { |
| 236 | while (depth >= 0) { | 296 | while (depth >= 0) { |
| 237 | std::size_t ind = offset / Common::BitSize<u64>(); | 297 | size_t ind = offset / Common::BitSize<u64>(); |
| 238 | std::size_t which = offset % Common::BitSize<u64>(); | 298 | size_t which = offset % Common::BitSize<u64>(); |
| 239 | const u64 mask = u64(1) << which; | 299 | const u64 mask = u64(1) << which; |
| 240 | 300 | ||
| 241 | u64* bit = std::addressof(bit_storages[depth][ind]); | 301 | u64* bit = std::addressof(m_bit_storages[depth][ind]); |
| 242 | u64 v = *bit; | 302 | u64 v = *bit; |
| 243 | ASSERT((v & mask) != 0); | 303 | ASSERT((v & mask) != 0); |
| 244 | v &= ~mask; | 304 | v &= ~mask; |
| @@ -252,7 +312,7 @@ private: | |||
| 252 | } | 312 | } |
| 253 | 313 | ||
| 254 | private: | 314 | private: |
| 255 | static constexpr s32 GetRequiredDepth(std::size_t region_size) { | 315 | static constexpr s32 GetRequiredDepth(size_t region_size) { |
| 256 | s32 depth = 0; | 316 | s32 depth = 0; |
| 257 | while (true) { | 317 | while (true) { |
| 258 | region_size /= Common::BitSize<u64>(); | 318 | region_size /= Common::BitSize<u64>(); |
| @@ -264,8 +324,8 @@ private: | |||
| 264 | } | 324 | } |
| 265 | 325 | ||
| 266 | public: | 326 | public: |
| 267 | static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size) { | 327 | static constexpr size_t CalculateManagementOverheadSize(size_t region_size) { |
| 268 | std::size_t overhead_bits = 0; | 328 | size_t overhead_bits = 0; |
| 269 | for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) { | 329 | for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) { |
| 270 | region_size = | 330 | region_size = |
| 271 | Common::AlignUp(region_size, Common::BitSize<u64>()) / Common::BitSize<u64>(); | 331 | Common::AlignUp(region_size, Common::BitSize<u64>()) / Common::BitSize<u64>(); |
| @@ -273,6 +333,13 @@ public: | |||
| 273 | } | 333 | } |
| 274 | return overhead_bits * sizeof(u64); | 334 | return overhead_bits * sizeof(u64); |
| 275 | } | 335 | } |
| 336 | |||
| 337 | private: | ||
| 338 | std::array<u64*, MaxDepth> m_bit_storages{}; | ||
| 339 | std::array<u64*, MaxDepth> m_end_storages{}; | ||
| 340 | RandomBitGenerator m_rng; | ||
| 341 | size_t m_num_bits{}; | ||
| 342 | size_t m_used_depths{}; | ||
| 276 | }; | 343 | }; |
| 277 | 344 | ||
| 278 | } // namespace Kernel | 345 | } // namespace Kernel |
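The new FindFreeRange above accepts the Kth valid candidate with probability 1/K while scanning, which yields a uniformly random choice without ever materializing the full candidate list (single-element reservoir sampling). A self-contained sketch of the same selection scheme, using std::mt19937 as a stand-in for the kernel's TinyMT-backed RandomBitGenerator:

    #include <cstdint>
    #include <iostream>
    #include <random>
    #include <vector>

    // Pick one element uniformly at random from a sequence in a single pass:
    // keep the K-th candidate seen with probability 1/K.
    template <typename It, typename Rng>
    It SelectUniform(It first, It last, Rng& rng) {
        It chosen = last;
        std::uint64_t num_seen = 0;
        for (It it = first; it != last; ++it) {
            ++num_seen;
            // After K candidates, each of them has been retained with probability exactly 1/K.
            if (std::uniform_int_distribution<std::uint64_t>(0, num_seen - 1)(rng) == 0) {
                chosen = it;
            }
        }
        return chosen;
    }

    int main() {
        std::mt19937 rng{12345};
        std::vector<std::uint64_t> free_offsets{3, 17, 42, 96}; // stand-ins for free-range offsets
        const auto it = SelectUniform(free_offsets.begin(), free_offsets.end(), rng);
        std::cout << "chosen offset: " << *it << '\n';
        return 0;
    }

The kernel version additionally shifts each storage word right by count per step, so the candidates it weighs are aligned, non-overlapping runs of bits rather than individual bits.
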
diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h index aef06e213..cfedaae61 100644 --- a/src/core/hle/kernel/k_page_buffer.h +++ b/src/core/hle/kernel/k_page_buffer.h | |||
| @@ -11,6 +11,16 @@ | |||
| 11 | 11 | ||
| 12 | namespace Kernel { | 12 | namespace Kernel { |
| 13 | 13 | ||
| 14 | class KernelCore; | ||
| 15 | |||
| 16 | class KPageBufferSlabHeap : protected impl::KSlabHeapImpl { | ||
| 17 | public: | ||
| 18 | static constexpr size_t BufferSize = PageSize; | ||
| 19 | |||
| 20 | public: | ||
| 21 | void Initialize(Core::System& system); | ||
| 22 | }; | ||
| 23 | |||
| 14 | class KPageBuffer final : public KSlabAllocated<KPageBuffer> { | 24 | class KPageBuffer final : public KSlabAllocated<KPageBuffer> { |
| 15 | public: | 25 | public: |
| 16 | explicit KPageBuffer(KernelCore&) {} | 26 | explicit KPageBuffer(KernelCore&) {} |
| @@ -21,8 +31,6 @@ public: | |||
| 21 | private: | 31 | private: |
| 22 | [[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{}; | 32 | [[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{}; |
| 23 | }; | 33 | }; |
| 24 | 34 | static_assert(sizeof(KPageBuffer) == KPageBufferSlabHeap::BufferSize); | |
| 25 | static_assert(sizeof(KPageBuffer) == PageSize); | ||
| 26 | static_assert(alignof(KPageBuffer) == PageSize); | ||
| 27 | 35 | ||
| 28 | } // namespace Kernel | 36 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h index 968753992..316f172f2 100644 --- a/src/core/hle/kernel/k_page_group.h +++ b/src/core/hle/kernel/k_page_group.h | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | 5 | ||
| 6 | #include <list> | 6 | #include <list> |
| 7 | 7 | ||
| 8 | #include "common/alignment.h" | ||
| 8 | #include "common/assert.h" | 9 | #include "common/assert.h" |
| 9 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 10 | #include "core/hle/kernel/memory_types.h" | 11 | #include "core/hle/kernel/memory_types.h" |
| @@ -12,6 +13,89 @@ | |||
| 12 | 13 | ||
| 13 | namespace Kernel { | 14 | namespace Kernel { |
| 14 | 15 | ||
| 16 | class KPageGroup; | ||
| 17 | |||
| 18 | class KBlockInfo { | ||
| 19 | private: | ||
| 20 | friend class KPageGroup; | ||
| 21 | |||
| 22 | public: | ||
| 23 | constexpr KBlockInfo() = default; | ||
| 24 | |||
| 25 | constexpr void Initialize(PAddr addr, size_t np) { | ||
| 26 | ASSERT(Common::IsAligned(addr, PageSize)); | ||
| 27 | ASSERT(static_cast<u32>(np) == np); | ||
| 28 | |||
| 29 | m_page_index = static_cast<u32>(addr) / PageSize; | ||
| 30 | m_num_pages = static_cast<u32>(np); | ||
| 31 | } | ||
| 32 | |||
| 33 | constexpr PAddr GetAddress() const { | ||
| 34 | return m_page_index * PageSize; | ||
| 35 | } | ||
| 36 | constexpr size_t GetNumPages() const { | ||
| 37 | return m_num_pages; | ||
| 38 | } | ||
| 39 | constexpr size_t GetSize() const { | ||
| 40 | return this->GetNumPages() * PageSize; | ||
| 41 | } | ||
| 42 | constexpr PAddr GetEndAddress() const { | ||
| 43 | return (m_page_index + m_num_pages) * PageSize; | ||
| 44 | } | ||
| 45 | constexpr PAddr GetLastAddress() const { | ||
| 46 | return this->GetEndAddress() - 1; | ||
| 47 | } | ||
| 48 | |||
| 49 | constexpr KBlockInfo* GetNext() const { | ||
| 50 | return m_next; | ||
| 51 | } | ||
| 52 | |||
| 53 | constexpr bool IsEquivalentTo(const KBlockInfo& rhs) const { | ||
| 54 | return m_page_index == rhs.m_page_index && m_num_pages == rhs.m_num_pages; | ||
| 55 | } | ||
| 56 | |||
| 57 | constexpr bool operator==(const KBlockInfo& rhs) const { | ||
| 58 | return this->IsEquivalentTo(rhs); | ||
| 59 | } | ||
| 60 | |||
| 61 | constexpr bool operator!=(const KBlockInfo& rhs) const { | ||
| 62 | return !(*this == rhs); | ||
| 63 | } | ||
| 64 | |||
| 65 | constexpr bool IsStrictlyBefore(PAddr addr) const { | ||
| 66 | const PAddr end = this->GetEndAddress(); | ||
| 67 | |||
| 68 | if (m_page_index != 0 && end == 0) { | ||
| 69 | return false; | ||
| 70 | } | ||
| 71 | |||
| 72 | return end < addr; | ||
| 73 | } | ||
| 74 | |||
| 75 | constexpr bool operator<(PAddr addr) const { | ||
| 76 | return this->IsStrictlyBefore(addr); | ||
| 77 | } | ||
| 78 | |||
| 79 | constexpr bool TryConcatenate(PAddr addr, size_t np) { | ||
| 80 | if (addr != 0 && addr == this->GetEndAddress()) { | ||
| 81 | m_num_pages += static_cast<u32>(np); | ||
| 82 | return true; | ||
| 83 | } | ||
| 84 | return false; | ||
| 85 | } | ||
| 86 | |||
| 87 | private: | ||
| 88 | constexpr void SetNext(KBlockInfo* next) { | ||
| 89 | m_next = next; | ||
| 90 | } | ||
| 91 | |||
| 92 | private: | ||
| 93 | KBlockInfo* m_next{}; | ||
| 94 | u32 m_page_index{}; | ||
| 95 | u32 m_num_pages{}; | ||
| 96 | }; | ||
| 97 | static_assert(sizeof(KBlockInfo) <= 0x10); | ||
| 98 | |||
| 15 | class KPageGroup final { | 99 | class KPageGroup final { |
| 16 | public: | 100 | public: |
| 17 | class Node final { | 101 | class Node final { |
| @@ -92,6 +176,8 @@ public: | |||
| 92 | return nodes.empty(); | 176 | return nodes.empty(); |
| 93 | } | 177 | } |
| 94 | 178 | ||
| 179 | void Finalize() {} | ||
| 180 | |||
| 95 | private: | 181 | private: |
| 96 | std::list<Node> nodes; | 182 | std::list<Node> nodes; |
| 97 | }; | 183 | }; |
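The new KBlockInfo above stores each physical run as a 32-bit page index plus a page count, and TryConcatenate coalesces a new run only when it begins exactly at the current end address. A trimmed stand-in (without the intrusive list linkage) showing that concatenation rule:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    using PAddr = std::uint64_t;
    constexpr std::size_t PageSize = 0x1000;

    // Simplified block record: page index + page count, merged only with a
    // physically contiguous successor.
    struct BlockInfo {
        std::uint32_t page_index{};
        std::uint32_t num_pages{};

        constexpr PAddr GetEndAddress() const {
            return (static_cast<PAddr>(page_index) + num_pages) * PageSize;
        }
        constexpr bool TryConcatenate(PAddr addr, std::size_t np) {
            if (addr != 0 && addr == this->GetEndAddress()) {
                num_pages += static_cast<std::uint32_t>(np);
                return true;
            }
            return false;
        }
    };

    int main() {
        BlockInfo block{0x10, 4};                          // pages 0x10..0x13
        assert(block.TryConcatenate(0x14 * PageSize, 2));  // adjacent run: merged
        assert(!block.TryConcatenate(0x20 * PageSize, 1)); // gap: left for a new block
        assert(block.num_pages == 6);
        return 0;
    }
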
diff --git a/src/core/hle/kernel/k_page_heap.cpp b/src/core/hle/kernel/k_page_heap.cpp index 5ede60168..7b02c7d8b 100644 --- a/src/core/hle/kernel/k_page_heap.cpp +++ b/src/core/hle/kernel/k_page_heap.cpp | |||
| @@ -44,11 +44,11 @@ size_t KPageHeap::GetNumFreePages() const { | |||
| 44 | return num_free; | 44 | return num_free; |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | PAddr KPageHeap::AllocateBlock(s32 index, bool random) { | 47 | PAddr KPageHeap::AllocateByLinearSearch(s32 index) { |
| 48 | const size_t needed_size = m_blocks[index].GetSize(); | 48 | const size_t needed_size = m_blocks[index].GetSize(); |
| 49 | 49 | ||
| 50 | for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) { | 50 | for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) { |
| 51 | if (const PAddr addr = m_blocks[i].PopBlock(random); addr != 0) { | 51 | if (const PAddr addr = m_blocks[i].PopBlock(false); addr != 0) { |
| 52 | if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) { | 52 | if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) { |
| 53 | this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize); | 53 | this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize); |
| 54 | } | 54 | } |
| @@ -59,6 +59,88 @@ PAddr KPageHeap::AllocateBlock(s32 index, bool random) { | |||
| 59 | return 0; | 59 | return 0; |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) { | ||
| 63 | // Get the size and required alignment. | ||
| 64 | const size_t needed_size = num_pages * PageSize; | ||
| 65 | const size_t align_size = align_pages * PageSize; | ||
| 66 | |||
| 67 | // Determine meta-alignment of our desired alignment size. | ||
| 68 | const size_t align_shift = std::countr_zero(align_size); | ||
| 69 | |||
| 70 | // Decide on a block to allocate from. | ||
| 71 | constexpr size_t MinimumPossibleAlignmentsForRandomAllocation = 4; | ||
| 72 | { | ||
| 73 | // By default, we'll want to look at all blocks larger than our current one. | ||
| 74 | s32 max_blocks = static_cast<s32>(m_num_blocks); | ||
| 75 | |||
| 76 | // Determine the maximum block we should try to allocate from. | ||
| 77 | size_t possible_alignments = 0; | ||
| 78 | for (s32 i = index; i < max_blocks; ++i) { | ||
| 79 | // Add the possible alignments from blocks at the current size. | ||
| 80 | possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) * | ||
| 81 | m_blocks[i].GetNumFreeBlocks(); | ||
| 82 | |||
| 83 | // If there are enough possible alignments, we don't need to look at larger blocks. | ||
| 84 | if (possible_alignments >= MinimumPossibleAlignmentsForRandomAllocation) { | ||
| 85 | max_blocks = i + 1; | ||
| 86 | break; | ||
| 87 | } | ||
| 88 | } | ||
| 89 | |||
| 90 | // If we have any possible alignments which require a larger block, we need to pick one. | ||
| 91 | if (possible_alignments > 0 && index + 1 < max_blocks) { | ||
| 92 | // Select a random alignment from the possibilities. | ||
| 93 | const size_t rnd = m_rng.GenerateRandom(possible_alignments); | ||
| 94 | |||
| 95 | // Determine which block corresponds to the random alignment we chose. | ||
| 96 | possible_alignments = 0; | ||
| 97 | for (s32 i = index; i < max_blocks; ++i) { | ||
| 98 | // Add the possible alignments from blocks at the current size. | ||
| 99 | possible_alignments += | ||
| 100 | (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) * | ||
| 101 | m_blocks[i].GetNumFreeBlocks(); | ||
| 102 | |||
| 103 | // If the current block gets us to our random choice, use the current block. | ||
| 104 | if (rnd < possible_alignments) { | ||
| 105 | index = i; | ||
| 106 | break; | ||
| 107 | } | ||
| 108 | } | ||
| 109 | } | ||
| 110 | } | ||
| 111 | |||
| 112 | // Pop a block from the index we selected. | ||
| 113 | if (PAddr addr = m_blocks[index].PopBlock(true); addr != 0) { | ||
| 114 | // Determine how much size we have left over. | ||
| 115 | if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size; | ||
| 116 | leftover_size > 0) { | ||
| 117 | // Determine how many valid alignments we can have. | ||
| 118 | const size_t possible_alignments = 1 + (leftover_size >> align_shift); | ||
| 119 | |||
| 120 | // Select a random valid alignment. | ||
| 121 | const size_t random_offset = m_rng.GenerateRandom(possible_alignments) << align_shift; | ||
| 122 | |||
| 123 | // Free memory before the random offset. | ||
| 124 | if (random_offset != 0) { | ||
| 125 | this->Free(addr, random_offset / PageSize); | ||
| 126 | } | ||
| 127 | |||
| 128 | // Advance our block by the random offset. | ||
| 129 | addr += random_offset; | ||
| 130 | |||
| 131 | // Free memory after our allocated block. | ||
| 132 | if (random_offset != leftover_size) { | ||
| 133 | this->Free(addr + needed_size, (leftover_size - random_offset) / PageSize); | ||
| 134 | } | ||
| 135 | } | ||
| 136 | |||
| 137 | // Return the block we allocated. | ||
| 138 | return addr; | ||
| 139 | } | ||
| 140 | |||
| 141 | return 0; | ||
| 142 | } | ||
| 143 | |||
| 62 | void KPageHeap::FreeBlock(PAddr block, s32 index) { | 144 | void KPageHeap::FreeBlock(PAddr block, s32 index) { |
| 63 | do { | 145 | do { |
| 64 | block = m_blocks[index++].PushBlock(block); | 146 | block = m_blocks[index++].PushBlock(block); |
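AllocateByRandom above chooses which block order to pop from by counting, for each order at or above the request, how many aligned placements its free blocks could provide, and then drawing one of those placements uniformly at random. The sketch below reproduces that two-pass weighted choice over a plain vector of free lists; FreeListSketch, the std::mt19937_64 generator, and the example counts are assumptions for illustration, and the real code also stops scanning early once at least four possible alignments have been found.

#include <bit>
#include <cstddef>
#include <cstdio>
#include <random>
#include <vector>

// Hypothetical per-order free list: block size in bytes and free-block count.
struct FreeListSketch {
    std::size_t block_size;
    std::size_t num_free;
};

// Pick the order to allocate from, weighting each order by how many aligned
// placements of (needed_size, align_size) its free blocks can provide.
std::size_t PickOrderByRandom(const std::vector<FreeListSketch>& lists, std::size_t first_order,
                              std::size_t needed_size, std::size_t align_size,
                              std::mt19937_64& rng) {
    const std::size_t align_shift = std::countr_zero(align_size);

    // First pass: count every possible aligned placement at or above first_order.
    std::size_t possible = 0;
    for (std::size_t i = first_order; i < lists.size(); ++i) {
        if (lists[i].block_size < needed_size) {
            continue; // the real code starts at an order that always fits
        }
        possible += (1 + ((lists[i].block_size - needed_size) >> align_shift)) * lists[i].num_free;
    }
    if (possible == 0) {
        return first_order; // nothing free; the caller handles failure
    }

    // Second pass: find the order that owns the randomly chosen placement.
    std::uniform_int_distribution<std::size_t> dist(0, possible - 1);
    const std::size_t chosen = dist(rng);
    std::size_t running = 0;
    for (std::size_t i = first_order; i < lists.size(); ++i) {
        if (lists[i].block_size < needed_size) {
            continue;
        }
        running += (1 + ((lists[i].block_size - needed_size) >> align_shift)) * lists[i].num_free;
        if (chosen < running) {
            return i;
        }
    }
    return lists.size() - 1;
}

int main() {
    std::mt19937_64 rng{1234};
    // Assumed orders: 4 KiB (empty), 64 KiB (two free), 2 MiB (one free).
    const std::vector<FreeListSketch> lists{{0x1000, 0}, {0x10000, 2}, {0x200000, 1}};
    const std::size_t order = PickOrderByRandom(lists, 1, 0x3000, 0x1000, rng);
    std::printf("chosen order: %zu\n", order);
    return 0;
}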
diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h index 0917a8bed..9021edcf7 100644 --- a/src/core/hle/kernel/k_page_heap.h +++ b/src/core/hle/kernel/k_page_heap.h | |||
| @@ -14,13 +14,9 @@ | |||
| 14 | 14 | ||
| 15 | namespace Kernel { | 15 | namespace Kernel { |
| 16 | 16 | ||
| 17 | class KPageHeap final { | 17 | class KPageHeap { |
| 18 | public: | 18 | public: |
| 19 | YUZU_NON_COPYABLE(KPageHeap); | ||
| 20 | YUZU_NON_MOVEABLE(KPageHeap); | ||
| 21 | |||
| 22 | KPageHeap() = default; | 19 | KPageHeap() = default; |
| 23 | ~KPageHeap() = default; | ||
| 24 | 20 | ||
| 25 | constexpr PAddr GetAddress() const { | 21 | constexpr PAddr GetAddress() const { |
| 26 | return m_heap_address; | 22 | return m_heap_address; |
| @@ -57,7 +53,20 @@ public: | |||
| 57 | m_initial_used_size = m_heap_size - free_size - reserved_size; | 53 | m_initial_used_size = m_heap_size - free_size - reserved_size; |
| 58 | } | 54 | } |
| 59 | 55 | ||
| 60 | PAddr AllocateBlock(s32 index, bool random); | 56 | PAddr AllocateBlock(s32 index, bool random) { |
| 57 | if (random) { | ||
| 58 | const size_t block_pages = m_blocks[index].GetNumPages(); | ||
| 59 | return this->AllocateByRandom(index, block_pages, block_pages); | ||
| 60 | } else { | ||
| 61 | return this->AllocateByLinearSearch(index); | ||
| 62 | } | ||
| 63 | } | ||
| 64 | |||
| 65 | PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) { | ||
| 66 | // TODO: linear search support? | ||
| 67 | return this->AllocateByRandom(index, num_pages, align_pages); | ||
| 68 | } | ||
| 69 | |||
| 61 | void Free(PAddr addr, size_t num_pages); | 70 | void Free(PAddr addr, size_t num_pages); |
| 62 | 71 | ||
| 63 | static size_t CalculateManagementOverheadSize(size_t region_size) { | 72 | static size_t CalculateManagementOverheadSize(size_t region_size) { |
| @@ -68,7 +77,7 @@ public: | |||
| 68 | static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) { | 77 | static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) { |
| 69 | const size_t target_pages = std::max(num_pages, align_pages); | 78 | const size_t target_pages = std::max(num_pages, align_pages); |
| 70 | for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) { | 79 | for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) { |
| 71 | if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) { | 80 | if (target_pages <= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) { |
| 72 | return static_cast<s32>(i); | 81 | return static_cast<s32>(i); |
| 73 | } | 82 | } |
| 74 | } | 83 | } |
| @@ -77,7 +86,7 @@ public: | |||
| 77 | 86 | ||
| 78 | static constexpr s32 GetBlockIndex(size_t num_pages) { | 87 | static constexpr s32 GetBlockIndex(size_t num_pages) { |
| 79 | for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) { | 88 | for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) { |
| 80 | if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) { | 89 | if (num_pages >= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) { |
| 81 | return i; | 90 | return i; |
| 82 | } | 91 | } |
| 83 | } | 92 | } |
| @@ -85,7 +94,7 @@ public: | |||
| 85 | } | 94 | } |
| 86 | 95 | ||
| 87 | static constexpr size_t GetBlockSize(size_t index) { | 96 | static constexpr size_t GetBlockSize(size_t index) { |
| 88 | return size_t(1) << MemoryBlockPageShifts[index]; | 97 | return static_cast<size_t>(1) << MemoryBlockPageShifts[index]; |
| 89 | } | 98 | } |
| 90 | 99 | ||
| 91 | static constexpr size_t GetBlockNumPages(size_t index) { | 100 | static constexpr size_t GetBlockNumPages(size_t index) { |
| @@ -93,13 +102,9 @@ public: | |||
| 93 | } | 102 | } |
| 94 | 103 | ||
| 95 | private: | 104 | private: |
| 96 | class Block final { | 105 | class Block { |
| 97 | public: | 106 | public: |
| 98 | YUZU_NON_COPYABLE(Block); | ||
| 99 | YUZU_NON_MOVEABLE(Block); | ||
| 100 | |||
| 101 | Block() = default; | 107 | Block() = default; |
| 102 | ~Block() = default; | ||
| 103 | 108 | ||
| 104 | constexpr size_t GetShift() const { | 109 | constexpr size_t GetShift() const { |
| 105 | return m_block_shift; | 110 | return m_block_shift; |
| @@ -201,6 +206,9 @@ private: | |||
| 201 | }; | 206 | }; |
| 202 | 207 | ||
| 203 | private: | 208 | private: |
| 209 | PAddr AllocateByLinearSearch(s32 index); | ||
| 210 | PAddr AllocateByRandom(s32 index, size_t num_pages, size_t align_pages); | ||
| 211 | |||
| 204 | static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts, | 212 | static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts, |
| 205 | size_t num_block_shifts); | 213 | size_t num_block_shifts); |
| 206 | 214 | ||
| @@ -209,7 +217,8 @@ private: | |||
| 209 | size_t m_heap_size{}; | 217 | size_t m_heap_size{}; |
| 210 | size_t m_initial_used_size{}; | 218 | size_t m_initial_used_size{}; |
| 211 | size_t m_num_blocks{}; | 219 | size_t m_num_blocks{}; |
| 212 | std::array<Block, NumMemoryBlockPageShifts> m_blocks{}; | 220 | std::array<Block, NumMemoryBlockPageShifts> m_blocks; |
| 221 | KPageBitmap::RandomBitGenerator m_rng; | ||
| 213 | std::vector<u64> m_management_data; | 222 | std::vector<u64> m_management_data; |
| 214 | }; | 223 | }; |
| 215 | 224 | ||
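GetAlignedBlockIndex and GetBlockIndex in the header walk a table of block page shifts to find, respectively, the smallest order whose block can hold an aligned request and the largest order whose block a page count still fills. A small sketch of that index math follows; the three-entry shift table is an assumed example, since the real MemoryBlockPageShifts array is defined elsewhere in KPageHeap and not shown in this diff.

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdio>

constexpr std::size_t PageSize = 0x1000;
// Assumed example shifts: 4 KiB, 64 KiB, and 2 MiB blocks (12, 16, 21).
constexpr std::array<std::size_t, 3> BlockPageShifts{12, 16, 21};

// Smallest order whose block can contain max(num_pages, align_pages) pages.
constexpr int GetAlignedBlockIndexSketch(std::size_t num_pages, std::size_t align_pages) {
    const std::size_t target_pages = std::max(num_pages, align_pages);
    for (std::size_t i = 0; i < BlockPageShifts.size(); ++i) {
        if (target_pages <= (static_cast<std::size_t>(1) << BlockPageShifts[i]) / PageSize) {
            return static_cast<int>(i);
        }
    }
    return -1;
}

// Largest order whose block still fits entirely inside num_pages.
constexpr int GetBlockIndexSketch(std::size_t num_pages) {
    for (int i = static_cast<int>(BlockPageShifts.size()) - 1; i >= 0; --i) {
        if (num_pages >= (static_cast<std::size_t>(1) << BlockPageShifts[i]) / PageSize) {
            return i;
        }
    }
    return -1;
}

int main() {
    std::printf("aligned index for 3 pages: %d\n", GetAlignedBlockIndexSketch(3, 1)); // 1 (64 KiB)
    std::printf("block index for 3 pages:   %d\n", GetBlockIndexSketch(3));           // 0 (4 KiB)
    return 0;
}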
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp index 307e491cb..5387bf5fe 100644 --- a/src/core/hle/kernel/k_page_table.cpp +++ b/src/core/hle/kernel/k_page_table.cpp | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include "core/hle/kernel/k_resource_limit.h" | 15 | #include "core/hle/kernel/k_resource_limit.h" |
| 16 | #include "core/hle/kernel/k_scoped_resource_reservation.h" | 16 | #include "core/hle/kernel/k_scoped_resource_reservation.h" |
| 17 | #include "core/hle/kernel/k_system_control.h" | 17 | #include "core/hle/kernel/k_system_control.h" |
| 18 | #include "core/hle/kernel/k_system_resource.h" | ||
| 18 | #include "core/hle/kernel/kernel.h" | 19 | #include "core/hle/kernel/kernel.h" |
| 19 | #include "core/hle/kernel/svc_results.h" | 20 | #include "core/hle/kernel/svc_results.h" |
| 20 | #include "core/memory.h" | 21 | #include "core/memory.h" |
| @@ -23,6 +24,61 @@ namespace Kernel { | |||
| 23 | 24 | ||
| 24 | namespace { | 25 | namespace { |
| 25 | 26 | ||
| 27 | class KScopedLightLockPair { | ||
| 28 | YUZU_NON_COPYABLE(KScopedLightLockPair); | ||
| 29 | YUZU_NON_MOVEABLE(KScopedLightLockPair); | ||
| 30 | |||
| 31 | private: | ||
| 32 | KLightLock* m_lower; | ||
| 33 | KLightLock* m_upper; | ||
| 34 | |||
| 35 | public: | ||
| 36 | KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) { | ||
| 37 | // Ensure our locks are in a consistent order. | ||
| 38 | if (std::addressof(lhs) <= std::addressof(rhs)) { | ||
| 39 | m_lower = std::addressof(lhs); | ||
| 40 | m_upper = std::addressof(rhs); | ||
| 41 | } else { | ||
| 42 | m_lower = std::addressof(rhs); | ||
| 43 | m_upper = std::addressof(lhs); | ||
| 44 | } | ||
| 45 | |||
| 46 | // Acquire both locks. | ||
| 47 | m_lower->Lock(); | ||
| 48 | if (m_lower != m_upper) { | ||
| 49 | m_upper->Lock(); | ||
| 50 | } | ||
| 51 | } | ||
| 52 | |||
| 53 | ~KScopedLightLockPair() { | ||
| 54 | // Unlock the upper lock. | ||
| 55 | if (m_upper != nullptr && m_upper != m_lower) { | ||
| 56 | m_upper->Unlock(); | ||
| 57 | } | ||
| 58 | |||
| 59 | // Unlock the lower lock. | ||
| 60 | if (m_lower != nullptr) { | ||
| 61 | m_lower->Unlock(); | ||
| 62 | } | ||
| 63 | } | ||
| 64 | |||
| 65 | public: | ||
| 66 | // Utility. | ||
| 67 | void TryUnlockHalf(KLightLock& lock) { | ||
| 68 | // Only allow unlocking if the lock is half the pair. | ||
| 69 | if (m_lower != m_upper) { | ||
| 70 | // We want to be sure the lock is one we own. | ||
| 71 | if (m_lower == std::addressof(lock)) { | ||
| 72 | lock.Unlock(); | ||
| 73 | m_lower = nullptr; | ||
| 74 | } else if (m_upper == std::addressof(lock)) { | ||
| 75 | lock.Unlock(); | ||
| 76 | m_upper = nullptr; | ||
| 77 | } | ||
| 78 | } | ||
| 79 | } | ||
| 80 | }; | ||
| 81 | |||
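KScopedLightLockPair, added above, prevents lock-order inversion when two page tables must be locked together: it always acquires the lower-addressed lock first and locks only once when both references name the same lock. The same idea expressed with standard mutexes, as a sketch rather than the kernel's KLightLock:

#include <memory>
#include <mutex>

// RAII pair-lock that orders acquisition by address, so two threads locking
// (a, b) and (b, a) cannot deadlock, and that locks only once when a == b.
class ScopedMutexPairSketch {
public:
    ScopedMutexPairSketch(std::mutex& lhs, std::mutex& rhs) {
        // Ensure a consistent lock order.
        if (std::addressof(lhs) <= std::addressof(rhs)) {
            m_lower = std::addressof(lhs);
            m_upper = std::addressof(rhs);
        } else {
            m_lower = std::addressof(rhs);
            m_upper = std::addressof(lhs);
        }

        // Acquire both locks, skipping the second when they are the same object.
        m_lower->lock();
        if (m_lower != m_upper) {
            m_upper->lock();
        }
    }

    ~ScopedMutexPairSketch() {
        if (m_upper != m_lower) {
            m_upper->unlock();
        }
        m_lower->unlock();
    }

    ScopedMutexPairSketch(const ScopedMutexPairSketch&) = delete;
    ScopedMutexPairSketch& operator=(const ScopedMutexPairSketch&) = delete;

private:
    std::mutex* m_lower{};
    std::mutex* m_upper{};
};

int main() {
    std::mutex a, b;
    {
        ScopedMutexPairSketch lk(a, b); // locks min(&a, &b) first, then the other
    }
    {
        ScopedMutexPairSketch same(a, a); // locks a exactly once
    }
    return 0;
}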
| 26 | using namespace Common::Literals; | 82 | using namespace Common::Literals; |
| 27 | 83 | ||
| 28 | constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { | 84 | constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { |
| @@ -49,9 +105,10 @@ KPageTable::KPageTable(Core::System& system_) | |||
| 49 | KPageTable::~KPageTable() = default; | 105 | KPageTable::~KPageTable() = default; |
| 50 | 106 | ||
| 51 | Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, | 107 | Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, |
| 52 | VAddr code_addr, size_t code_size, | 108 | bool enable_das_merge, bool from_back, |
| 53 | KMemoryBlockSlabManager* mem_block_slab_manager, | 109 | KMemoryManager::Pool pool, VAddr code_addr, |
| 54 | KMemoryManager::Pool pool) { | 110 | size_t code_size, KSystemResource* system_resource, |
| 111 | KResourceLimit* resource_limit) { | ||
| 55 | 112 | ||
| 56 | const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) { | 113 | const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) { |
| 57 | return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type); | 114 | return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type); |
| @@ -112,11 +169,13 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type | |||
| 112 | 169 | ||
| 113 | // Set other basic fields | 170 | // Set other basic fields |
| 114 | m_enable_aslr = enable_aslr; | 171 | m_enable_aslr = enable_aslr; |
| 115 | m_enable_device_address_space_merge = false; | 172 | m_enable_device_address_space_merge = enable_das_merge; |
| 116 | m_address_space_start = start; | 173 | m_address_space_start = start; |
| 117 | m_address_space_end = end; | 174 | m_address_space_end = end; |
| 118 | m_is_kernel = false; | 175 | m_is_kernel = false; |
| 119 | m_memory_block_slab_manager = mem_block_slab_manager; | 176 | m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer(); |
| 177 | m_block_info_manager = system_resource->GetBlockInfoManagerPointer(); | ||
| 178 | m_resource_limit = resource_limit; | ||
| 120 | 179 | ||
| 121 | // Determine the region we can place our undetermineds in | 180 | // Determine the region we can place our undetermineds in |
| 122 | VAddr alloc_start{}; | 181 | VAddr alloc_start{}; |
| @@ -215,10 +274,22 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type | |||
| 215 | } | 274 | } |
| 216 | } | 275 | } |
| 217 | 276 | ||
| 218 | // Set heap members | 277 | // Set heap and fill members. |
| 219 | m_current_heap_end = m_heap_region_start; | 278 | m_current_heap_end = m_heap_region_start; |
| 220 | m_max_heap_size = 0; | 279 | m_max_heap_size = 0; |
| 221 | m_max_physical_memory_size = 0; | 280 | m_mapped_physical_memory_size = 0; |
| 281 | m_mapped_unsafe_physical_memory = 0; | ||
| 282 | m_mapped_insecure_memory = 0; | ||
| 283 | m_mapped_ipc_server_memory = 0; | ||
| 284 | |||
| 285 | m_heap_fill_value = 0; | ||
| 286 | m_ipc_fill_value = 0; | ||
| 287 | m_stack_fill_value = 0; | ||
| 288 | |||
| 289 | // Set allocation option. | ||
| 290 | m_allocate_option = | ||
| 291 | KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack | ||
| 292 | : KMemoryManager::Direction::FromFront); | ||
| 222 | 293 | ||
| 223 | // Ensure that our regions are inside our address space | 294 | // Ensure that our regions are inside our address space |
| 224 | auto IsInAddressSpace = [&](VAddr addr) { | 295 | auto IsInAddressSpace = [&](VAddr addr) { |
| @@ -267,6 +338,16 @@ void KPageTable::Finalize() { | |||
| 267 | m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size); | 338 | m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size); |
| 268 | }); | 339 | }); |
| 269 | 340 | ||
| 341 | // Release any insecure mapped memory. | ||
| 342 | if (m_mapped_insecure_memory) { | ||
| 343 | UNIMPLEMENTED(); | ||
| 344 | } | ||
| 345 | |||
| 346 | // Release any ipc server memory. | ||
| 347 | if (m_mapped_ipc_server_memory) { | ||
| 348 | UNIMPLEMENTED(); | ||
| 349 | } | ||
| 350 | |||
| 270 | // Close the backing page table, as the destructor is not called for guest objects. | 351 | // Close the backing page table, as the destructor is not called for guest objects. |
| 271 | m_page_table_impl.reset(); | 352 | m_page_table_impl.reset(); |
| 272 | } | 353 | } |
| @@ -650,7 +731,8 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu | |||
| 650 | 731 | ||
| 651 | Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table, | 732 | Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table, |
| 652 | VAddr src_addr) { | 733 | VAddr src_addr) { |
| 653 | KScopedLightLock lk(m_general_lock); | 734 | // Acquire the table locks. |
| 735 | KScopedLightLockPair lk(src_page_table.m_general_lock, m_general_lock); | ||
| 654 | 736 | ||
| 655 | const size_t num_pages{size / PageSize}; | 737 | const size_t num_pages{size / PageSize}; |
| 656 | 738 | ||
| @@ -686,9 +768,753 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& s | |||
| 686 | R_SUCCEED(); | 768 | R_SUCCEED(); |
| 687 | } | 769 | } |
| 688 | 770 | ||
| 771 | Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, | ||
| 772 | VAddr address, size_t size, KMemoryPermission test_perm, | ||
| 773 | KMemoryState dst_state) { | ||
| 774 | // Validate pre-conditions. | ||
| 775 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 776 | ASSERT(test_perm == KMemoryPermission::UserReadWrite || | ||
| 777 | test_perm == KMemoryPermission::UserRead); | ||
| 778 | |||
| 779 | // Check that the address is in range. | ||
| 780 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 781 | |||
| 782 | // Get the source permission. | ||
| 783 | const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite) | ||
| 784 | ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped | ||
| 785 | : KMemoryPermission::UserRead; | ||
| 786 | |||
| 787 | // Get aligned extents. | ||
| 788 | const VAddr aligned_src_start = Common::AlignDown((address), PageSize); | ||
| 789 | const VAddr aligned_src_end = Common::AlignUp((address) + size, PageSize); | ||
| 790 | const VAddr mapping_src_start = Common::AlignUp((address), PageSize); | ||
| 791 | const VAddr mapping_src_end = Common::AlignDown((address) + size, PageSize); | ||
| 792 | |||
| 793 | const auto aligned_src_last = (aligned_src_end)-1; | ||
| 794 | const auto mapping_src_last = (mapping_src_end)-1; | ||
| 795 | |||
| 796 | // Get the test state and attribute mask. | ||
| 797 | KMemoryState test_state; | ||
| 798 | KMemoryAttribute test_attr_mask; | ||
| 799 | switch (dst_state) { | ||
| 800 | case KMemoryState::Ipc: | ||
| 801 | test_state = KMemoryState::FlagCanUseIpc; | ||
| 802 | test_attr_mask = | ||
| 803 | KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked; | ||
| 804 | break; | ||
| 805 | case KMemoryState::NonSecureIpc: | ||
| 806 | test_state = KMemoryState::FlagCanUseNonSecureIpc; | ||
| 807 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 808 | break; | ||
| 809 | case KMemoryState::NonDeviceIpc: | ||
| 810 | test_state = KMemoryState::FlagCanUseNonDeviceIpc; | ||
| 811 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 812 | break; | ||
| 813 | default: | ||
| 814 | R_THROW(ResultInvalidCombination); | ||
| 815 | } | ||
| 816 | |||
| 817 | // Ensure that on failure, we roll back appropriately. | ||
| 818 | size_t mapped_size = 0; | ||
| 819 | ON_RESULT_FAILURE { | ||
| 820 | if (mapped_size > 0) { | ||
| 821 | this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size, | ||
| 822 | src_perm); | ||
| 823 | } | ||
| 824 | }; | ||
| 825 | |||
| 826 | size_t blocks_needed = 0; | ||
| 827 | |||
| 828 | // Iterate, mapping as needed. | ||
| 829 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start); | ||
| 830 | while (true) { | ||
| 831 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 832 | |||
| 833 | // Validate the current block. | ||
| 834 | R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm, | ||
| 835 | test_attr_mask, KMemoryAttribute::None)); | ||
| 836 | |||
| 837 | if (mapping_src_start < mapping_src_end && (mapping_src_start) < info.GetEndAddress() && | ||
| 838 | info.GetAddress() < (mapping_src_end)) { | ||
| 839 | const auto cur_start = | ||
| 840 | info.GetAddress() >= (mapping_src_start) ? info.GetAddress() : (mapping_src_start); | ||
| 841 | const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress() | ||
| 842 | : (mapping_src_end); | ||
| 843 | const size_t cur_size = cur_end - cur_start; | ||
| 844 | |||
| 845 | if (info.GetAddress() < (mapping_src_start)) { | ||
| 846 | ++blocks_needed; | ||
| 847 | } | ||
| 848 | if (mapping_src_last < info.GetLastAddress()) { | ||
| 849 | ++blocks_needed; | ||
| 850 | } | ||
| 851 | |||
| 852 | // Set the permissions on the block, if we need to. | ||
| 853 | if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) { | ||
| 854 | R_TRY(Operate(cur_start, cur_size / PageSize, src_perm, | ||
| 855 | OperationType::ChangePermissions)); | ||
| 856 | } | ||
| 857 | |||
| 858 | // Note that we mapped this part. | ||
| 859 | mapped_size += cur_size; | ||
| 860 | } | ||
| 861 | |||
| 862 | // If the block is at the end, we're done. | ||
| 863 | if (aligned_src_last <= info.GetLastAddress()) { | ||
| 864 | break; | ||
| 865 | } | ||
| 866 | |||
| 867 | // Advance. | ||
| 868 | ++it; | ||
| 869 | ASSERT(it != m_memory_block_manager.end()); | ||
| 870 | } | ||
| 871 | |||
| 872 | if (out_blocks_needed != nullptr) { | ||
| 873 | ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | ||
| 874 | *out_blocks_needed = blocks_needed; | ||
| 875 | } | ||
| 876 | |||
| 877 | R_SUCCEED(); | ||
| 878 | } | ||
| 879 | |||
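SetupForIpcClient derives two extent pairs from the unaligned client buffer: the page-aligned envelope (AlignDown of the start, AlignUp of the end) that must be visited, and the fully contained page range (AlignUp of the start, AlignDown of the end) that can be remapped directly; anything between the two becomes a partial start or end page. A short sketch of that extent math, with hand-rolled helpers standing in for Common::AlignUp and Common::AlignDown and an arbitrary example buffer:

#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t PageSize = 0x1000;
using VAddr = std::uint64_t;

constexpr VAddr AlignDown(VAddr v, std::size_t a) { return v & ~static_cast<VAddr>(a - 1); }
constexpr VAddr AlignUp(VAddr v, std::size_t a) { return AlignDown(v + a - 1, a); }

int main() {
    const VAddr address = 0x10000800; // unaligned client buffer (assumed example)
    const std::size_t size = 0x2100;

    // Envelope that must be inspected (covers the partial pages).
    const VAddr aligned_start = AlignDown(address, PageSize);
    const VAddr aligned_end = AlignUp(address + size, PageSize);

    // Whole pages that can be mapped straight into the server.
    const VAddr mapping_start = AlignUp(address, PageSize);
    const VAddr mapping_end = AlignDown(address + size, PageSize);

    std::printf("aligned [%llx, %llx)\n", static_cast<unsigned long long>(aligned_start),
                static_cast<unsigned long long>(aligned_end));
    std::printf("mapping [%llx, %llx)\n", static_cast<unsigned long long>(mapping_start),
                static_cast<unsigned long long>(mapping_end));

    // Partial pages exist whenever the two ranges differ at either end.
    std::printf("start partial page: %s\n", aligned_start < mapping_start ? "yes" : "no");
    std::printf("end partial page:   %s\n", mapping_end < aligned_end ? "yes" : "no");
    return 0;
}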
| 880 | Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr, | ||
| 881 | KMemoryPermission test_perm, KMemoryState dst_state, | ||
| 882 | KPageTable& src_page_table, bool send) { | ||
| 883 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 884 | ASSERT(src_page_table.IsLockedByCurrentThread()); | ||
| 885 | |||
| 886 | // Check that we can theoretically map. | ||
| 887 | const VAddr region_start = m_alias_region_start; | ||
| 888 | const size_t region_size = m_alias_region_end - m_alias_region_start; | ||
| 889 | R_UNLESS(size < region_size, ResultOutOfAddressSpace); | ||
| 890 | |||
| 891 | // Get aligned source extents. | ||
| 892 | const VAddr src_start = src_addr; | ||
| 893 | const VAddr src_end = src_addr + size; | ||
| 894 | const VAddr aligned_src_start = Common::AlignDown((src_start), PageSize); | ||
| 895 | const VAddr aligned_src_end = Common::AlignUp((src_start) + size, PageSize); | ||
| 896 | const VAddr mapping_src_start = Common::AlignUp((src_start), PageSize); | ||
| 897 | const VAddr mapping_src_end = Common::AlignDown((src_start) + size, PageSize); | ||
| 898 | const size_t aligned_src_size = aligned_src_end - aligned_src_start; | ||
| 899 | const size_t mapping_src_size = | ||
| 900 | (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0; | ||
| 901 | |||
| 902 | // Select a random address to map at. | ||
| 903 | VAddr dst_addr = | ||
| 904 | this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize, | ||
| 905 | PageSize, 0, this->GetNumGuardPages()); | ||
| 906 | |||
| 907 | R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace); | ||
| 908 | |||
| 909 | // Check that we can perform the operation we're about to perform. | ||
| 910 | ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state)); | ||
| 911 | |||
| 912 | // Create an update allocator. | ||
| 913 | Result allocator_result; | ||
| 914 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 915 | m_memory_block_slab_manager); | ||
| 916 | R_TRY(allocator_result); | ||
| 917 | |||
| 918 | // We're going to perform an update, so create a helper. | ||
| 919 | KScopedPageTableUpdater updater(this); | ||
| 920 | |||
| 921 | // Reserve space for any partial pages we allocate. | ||
| 922 | const size_t unmapped_size = aligned_src_size - mapping_src_size; | ||
| 923 | KScopedResourceReservation memory_reservation( | ||
| 924 | m_resource_limit, LimitableResource::PhysicalMemoryMax, unmapped_size); | ||
| 925 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | ||
| 926 | |||
| 927 | // Ensure that we manage page references correctly. | ||
| 928 | PAddr start_partial_page = 0; | ||
| 929 | PAddr end_partial_page = 0; | ||
| 930 | VAddr cur_mapped_addr = dst_addr; | ||
| 931 | |||
| 932 | // If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll | ||
| 933 | // be freed on scope exit. | ||
| 934 | SCOPE_EXIT({ | ||
| 935 | if (start_partial_page != 0) { | ||
| 936 | m_system.Kernel().MemoryManager().Close(start_partial_page, 1); | ||
| 937 | } | ||
| 938 | if (end_partial_page != 0) { | ||
| 939 | m_system.Kernel().MemoryManager().Close(end_partial_page, 1); | ||
| 940 | } | ||
| 941 | }); | ||
| 942 | |||
| 943 | ON_RESULT_FAILURE { | ||
| 944 | if (cur_mapped_addr != dst_addr) { | ||
| 945 | // HACK: Manually close the pages. | ||
| 946 | HACK_ClosePages(dst_addr, (cur_mapped_addr - dst_addr) / PageSize); | ||
| 947 | |||
| 948 | ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize, | ||
| 949 | KMemoryPermission::None, OperationType::Unmap) | ||
| 950 | .IsSuccess()); | ||
| 951 | } | ||
| 952 | }; | ||
| 953 | |||
| 954 | // Allocate the start page as needed. | ||
| 955 | if (aligned_src_start < mapping_src_start) { | ||
| 956 | start_partial_page = | ||
| 957 | m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option); | ||
| 958 | R_UNLESS(start_partial_page != 0, ResultOutOfMemory); | ||
| 959 | } | ||
| 960 | |||
| 961 | // Allocate the end page as needed. | ||
| 962 | if (mapping_src_end < aligned_src_end && | ||
| 963 | (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) { | ||
| 964 | end_partial_page = | ||
| 965 | m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option); | ||
| 966 | R_UNLESS(end_partial_page != 0, ResultOutOfMemory); | ||
| 967 | } | ||
| 968 | |||
| 969 | // Get the implementation. | ||
| 970 | auto& src_impl = src_page_table.PageTableImpl(); | ||
| 971 | |||
| 972 | // Get the fill value for partial pages. | ||
| 973 | const auto fill_val = m_ipc_fill_value; | ||
| 974 | |||
| 975 | // Begin traversal. | ||
| 976 | Common::PageTable::TraversalContext context; | ||
| 977 | Common::PageTable::TraversalEntry next_entry; | ||
| 978 | bool traverse_valid = src_impl.BeginTraversal(next_entry, context, aligned_src_start); | ||
| 979 | ASSERT(traverse_valid); | ||
| 980 | |||
| 981 | // Prepare tracking variables. | ||
| 982 | PAddr cur_block_addr = next_entry.phys_addr; | ||
| 983 | size_t cur_block_size = | ||
| 984 | next_entry.block_size - ((cur_block_addr) & (next_entry.block_size - 1)); | ||
| 985 | size_t tot_block_size = cur_block_size; | ||
| 986 | |||
| 987 | // Map the start page, if we have one. | ||
| 988 | if (start_partial_page != 0) { | ||
| 989 | // Ensure the page holds correct data. | ||
| 990 | const VAddr start_partial_virt = | ||
| 991 | GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), start_partial_page); | ||
| 992 | if (send) { | ||
| 993 | const size_t partial_offset = src_start - aligned_src_start; | ||
| 994 | size_t copy_size, clear_size; | ||
| 995 | if (src_end < mapping_src_start) { | ||
| 996 | copy_size = size; | ||
| 997 | clear_size = mapping_src_start - src_end; | ||
| 998 | } else { | ||
| 999 | copy_size = mapping_src_start - src_start; | ||
| 1000 | clear_size = 0; | ||
| 1001 | } | ||
| 1002 | |||
| 1003 | std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val, | ||
| 1004 | partial_offset); | ||
| 1005 | std::memcpy( | ||
| 1006 | m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset), | ||
| 1007 | m_system.Memory().GetPointer<void>( | ||
| 1008 | GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), cur_block_addr) + | ||
| 1009 | partial_offset), | ||
| 1010 | copy_size); | ||
| 1011 | if (clear_size > 0) { | ||
| 1012 | std::memset(m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset + | ||
| 1013 | copy_size), | ||
| 1014 | fill_val, clear_size); | ||
| 1015 | } | ||
| 1016 | } else { | ||
| 1017 | std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val, PageSize); | ||
| 1018 | } | ||
| 1019 | |||
| 1020 | // Map the page. | ||
| 1021 | R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page)); | ||
| 1022 | |||
| 1023 | // HACK: Manually open the pages. | ||
| 1024 | HACK_OpenPages(start_partial_page, 1); | ||
| 1025 | |||
| 1026 | // Update tracking extents. | ||
| 1027 | cur_mapped_addr += PageSize; | ||
| 1028 | cur_block_addr += PageSize; | ||
| 1029 | cur_block_size -= PageSize; | ||
| 1030 | |||
| 1031 | // If the block's size was one page, we may need to continue traversal. | ||
| 1032 | if (cur_block_size == 0 && aligned_src_size > PageSize) { | ||
| 1033 | traverse_valid = src_impl.ContinueTraversal(next_entry, context); | ||
| 1034 | ASSERT(traverse_valid); | ||
| 1035 | |||
| 1036 | cur_block_addr = next_entry.phys_addr; | ||
| 1037 | cur_block_size = next_entry.block_size; | ||
| 1038 | tot_block_size += next_entry.block_size; | ||
| 1039 | } | ||
| 1040 | } | ||
| 1041 | |||
| 1042 | // Map the remaining pages. | ||
| 1043 | while (aligned_src_start + tot_block_size < mapping_src_end) { | ||
| 1044 | // Continue the traversal. | ||
| 1045 | traverse_valid = src_impl.ContinueTraversal(next_entry, context); | ||
| 1046 | ASSERT(traverse_valid); | ||
| 1047 | |||
| 1048 | // Process the block. | ||
| 1049 | if (next_entry.phys_addr != cur_block_addr + cur_block_size) { | ||
| 1050 | // Map the block we've been processing so far. | ||
| 1051 | R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map, | ||
| 1052 | cur_block_addr)); | ||
| 1053 | |||
| 1054 | // HACK: Manually open the pages. | ||
| 1055 | HACK_OpenPages(cur_block_addr, cur_block_size / PageSize); | ||
| 1056 | |||
| 1057 | // Update tracking extents. | ||
| 1058 | cur_mapped_addr += cur_block_size; | ||
| 1059 | cur_block_addr = next_entry.phys_addr; | ||
| 1060 | cur_block_size = next_entry.block_size; | ||
| 1061 | } else { | ||
| 1062 | cur_block_size += next_entry.block_size; | ||
| 1063 | } | ||
| 1064 | tot_block_size += next_entry.block_size; | ||
| 1065 | } | ||
| 1066 | |||
| 1067 | // Handle the last direct-mapped page. | ||
| 1068 | if (const VAddr mapped_block_end = aligned_src_start + tot_block_size - cur_block_size; | ||
| 1069 | mapped_block_end < mapping_src_end) { | ||
| 1070 | const size_t last_block_size = mapping_src_end - mapped_block_end; | ||
| 1071 | |||
| 1072 | // Map the last block. | ||
| 1073 | R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map, | ||
| 1074 | cur_block_addr)); | ||
| 1075 | |||
| 1076 | // HACK: Manually open the pages. | ||
| 1077 | HACK_OpenPages(cur_block_addr, last_block_size / PageSize); | ||
| 1078 | |||
| 1079 | // Update tracking extents. | ||
| 1080 | cur_mapped_addr += last_block_size; | ||
| 1081 | cur_block_addr += last_block_size; | ||
| 1082 | if (mapped_block_end + cur_block_size < aligned_src_end && | ||
| 1083 | cur_block_size == last_block_size) { | ||
| 1084 | traverse_valid = src_impl.ContinueTraversal(next_entry, context); | ||
| 1085 | ASSERT(traverse_valid); | ||
| 1086 | |||
| 1087 | cur_block_addr = next_entry.phys_addr; | ||
| 1088 | } | ||
| 1089 | } | ||
| 1090 | |||
| 1091 | // Map the end page, if we have one. | ||
| 1092 | if (end_partial_page != 0) { | ||
| 1093 | // Ensure the page holds correct data. | ||
| 1094 | const VAddr end_partial_virt = | ||
| 1095 | GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), end_partial_page); | ||
| 1096 | if (send) { | ||
| 1097 | const size_t copy_size = src_end - mapping_src_end; | ||
| 1098 | std::memcpy(m_system.Memory().GetPointer<void>(end_partial_virt), | ||
| 1099 | m_system.Memory().GetPointer<void>(GetHeapVirtualAddress( | ||
| 1100 | m_system.Kernel().MemoryLayout(), cur_block_addr)), | ||
| 1101 | copy_size); | ||
| 1102 | std::memset(m_system.Memory().GetPointer<void>(end_partial_virt + copy_size), fill_val, | ||
| 1103 | PageSize - copy_size); | ||
| 1104 | } else { | ||
| 1105 | std::memset(m_system.Memory().GetPointer<void>(end_partial_virt), fill_val, PageSize); | ||
| 1106 | } | ||
| 1107 | |||
| 1108 | // Map the page. | ||
| 1109 | R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page)); | ||
| 1110 | |||
| 1111 | // HACK: Manually open the pages. | ||
| 1112 | HACK_OpenPages(end_partial_page, 1); | ||
| 1113 | } | ||
| 1114 | |||
| 1115 | // Update memory blocks to reflect our changes | ||
| 1116 | m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize, | ||
| 1117 | dst_state, test_perm, KMemoryAttribute::None, | ||
| 1118 | KMemoryBlockDisableMergeAttribute::Normal, | ||
| 1119 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1120 | |||
| 1121 | // Set the output address. | ||
| 1122 | *out_addr = dst_addr + (src_start - aligned_src_start); | ||
| 1123 | |||
| 1124 | // We succeeded. | ||
| 1125 | memory_reservation.Commit(); | ||
| 1126 | R_SUCCEED(); | ||
| 1127 | } | ||
| 1128 | |||
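For the partial start page, SetupForIpcServer fills the bytes before the client data with the IPC fill value, copies either the whole message (when it ends before the first whole page) or the bytes up to that page boundary, and clears any remainder. That split can be isolated as in the sketch below; copy_size and clear_size follow the diff, while the struct, the function, and the example addresses in main are assumptions for illustration.

#include <cstddef>
#include <cstdint>
#include <cstdio>

using VAddr = std::uint64_t;

struct StartPagePlanSketch {
    std::size_t fill_before; // bytes of fill value before the client data
    std::size_t copy_size;   // bytes copied from the client buffer
    std::size_t clear_size;  // bytes of fill value after the copied data
};

// Mirror of the start-partial-page logic: src_start/src_end delimit the client
// buffer, mapping_src_start is the first fully contained page boundary.
constexpr StartPagePlanSketch PlanStartPage(VAddr src_start, VAddr src_end,
                                            VAddr aligned_src_start, VAddr mapping_src_start) {
    StartPagePlanSketch plan{};
    plan.fill_before = static_cast<std::size_t>(src_start - aligned_src_start);
    if (src_end < mapping_src_start) {
        // The whole message fits inside the partial page.
        plan.copy_size = static_cast<std::size_t>(src_end - src_start);
        plan.clear_size = static_cast<std::size_t>(mapping_src_start - src_end);
    } else {
        plan.copy_size = static_cast<std::size_t>(mapping_src_start - src_start);
        plan.clear_size = 0;
    }
    return plan;
}

int main() {
    // Buffer starting mid-page and spilling past the first page boundary.
    const auto plan = PlanStartPage(0x8800, 0x9200, 0x8000, 0x9000);
    std::printf("fill=%zu copy=%zu clear=%zu\n", plan.fill_before, plan.copy_size, plan.clear_size);
    return 0;
}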
| 1129 | Result KPageTable::SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, | ||
| 1130 | KPageTable& src_page_table, KMemoryPermission test_perm, | ||
| 1131 | KMemoryState dst_state, bool send) { | ||
| 1132 | // For convenience, alias this. | ||
| 1133 | KPageTable& dst_page_table = *this; | ||
| 1134 | |||
| 1135 | // Acquire the table locks. | ||
| 1136 | KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); | ||
| 1137 | |||
| 1138 | // We're going to perform an update, so create a helper. | ||
| 1139 | KScopedPageTableUpdater updater(std::addressof(src_page_table)); | ||
| 1140 | |||
| 1141 | // Perform client setup. | ||
| 1142 | size_t num_allocator_blocks; | ||
| 1143 | R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(), | ||
| 1144 | std::addressof(num_allocator_blocks), src_addr, size, | ||
| 1145 | test_perm, dst_state)); | ||
| 1146 | |||
| 1147 | // Create an update allocator. | ||
| 1148 | Result allocator_result; | ||
| 1149 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1150 | src_page_table.m_memory_block_slab_manager, | ||
| 1151 | num_allocator_blocks); | ||
| 1152 | R_TRY(allocator_result); | ||
| 1153 | |||
| 1154 | // Get the mapped extents. | ||
| 1155 | const VAddr src_map_start = Common::AlignUp((src_addr), PageSize); | ||
| 1156 | const VAddr src_map_end = Common::AlignDown((src_addr) + size, PageSize); | ||
| 1157 | const size_t src_map_size = src_map_end - src_map_start; | ||
| 1158 | |||
| 1159 | // Ensure that we clean up appropriately if we fail after this. | ||
| 1160 | const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite) | ||
| 1161 | ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped | ||
| 1162 | : KMemoryPermission::UserRead; | ||
| 1163 | ON_RESULT_FAILURE { | ||
| 1164 | if (src_map_end > src_map_start) { | ||
| 1165 | src_page_table.CleanupForIpcClientOnServerSetupFailure( | ||
| 1166 | updater.GetPageList(), src_map_start, src_map_size, src_perm); | ||
| 1167 | } | ||
| 1168 | }; | ||
| 1169 | |||
| 1170 | // Perform server setup. | ||
| 1171 | R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state, | ||
| 1172 | src_page_table, send)); | ||
| 1173 | |||
| 1174 | // If anything was mapped, ipc-lock the pages. | ||
| 1175 | if (src_map_start < src_map_end) { | ||
| 1176 | // Get the source permission. | ||
| 1177 | src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start, | ||
| 1178 | (src_map_end - src_map_start) / PageSize, | ||
| 1179 | &KMemoryBlock::LockForIpc, src_perm); | ||
| 1180 | } | ||
| 1181 | |||
| 1182 | R_SUCCEED(); | ||
| 1183 | } | ||
| 1184 | |||
| 1185 | Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state) { | ||
| 1186 | // Validate the address. | ||
| 1187 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 1188 | |||
| 1189 | // Lock the table. | ||
| 1190 | KScopedLightLock lk(m_general_lock); | ||
| 1191 | |||
| 1192 | // Validate the memory state. | ||
| 1193 | size_t num_allocator_blocks; | ||
| 1194 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 1195 | KMemoryState::All, dst_state, KMemoryPermission::UserRead, | ||
| 1196 | KMemoryPermission::UserRead, KMemoryAttribute::All, | ||
| 1197 | KMemoryAttribute::None)); | ||
| 1198 | |||
| 1199 | // Create an update allocator. | ||
| 1200 | Result allocator_result; | ||
| 1201 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1202 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1203 | R_TRY(allocator_result); | ||
| 1204 | |||
| 1205 | // We're going to perform an update, so create a helper. | ||
| 1206 | KScopedPageTableUpdater updater(this); | ||
| 1207 | |||
| 1208 | // Get aligned extents. | ||
| 1209 | const VAddr aligned_start = Common::AlignDown((address), PageSize); | ||
| 1210 | const VAddr aligned_end = Common::AlignUp((address) + size, PageSize); | ||
| 1211 | const size_t aligned_size = aligned_end - aligned_start; | ||
| 1212 | const size_t aligned_num_pages = aligned_size / PageSize; | ||
| 1213 | |||
| 1214 | // HACK: Manually close the pages. | ||
| 1215 | HACK_ClosePages(aligned_start, aligned_num_pages); | ||
| 1216 | |||
| 1217 | // Unmap the pages. | ||
| 1218 | R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap)); | ||
| 1219 | |||
| 1220 | // Update memory blocks. | ||
| 1221 | m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages, | ||
| 1222 | KMemoryState::None, KMemoryPermission::None, | ||
| 1223 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 1224 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 1225 | |||
| 1226 | // Release from the resource limit as relevant. | ||
| 1227 | const VAddr mapping_start = Common::AlignUp((address), PageSize); | ||
| 1228 | const VAddr mapping_end = Common::AlignDown((address) + size, PageSize); | ||
| 1229 | const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0; | ||
| 1230 | m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, aligned_size - mapping_size); | ||
| 1231 | |||
| 1232 | R_SUCCEED(); | ||
| 1233 | } | ||
| 1234 | |||
| 1235 | Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state) { | ||
| 1236 | // Validate the address. | ||
| 1237 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 1238 | |||
| 1239 | // Get aligned source extents. | ||
| 1240 | const VAddr mapping_start = Common::AlignUp((address), PageSize); | ||
| 1241 | const VAddr mapping_end = Common::AlignDown((address) + size, PageSize); | ||
| 1242 | const VAddr mapping_last = mapping_end - 1; | ||
| 1243 | const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0; | ||
| 1244 | |||
| 1245 | // If nothing was mapped, we're actually done immediately. | ||
| 1246 | R_SUCCEED_IF(mapping_size == 0); | ||
| 1247 | |||
| 1248 | // Get the test state and attribute mask. | ||
| 1249 | KMemoryState test_state; | ||
| 1250 | KMemoryAttribute test_attr_mask; | ||
| 1251 | switch (dst_state) { | ||
| 1252 | case KMemoryState::Ipc: | ||
| 1253 | test_state = KMemoryState::FlagCanUseIpc; | ||
| 1254 | test_attr_mask = | ||
| 1255 | KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked; | ||
| 1256 | break; | ||
| 1257 | case KMemoryState::NonSecureIpc: | ||
| 1258 | test_state = KMemoryState::FlagCanUseNonSecureIpc; | ||
| 1259 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 1260 | break; | ||
| 1261 | case KMemoryState::NonDeviceIpc: | ||
| 1262 | test_state = KMemoryState::FlagCanUseNonDeviceIpc; | ||
| 1263 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 1264 | break; | ||
| 1265 | default: | ||
| 1266 | R_THROW(ResultInvalidCombination); | ||
| 1267 | } | ||
| 1268 | |||
| 1269 | // Lock the table. | ||
| 1270 | // NOTE: Nintendo does this *after* creating the updater below, but this does not follow | ||
| 1271 | // convention elsewhere in KPageTable. | ||
| 1272 | KScopedLightLock lk(m_general_lock); | ||
| 1273 | |||
| 1274 | // We're going to perform an update, so create a helper. | ||
| 1275 | KScopedPageTableUpdater updater(this); | ||
| 1276 | |||
| 1277 | // Ensure that on failure, we roll back appropriately. | ||
| 1278 | size_t mapped_size = 0; | ||
| 1279 | ON_RESULT_FAILURE { | ||
| 1280 | if (mapped_size > 0) { | ||
| 1281 | // Determine where the mapping ends. | ||
| 1282 | const auto mapped_end = (mapping_start) + mapped_size; | ||
| 1283 | const auto mapped_last = mapped_end - 1; | ||
| 1284 | |||
| 1285 | // Get current and next iterators. | ||
| 1286 | KMemoryBlockManager::const_iterator start_it = | ||
| 1287 | m_memory_block_manager.FindIterator(mapping_start); | ||
| 1288 | KMemoryBlockManager::const_iterator next_it = start_it; | ||
| 1289 | ++next_it; | ||
| 1290 | |||
| 1291 | // Get the current block info. | ||
| 1292 | KMemoryInfo cur_info = start_it->GetMemoryInfo(); | ||
| 1293 | |||
| 1294 | // Create tracking variables. | ||
| 1295 | VAddr cur_address = cur_info.GetAddress(); | ||
| 1296 | size_t cur_size = cur_info.GetSize(); | ||
| 1297 | bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); | ||
| 1298 | bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; | ||
| 1299 | bool first = | ||
| 1300 | cur_info.GetIpcDisableMergeCount() == 1 && | ||
| 1301 | (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) == | ||
| 1302 | KMemoryBlockDisableMergeAttribute::None; | ||
| 1303 | |||
| 1304 | while (((cur_address) + cur_size - 1) < mapped_last) { | ||
| 1305 | // Check that we have a next block. | ||
| 1306 | ASSERT(next_it != m_memory_block_manager.end()); | ||
| 1307 | |||
| 1308 | // Get the next info. | ||
| 1309 | const KMemoryInfo next_info = next_it->GetMemoryInfo(); | ||
| 1310 | |||
| 1311 | // Check if we can consolidate the next block's permission set with the current one. | ||
| 1312 | |||
| 1313 | const bool next_perm_eq = | ||
| 1314 | next_info.GetPermission() == next_info.GetOriginalPermission(); | ||
| 1315 | const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1; | ||
| 1316 | if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && | ||
| 1317 | cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) { | ||
| 1318 | // We can consolidate the reprotection for the current and next block into a | ||
| 1319 | // single call. | ||
| 1320 | cur_size += next_info.GetSize(); | ||
| 1321 | } else { | ||
| 1322 | // We have to operate on the current block. | ||
| 1323 | if ((cur_needs_set_perm || first) && !cur_perm_eq) { | ||
| 1324 | ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(), | ||
| 1325 | OperationType::ChangePermissions) | ||
| 1326 | .IsSuccess()); | ||
| 1327 | } | ||
| 1328 | |||
| 1329 | // Advance. | ||
| 1330 | cur_address = next_info.GetAddress(); | ||
| 1331 | cur_size = next_info.GetSize(); | ||
| 1332 | first = false; | ||
| 1333 | } | ||
| 1334 | |||
| 1335 | // Advance. | ||
| 1336 | cur_info = next_info; | ||
| 1337 | cur_perm_eq = next_perm_eq; | ||
| 1338 | cur_needs_set_perm = next_needs_set_perm; | ||
| 1339 | ++next_it; | ||
| 1340 | } | ||
| 1341 | |||
| 1342 | // Process the last block. | ||
| 1343 | if ((first || cur_needs_set_perm) && !cur_perm_eq) { | ||
| 1344 | ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(), | ||
| 1345 | OperationType::ChangePermissions) | ||
| 1346 | .IsSuccess()); | ||
| 1347 | } | ||
| 1348 | } | ||
| 1349 | }; | ||
| 1350 | |||
| 1351 | // Iterate, reprotecting as needed. | ||
| 1352 | { | ||
| 1353 | // Get current and next iterators. | ||
| 1354 | KMemoryBlockManager::const_iterator start_it = | ||
| 1355 | m_memory_block_manager.FindIterator(mapping_start); | ||
| 1356 | KMemoryBlockManager::const_iterator next_it = start_it; | ||
| 1357 | ++next_it; | ||
| 1358 | |||
| 1359 | // Validate the current block. | ||
| 1360 | KMemoryInfo cur_info = start_it->GetMemoryInfo(); | ||
| 1361 | ASSERT(this->CheckMemoryState(cur_info, test_state, test_state, KMemoryPermission::None, | ||
| 1362 | KMemoryPermission::None, | ||
| 1363 | test_attr_mask | KMemoryAttribute::IpcLocked, | ||
| 1364 | KMemoryAttribute::IpcLocked) | ||
| 1365 | .IsSuccess()); | ||
| 1366 | |||
| 1367 | // Create tracking variables. | ||
| 1368 | VAddr cur_address = cur_info.GetAddress(); | ||
| 1369 | size_t cur_size = cur_info.GetSize(); | ||
| 1370 | bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); | ||
| 1371 | bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; | ||
| 1372 | bool first = | ||
| 1373 | cur_info.GetIpcDisableMergeCount() == 1 && | ||
| 1374 | (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) == | ||
| 1375 | KMemoryBlockDisableMergeAttribute::None; | ||
| 1376 | |||
| 1377 | while ((cur_address + cur_size - 1) < mapping_last) { | ||
| 1378 | // Check that we have a next block. | ||
| 1379 | ASSERT(next_it != m_memory_block_manager.end()); | ||
| 1380 | |||
| 1381 | // Get the next info. | ||
| 1382 | const KMemoryInfo next_info = next_it->GetMemoryInfo(); | ||
| 1383 | |||
| 1384 | // Validate the next block. | ||
| 1385 | ASSERT(this->CheckMemoryState(next_info, test_state, test_state, | ||
| 1386 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 1387 | test_attr_mask | KMemoryAttribute::IpcLocked, | ||
| 1388 | KMemoryAttribute::IpcLocked) | ||
| 1389 | .IsSuccess()); | ||
| 1390 | |||
| 1391 | // Check if we can consolidate the next block's permission set with the current one. | ||
| 1392 | const bool next_perm_eq = | ||
| 1393 | next_info.GetPermission() == next_info.GetOriginalPermission(); | ||
| 1394 | const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1; | ||
| 1395 | if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && | ||
| 1396 | cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) { | ||
| 1397 | // We can consolidate the reprotection for the current and next block into a single | ||
| 1398 | // call. | ||
| 1399 | cur_size += next_info.GetSize(); | ||
| 1400 | } else { | ||
| 1401 | // We have to operate on the current block. | ||
| 1402 | if ((cur_needs_set_perm || first) && !cur_perm_eq) { | ||
| 1403 | R_TRY(Operate(cur_address, cur_size / PageSize, | ||
| 1404 | cur_needs_set_perm ? cur_info.GetOriginalPermission() | ||
| 1405 | : cur_info.GetPermission(), | ||
| 1406 | OperationType::ChangePermissions)); | ||
| 1407 | } | ||
| 1408 | |||
| 1409 | // Mark that we mapped the block. | ||
| 1410 | mapped_size += cur_size; | ||
| 1411 | |||
| 1412 | // Advance. | ||
| 1413 | cur_address = next_info.GetAddress(); | ||
| 1414 | cur_size = next_info.GetSize(); | ||
| 1415 | first = false; | ||
| 1416 | } | ||
| 1417 | |||
| 1418 | // Advance. | ||
| 1419 | cur_info = next_info; | ||
| 1420 | cur_perm_eq = next_perm_eq; | ||
| 1421 | cur_needs_set_perm = next_needs_set_perm; | ||
| 1422 | ++next_it; | ||
| 1423 | } | ||
| 1424 | |||
| 1425 | // Process the last block. | ||
| 1426 | const auto lock_count = | ||
| 1427 | cur_info.GetIpcLockCount() + | ||
| 1428 | (next_it != m_memory_block_manager.end() | ||
| 1429 | ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) | ||
| 1430 | : 0); | ||
| 1431 | if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) { | ||
| 1432 | R_TRY(Operate(cur_address, cur_size / PageSize, | ||
| 1433 | cur_needs_set_perm ? cur_info.GetOriginalPermission() | ||
| 1434 | : cur_info.GetPermission(), | ||
| 1435 | OperationType::ChangePermissions)); | ||
| 1436 | } | ||
| 1437 | } | ||
| 1438 | |||
| 1439 | // Create an update allocator. | ||
| 1440 | // NOTE: Guaranteed zero blocks needed here. | ||
| 1441 | Result allocator_result; | ||
| 1442 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1443 | m_memory_block_slab_manager, 0); | ||
| 1444 | R_TRY(allocator_result); | ||
| 1445 | |||
| 1446 | // Unlock the pages. | ||
| 1447 | m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start, | ||
| 1448 | mapping_size / PageSize, &KMemoryBlock::UnlockForIpc, | ||
| 1449 | KMemoryPermission::None); | ||
| 1450 | |||
| 1451 | R_SUCCEED(); | ||
| 1452 | } | ||
| 1453 | |||
| 1454 | void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLinkedList* page_list, | ||
| 1455 | VAddr address, size_t size, | ||
| 1456 | KMemoryPermission prot_perm) { | ||
| 1457 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 1458 | ASSERT(Common::IsAligned(address, PageSize)); | ||
| 1459 | ASSERT(Common::IsAligned(size, PageSize)); | ||
| 1460 | |||
| 1461 | // Get the mapped extents. | ||
| 1462 | const VAddr src_map_start = address; | ||
| 1463 | const VAddr src_map_end = address + size; | ||
| 1464 | const VAddr src_map_last = src_map_end - 1; | ||
| 1465 | |||
| 1466 | // This function is only invoked when there's something to do. | ||
| 1467 | ASSERT(src_map_end > src_map_start); | ||
| 1468 | |||
| 1469 | // Iterate over blocks, fixing permissions. | ||
| 1470 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address); | ||
| 1471 | while (true) { | ||
| 1472 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1473 | |||
| 1474 | const auto cur_start = | ||
| 1475 | info.GetAddress() >= src_map_start ? info.GetAddress() : src_map_start; | ||
| 1476 | const auto cur_end = | ||
| 1477 | src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress(); | ||
| 1478 | |||
| 1479 | // If we can, fix the protections on the block. | ||
| 1480 | if ((info.GetIpcLockCount() == 0 && | ||
| 1481 | (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) || | ||
| 1482 | (info.GetIpcLockCount() != 0 && | ||
| 1483 | (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) { | ||
| 1484 | // Check if we actually need to fix the protections on the block. | ||
| 1485 | if (cur_end == src_map_end || info.GetAddress() <= src_map_start || | ||
| 1486 | (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) { | ||
| 1487 | ASSERT(Operate(cur_start, (cur_end - cur_start) / PageSize, info.GetPermission(), | ||
| 1488 | OperationType::ChangePermissions) | ||
| 1489 | .IsSuccess()); | ||
| 1490 | } | ||
| 1491 | } | ||
| 1492 | |||
| 1493 | // If we're past the end of the region, we're done. | ||
| 1494 | if (src_map_last <= info.GetLastAddress()) { | ||
| 1495 | break; | ||
| 1496 | } | ||
| 1497 | |||
| 1498 | // Advance. | ||
| 1499 | ++it; | ||
| 1500 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1501 | } | ||
| 1502 | } | ||
| 1503 | |||
| 1504 | void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) { | ||
| 1505 | m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages); | ||
| 1506 | } | ||
| 1507 | |||
| 1508 | void KPageTable::HACK_ClosePages(VAddr virt_addr, size_t num_pages) { | ||
| 1509 | for (size_t index = 0; index < num_pages; ++index) { | ||
| 1510 | const auto paddr = GetPhysicalAddr(virt_addr + (index * PageSize)); | ||
| 1511 | m_system.Kernel().MemoryManager().Close(paddr, 1); | ||
| 1512 | } | ||
| 1513 | } | ||
| 1514 | |||
| 689 | Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | 1515 | Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { |
| 690 | // Lock the physical memory lock. | 1516 | // Lock the physical memory lock. |
| 691 | KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); | 1517 | KScopedLightLock phys_lk(m_map_physical_memory_lock); |
| 692 | 1518 | ||
| 693 | // Calculate the last address for convenience. | 1519 | // Calculate the last address for convenience. |
| 694 | const VAddr last_address = address + size - 1; | 1520 | const VAddr last_address = address + size - 1; |
| @@ -742,15 +1568,19 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 742 | { | 1568 | { |
| 743 | // Reserve the memory from the process resource limit. | 1569 | // Reserve the memory from the process resource limit. |
| 744 | KScopedResourceReservation memory_reservation( | 1570 | KScopedResourceReservation memory_reservation( |
| 745 | m_system.Kernel().CurrentProcess()->GetResourceLimit(), | 1571 | m_resource_limit, LimitableResource::PhysicalMemoryMax, size - mapped_size); |
| 746 | LimitableResource::PhysicalMemory, size - mapped_size); | ||
| 747 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 1572 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 748 | 1573 | ||
| 749 | // Allocate pages for the new memory. | 1574 | // Allocate pages for the new memory. |
| 750 | KPageGroup pg; | 1575 | KPageGroup pg; |
| 751 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( | 1576 | R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess( |
| 752 | &pg, (size - mapped_size) / PageSize, | 1577 | &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0)); |
| 753 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); | 1578 | |
| 1579 | // If we fail in the next bit (or retry), we need to cleanup the pages. | ||
| 1580 | // auto pg_guard = SCOPE_GUARD { | ||
| 1581 | // pg.OpenFirst(); | ||
| 1582 | // pg.Close(); | ||
| 1583 | //}; | ||
| 754 | 1584 | ||
| 755 | // Map the memory. | 1585 | // Map the memory. |
| 756 | { | 1586 | { |
| @@ -810,15 +1640,24 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 810 | 1640 | ||
| 811 | // Create an update allocator. | 1641 | // Create an update allocator. |
| 812 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | 1642 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); |
| 813 | Result allocator_result{ResultSuccess}; | 1643 | Result allocator_result; |
| 814 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1644 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 815 | m_memory_block_slab_manager, | 1645 | m_memory_block_slab_manager, |
| 816 | num_allocator_blocks); | 1646 | num_allocator_blocks); |
| 817 | R_TRY(allocator_result); | 1647 | R_TRY(allocator_result); |
| 818 | 1648 | ||
| 1649 | // We're going to perform an update, so create a helper. | ||
| 1650 | KScopedPageTableUpdater updater(this); | ||
| 1651 | |||
| 1652 | // Prepare to iterate over the memory. | ||
| 1653 | auto pg_it = pg.Nodes().begin(); | ||
| 1654 | PAddr pg_phys_addr = pg_it->GetAddress(); | ||
| 1655 | size_t pg_pages = pg_it->GetNumPages(); | ||
| 1656 | |||
| 819 | // Reset the current tracking address, and make sure we clean up on failure. | 1657 | // Reset the current tracking address, and make sure we clean up on failure. |
| 1658 | // pg_guard.Cancel(); | ||
| 820 | cur_address = address; | 1659 | cur_address = address; |
| 821 | auto unmap_guard = detail::ScopeExit([&] { | 1660 | ON_RESULT_FAILURE { |
| 822 | if (cur_address > address) { | 1661 | if (cur_address > address) { |
| 823 | const VAddr last_unmap_address = cur_address - 1; | 1662 | const VAddr last_unmap_address = cur_address - 1; |
| 824 | 1663 | ||
| @@ -841,6 +1680,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 841 | last_unmap_address + 1 - cur_address) / | 1680 | last_unmap_address + 1 - cur_address) / |
| 842 | PageSize; | 1681 | PageSize; |
| 843 | 1682 | ||
| 1683 | // HACK: Manually close the pages. | ||
| 1684 | HACK_ClosePages(cur_address, cur_pages); | ||
| 1685 | |||
| 844 | // Unmap. | 1686 | // Unmap. |
| 845 | ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, | 1687 | ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, |
| 846 | OperationType::Unmap) | 1688 | OperationType::Unmap) |
| @@ -857,12 +1699,17 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 857 | ++it; | 1699 | ++it; |
| 858 | } | 1700 | } |
| 859 | } | 1701 | } |
| 860 | }); | ||
| 861 | 1702 | ||
| 862 | // Iterate over the memory. | 1703 | // Release any remaining unmapped memory. |
| 863 | auto pg_it = pg.Nodes().begin(); | 1704 | m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages); |
| 864 | PAddr pg_phys_addr = pg_it->GetAddress(); | 1705 | m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages); |
| 865 | size_t pg_pages = pg_it->GetNumPages(); | 1706 | for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) { |
| 1707 | m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(), | ||
| 1708 | pg_it->GetNumPages()); | ||
| 1709 | m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(), | ||
| 1710 | pg_it->GetNumPages()); | ||
| 1711 | } | ||
| 1712 | }; | ||
| 866 | 1713 | ||
| 867 | auto it = m_memory_block_manager.FindIterator(cur_address); | 1714 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| 868 | while (true) { | 1715 | while (true) { |
| @@ -897,6 +1744,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 897 | R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite, | 1744 | R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite, |
| 898 | OperationType::Map, pg_phys_addr)); | 1745 | OperationType::Map, pg_phys_addr)); |
| 899 | 1746 | ||
| 1747 | // HACK: Manually open the pages. | ||
| 1748 | HACK_OpenPages(pg_phys_addr, cur_pages); | ||
| 1749 | |||
| 900 | // Advance. | 1750 | // Advance. |
| 901 | cur_address += cur_pages * PageSize; | 1751 | cur_address += cur_pages * PageSize; |
| 902 | map_pages -= cur_pages; | 1752 | map_pages -= cur_pages; |
| @@ -928,9 +1778,6 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 928 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, | 1778 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, |
| 929 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None); | 1779 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None); |
| 930 | 1780 | ||
| 931 | // Cancel our guard. | ||
| 932 | unmap_guard.Cancel(); | ||
| 933 | |||
| 934 | R_SUCCEED(); | 1781 | R_SUCCEED(); |
| 935 | } | 1782 | } |
| 936 | } | 1783 | } |
| @@ -939,7 +1786,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 939 | 1786 | ||
| 940 | Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | 1787 | Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { |
| 941 | // Lock the physical memory lock. | 1788 | // Lock the physical memory lock. |
| 942 | KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); | 1789 | KScopedLightLock phys_lk(m_map_physical_memory_lock); |
| 943 | 1790 | ||
| 944 | // Lock the table. | 1791 | // Lock the table. |
| 945 | KScopedLightLock lk(m_general_lock); | 1792 | KScopedLightLock lk(m_general_lock); |
| @@ -948,8 +1795,11 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 948 | const VAddr last_address = address + size - 1; | 1795 | const VAddr last_address = address + size - 1; |
| 949 | 1796 | ||
| 950 | // Define iteration variables. | 1797 | // Define iteration variables. |
| 951 | VAddr cur_address = 0; | 1798 | VAddr map_start_address = 0; |
| 952 | size_t mapped_size = 0; | 1799 | VAddr map_last_address = 0; |
| 1800 | |||
| 1801 | VAddr cur_address; | ||
| 1802 | size_t mapped_size; | ||
| 953 | size_t num_allocator_blocks = 0; | 1803 | size_t num_allocator_blocks = 0; |
| 954 | 1804 | ||
| 955 | // Check if the memory is mapped. | 1805 | // Check if the memory is mapped. |
| @@ -975,27 +1825,27 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 975 | if (is_normal) { | 1825 | if (is_normal) { |
| 976 | R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory); | 1826 | R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory); |
| 977 | 1827 | ||
| 1828 | if (map_start_address == 0) { | ||
| 1829 | map_start_address = cur_address; | ||
| 1830 | } | ||
| 1831 | map_last_address = | ||
| 1832 | (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address; | ||
| 1833 | |||
| 978 | if (info.GetAddress() < address) { | 1834 | if (info.GetAddress() < address) { |
| 979 | ++num_allocator_blocks; | 1835 | ++num_allocator_blocks; |
| 980 | } | 1836 | } |
| 981 | if (last_address < info.GetLastAddress()) { | 1837 | if (last_address < info.GetLastAddress()) { |
| 982 | ++num_allocator_blocks; | 1838 | ++num_allocator_blocks; |
| 983 | } | 1839 | } |
| 1840 | |||
| 1841 | mapped_size += (map_last_address + 1 - cur_address); | ||
| 984 | } | 1842 | } |
| 985 | 1843 | ||
| 986 | // Check if we're done. | 1844 | // Check if we're done. |
| 987 | if (last_address <= info.GetLastAddress()) { | 1845 | if (last_address <= info.GetLastAddress()) { |
| 988 | if (is_normal) { | ||
| 989 | mapped_size += (last_address + 1 - cur_address); | ||
| 990 | } | ||
| 991 | break; | 1846 | break; |
| 992 | } | 1847 | } |
| 993 | 1848 | ||
| 994 | // Track the memory if it's mapped. | ||
| 995 | if (is_normal) { | ||
| 996 | mapped_size += VAddr(info.GetEndAddress()) - cur_address; | ||
| 997 | } | ||
| 998 | |||
| 999 | // Advance. | 1849 | // Advance. |
| 1000 | cur_address = info.GetEndAddress(); | 1850 | cur_address = info.GetEndAddress(); |
| 1001 | ++it; | 1851 | ++it; |
| @@ -1005,125 +1855,22 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 1005 | R_SUCCEED_IF(mapped_size == 0); | 1855 | R_SUCCEED_IF(mapped_size == 0); |
| 1006 | } | 1856 | } |
| 1007 | 1857 | ||
| 1008 | // Make a page group for the unmap region. | ||
| 1009 | KPageGroup pg; | ||
| 1010 | { | ||
| 1011 | auto& impl = this->PageTableImpl(); | ||
| 1012 | |||
| 1013 | // Begin traversal. | ||
| 1014 | Common::PageTable::TraversalContext context; | ||
| 1015 | Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0}; | ||
| 1016 | bool cur_valid = false; | ||
| 1017 | Common::PageTable::TraversalEntry next_entry; | ||
| 1018 | bool next_valid = false; | ||
| 1019 | size_t tot_size = 0; | ||
| 1020 | |||
| 1021 | cur_address = address; | ||
| 1022 | next_valid = impl.BeginTraversal(next_entry, context, cur_address); | ||
| 1023 | next_entry.block_size = | ||
| 1024 | (next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1))); | ||
| 1025 | |||
| 1026 | // Iterate, building the group. | ||
| 1027 | while (true) { | ||
| 1028 | if ((!next_valid && !cur_valid) || | ||
| 1029 | (next_valid && cur_valid && | ||
| 1030 | next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) { | ||
| 1031 | cur_entry.block_size += next_entry.block_size; | ||
| 1032 | } else { | ||
| 1033 | if (cur_valid) { | ||
| 1034 | // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr)); | ||
| 1035 | R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize)); | ||
| 1036 | } | ||
| 1037 | |||
| 1038 | // Update tracking variables. | ||
| 1039 | tot_size += cur_entry.block_size; | ||
| 1040 | cur_entry = next_entry; | ||
| 1041 | cur_valid = next_valid; | ||
| 1042 | } | ||
| 1043 | |||
| 1044 | if (cur_entry.block_size + tot_size >= size) { | ||
| 1045 | break; | ||
| 1046 | } | ||
| 1047 | |||
| 1048 | next_valid = impl.ContinueTraversal(next_entry, context); | ||
| 1049 | } | ||
| 1050 | |||
| 1051 | // Add the last block. | ||
| 1052 | if (cur_valid) { | ||
| 1053 | // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr)); | ||
| 1054 | R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize)); | ||
| 1055 | } | ||
| 1056 | } | ||
| 1057 | ASSERT(pg.GetNumPages() == mapped_size / PageSize); | ||
| 1058 | |||
| 1059 | // Create an update allocator. | 1858 | // Create an update allocator. |
| 1060 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | 1859 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); |
| 1061 | Result allocator_result{ResultSuccess}; | 1860 | Result allocator_result; |
| 1062 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1861 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1063 | m_memory_block_slab_manager, num_allocator_blocks); | 1862 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1064 | R_TRY(allocator_result); | 1863 | R_TRY(allocator_result); |
| 1065 | 1864 | ||
| 1066 | // Reset the current tracking address, and make sure we clean up on failure. | 1865 | // We're going to perform an update, so create a helper. |
| 1067 | cur_address = address; | 1866 | KScopedPageTableUpdater updater(this); |
| 1068 | auto remap_guard = detail::ScopeExit([&] { | ||
| 1069 | if (cur_address > address) { | ||
| 1070 | const VAddr last_map_address = cur_address - 1; | ||
| 1071 | cur_address = address; | ||
| 1072 | |||
| 1073 | // Iterate over the memory we unmapped. | ||
| 1074 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 1075 | auto pg_it = pg.Nodes().begin(); | ||
| 1076 | PAddr pg_phys_addr = pg_it->GetAddress(); | ||
| 1077 | size_t pg_pages = pg_it->GetNumPages(); | ||
| 1078 | |||
| 1079 | while (true) { | ||
| 1080 | // Get the memory info for the pages we unmapped, convert to property. | ||
| 1081 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1082 | |||
| 1083 | // If the memory is normal, we unmapped it and need to re-map it. | ||
| 1084 | if (info.GetState() == KMemoryState::Normal) { | ||
| 1085 | // Determine the range to map. | ||
| 1086 | size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address, | ||
| 1087 | last_map_address + 1 - cur_address) / | ||
| 1088 | PageSize; | ||
| 1089 | |||
| 1090 | // While we have pages to map, map them. | ||
| 1091 | while (map_pages > 0) { | ||
| 1092 | // Check if we're at the end of the physical block. | ||
| 1093 | if (pg_pages == 0) { | ||
| 1094 | // Ensure there are more pages to map. | ||
| 1095 | ASSERT(pg_it != pg.Nodes().end()); | ||
| 1096 | |||
| 1097 | // Advance our physical block. | ||
| 1098 | ++pg_it; | ||
| 1099 | pg_phys_addr = pg_it->GetAddress(); | ||
| 1100 | pg_pages = pg_it->GetNumPages(); | ||
| 1101 | } | ||
| 1102 | |||
| 1103 | // Map whatever we can. | ||
| 1104 | const size_t cur_pages = std::min(pg_pages, map_pages); | ||
| 1105 | ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(), | ||
| 1106 | OperationType::Map, pg_phys_addr) == ResultSuccess); | ||
| 1107 | 1867 | ||
| 1108 | // Advance. | 1868 | // Separate the mapping. |
| 1109 | cur_address += cur_pages * PageSize; | 1869 | R_TRY(Operate(map_start_address, (map_last_address + 1 - map_start_address) / PageSize, |
| 1110 | map_pages -= cur_pages; | 1870 | KMemoryPermission::None, OperationType::Separate)); |
| 1111 | 1871 | ||
| 1112 | pg_phys_addr += cur_pages * PageSize; | 1872 | // Reset the current tracking address, and make sure we clean up on failure. |
| 1113 | pg_pages -= cur_pages; | 1873 | cur_address = address; |
| 1114 | } | ||
| 1115 | } | ||
| 1116 | |||
| 1117 | // Check if we're done. | ||
| 1118 | if (last_map_address <= info.GetLastAddress()) { | ||
| 1119 | break; | ||
| 1120 | } | ||
| 1121 | |||
| 1122 | // Advance. | ||
| 1123 | ++it; | ||
| 1124 | } | ||
| 1125 | } | ||
| 1126 | }); | ||
| 1127 | 1874 | ||
| 1128 | // Iterate over the memory, unmapping as we go. | 1875 | // Iterate over the memory, unmapping as we go. |
| 1129 | auto it = m_memory_block_manager.FindIterator(cur_address); | 1876 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| @@ -1141,8 +1888,12 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 1141 | last_address + 1 - cur_address) / | 1888 | last_address + 1 - cur_address) / |
| 1142 | PageSize; | 1889 | PageSize; |
| 1143 | 1890 | ||
| 1891 | // HACK: Manually close the pages. | ||
| 1892 | HACK_ClosePages(cur_address, cur_pages); | ||
| 1893 | |||
| 1144 | // Unmap. | 1894 | // Unmap. |
| 1145 | R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)); | 1895 | ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap) |
| 1896 | .IsSuccess()); | ||
| 1146 | } | 1897 | } |
| 1147 | 1898 | ||
| 1148 | // Check if we're done. | 1899 | // Check if we're done. |
| @@ -1157,8 +1908,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 1157 | 1908 | ||
| 1158 | // Release the memory resource. | 1909 | // Release the memory resource. |
| 1159 | m_mapped_physical_memory_size -= mapped_size; | 1910 | m_mapped_physical_memory_size -= mapped_size; |
| 1160 | auto process{m_system.Kernel().CurrentProcess()}; | 1911 | m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, mapped_size); |
| 1161 | process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); | ||
| 1162 | 1912 | ||
| 1163 | // Update memory blocks. | 1913 | // Update memory blocks. |
| 1164 | m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, | 1914 | m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, |
| @@ -1166,14 +1916,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 1166 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | 1916 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, |
| 1167 | KMemoryBlockDisableMergeAttribute::None); | 1917 | KMemoryBlockDisableMergeAttribute::None); |
| 1168 | 1918 | ||
| 1169 | // TODO(bunnei): This is a workaround until the next set of changes, where we add reference | ||
| 1170 | // counting for mapped pages. Until then, we must manually close the reference to the page | ||
| 1171 | // group. | ||
| 1172 | m_system.Kernel().MemoryManager().Close(pg); | ||
| 1173 | |||
| 1174 | // We succeeded. | 1919 | // We succeeded. |
| 1175 | remap_guard.Cancel(); | ||
| 1176 | |||
| 1177 | R_SUCCEED(); | 1920 | R_SUCCEED(); |
| 1178 | } | 1921 | } |
| 1179 | 1922 | ||
| @@ -1749,8 +2492,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) { | |||
| 1749 | OperationType::Unmap)); | 2492 | OperationType::Unmap)); |
| 1750 | 2493 | ||
| 1751 | // Release the memory from the resource limit. | 2494 | // Release the memory from the resource limit. |
| 1752 | m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release( | 2495 | m_resource_limit->Release(LimitableResource::PhysicalMemoryMax, num_pages * PageSize); |
| 1753 | LimitableResource::PhysicalMemory, num_pages * PageSize); | ||
| 1754 | 2496 | ||
| 1755 | // Apply the memory block update. | 2497 | // Apply the memory block update. |
| 1756 | m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, | 2498 | m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, |
| @@ -1780,8 +2522,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) { | |||
| 1780 | 2522 | ||
| 1781 | // Reserve memory for the heap extension. | 2523 | // Reserve memory for the heap extension. |
| 1782 | KScopedResourceReservation memory_reservation( | 2524 | KScopedResourceReservation memory_reservation( |
| 1783 | m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, | 2525 | m_resource_limit, LimitableResource::PhysicalMemoryMax, allocation_size); |
| 1784 | allocation_size); | ||
| 1785 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 2526 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 1786 | 2527 | ||
| 1787 | // Allocate pages for the heap extension. | 2528 | // Allocate pages for the heap extension. |
| @@ -1869,7 +2610,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_ | |||
| 1869 | R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); | 2610 | R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); |
| 1870 | } else { | 2611 | } else { |
| 1871 | KPageGroup page_group; | 2612 | KPageGroup page_group; |
| 1872 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( | 2613 | R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess( |
| 1873 | &page_group, needed_num_pages, | 2614 | &page_group, needed_num_pages, |
| 1874 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); | 2615 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); |
| 1875 | R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); | 2616 | R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); |
| @@ -1883,8 +2624,9 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_ | |||
| 1883 | return addr; | 2624 | return addr; |
| 1884 | } | 2625 | } |
| 1885 | 2626 | ||
| 1886 | Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, | 2627 | Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size, |
| 1887 | bool is_aligned) { | 2628 | KMemoryPermission perm, bool is_aligned, |
| 2629 | bool check_heap) { | ||
| 1888 | // Lightly validate the range before doing anything else. | 2630 | // Lightly validate the range before doing anything else. |
| 1889 | const size_t num_pages = size / PageSize; | 2631 | const size_t num_pages = size / PageSize; |
| 1890 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | 2632 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); |
| @@ -1894,15 +2636,18 @@ Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMem | |||
| 1894 | 2636 | ||
| 1895 | // Check the memory state. | 2637 | // Check the memory state. |
| 1896 | const auto test_state = | 2638 | const auto test_state = |
| 1897 | (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap); | 2639 | (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) | |
| 2640 | (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None); | ||
| 1898 | size_t num_allocator_blocks; | 2641 | size_t num_allocator_blocks; |
| 1899 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state, | 2642 | KMemoryState old_state; |
| 2643 | R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr, | ||
| 2644 | std::addressof(num_allocator_blocks), address, size, test_state, | ||
| 1900 | test_state, perm, perm, | 2645 | test_state, perm, perm, |
| 1901 | KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, | 2646 | KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, |
| 1902 | KMemoryAttribute::None, KMemoryAttribute::DeviceShared)); | 2647 | KMemoryAttribute::None, KMemoryAttribute::DeviceShared)); |
| 1903 | 2648 | ||
| 1904 | // Create an update allocator. | 2649 | // Create an update allocator. |
| 1905 | Result allocator_result{ResultSuccess}; | 2650 | Result allocator_result; |
| 1906 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 2651 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1907 | m_memory_block_slab_manager, num_allocator_blocks); | 2652 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1908 | R_TRY(allocator_result); | 2653 | R_TRY(allocator_result); |
| @@ -1911,10 +2656,13 @@ Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMem | |||
| 1911 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, | 2656 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, |
| 1912 | &KMemoryBlock::ShareToDevice, KMemoryPermission::None); | 2657 | &KMemoryBlock::ShareToDevice, KMemoryPermission::None); |
| 1913 | 2658 | ||
| 2659 | // Set whether the locked memory was io. | ||
| 2660 | *out_is_io = old_state == KMemoryState::Io; | ||
| 2661 | |||
| 1914 | R_SUCCEED(); | 2662 | R_SUCCEED(); |
| 1915 | } | 2663 | } |
| 1916 | 2664 | ||
| 1917 | Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) { | 2665 | Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap) { |
| 1918 | // Lightly validate the range before doing anything else. | 2666 | // Lightly validate the range before doing anything else. |
| 1919 | const size_t num_pages = size / PageSize; | 2667 | const size_t num_pages = size / PageSize; |
| 1920 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | 2668 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); |
| @@ -1923,16 +2671,16 @@ Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) { | |||
| 1923 | KScopedLightLock lk(m_general_lock); | 2671 | KScopedLightLock lk(m_general_lock); |
| 1924 | 2672 | ||
| 1925 | // Check the memory state. | 2673 | // Check the memory state. |
| 2674 | const auto test_state = KMemoryState::FlagCanDeviceMap | | ||
| 2675 | (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None); | ||
| 1926 | size_t num_allocator_blocks; | 2676 | size_t num_allocator_blocks; |
| 1927 | R_TRY(this->CheckMemoryStateContiguous( | 2677 | R_TRY(this->CheckMemoryStateContiguous( |
| 1928 | std::addressof(num_allocator_blocks), address, size, | 2678 | std::addressof(num_allocator_blocks), address, size, test_state, test_state, |
| 1929 | KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, | ||
| 1930 | KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, | ||
| 1931 | KMemoryPermission::None, KMemoryPermission::None, | 2679 | KMemoryPermission::None, KMemoryPermission::None, |
| 1932 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | 2680 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); |
| 1933 | 2681 | ||
| 1934 | // Create an update allocator. | 2682 | // Create an update allocator. |
| 1935 | Result allocator_result{ResultSuccess}; | 2683 | Result allocator_result; |
| 1936 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 2684 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1937 | m_memory_block_slab_manager, num_allocator_blocks); | 2685 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1938 | R_TRY(allocator_result); | 2686 | R_TRY(allocator_result); |
| @@ -1976,13 +2724,28 @@ Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) { | |||
| 1976 | R_SUCCEED(); | 2724 | R_SUCCEED(); |
| 1977 | } | 2725 | } |
| 1978 | 2726 | ||
| 2727 | Result KPageTable::LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size) { | ||
| 2728 | R_RETURN(this->LockMemoryAndOpen( | ||
| 2729 | nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer, | ||
| 2730 | KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All, | ||
| 2731 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None, | ||
| 2732 | KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite, | ||
| 2733 | KMemoryAttribute::Locked)); | ||
| 2734 | } | ||
| 2735 | |||
| 2736 | Result KPageTable::UnlockForIpcUserBuffer(VAddr address, size_t size) { | ||
| 2737 | R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer, | ||
| 2738 | KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None, | ||
| 2739 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 2740 | KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, | ||
| 2741 | KMemoryAttribute::Locked, nullptr)); | ||
| 2742 | } | ||
| 2743 | |||
| 1979 | Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) { | 2744 | Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) { |
| 1980 | R_RETURN(this->LockMemoryAndOpen( | 2745 | R_RETURN(this->LockMemoryAndOpen( |
| 1981 | out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, | 2746 | out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, |
| 1982 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, | 2747 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, |
| 1983 | KMemoryAttribute::None, | 2748 | KMemoryAttribute::None, KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite, |
| 1984 | static_cast<KMemoryPermission>(KMemoryPermission::NotMapped | | ||
| 1985 | KMemoryPermission::KernelReadWrite), | ||
| 1986 | KMemoryAttribute::Locked)); | 2749 | KMemoryAttribute::Locked)); |
| 1987 | } | 2750 | } |
| 1988 | 2751 | ||
| @@ -2066,6 +2829,10 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, | |||
| 2066 | m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); | 2829 | m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); |
| 2067 | break; | 2830 | break; |
| 2068 | } | 2831 | } |
| 2832 | case OperationType::Separate: { | ||
| 2833 | // HACK: Unimplemented. | ||
| 2834 | break; | ||
| 2835 | } | ||
| 2069 | case OperationType::ChangePermissions: | 2836 | case OperationType::ChangePermissions: |
| 2070 | case OperationType::ChangePermissionsAndRefresh: | 2837 | case OperationType::ChangePermissionsAndRefresh: |
| 2071 | break; | 2838 | break; |
| @@ -2075,6 +2842,17 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, | |||
| 2075 | R_SUCCEED(); | 2842 | R_SUCCEED(); |
| 2076 | } | 2843 | } |
| 2077 | 2844 | ||
| 2845 | void KPageTable::FinalizeUpdate(PageLinkedList* page_list) { | ||
| 2846 | while (page_list->Peek()) { | ||
| 2847 | [[maybe_unused]] auto page = page_list->Pop(); | ||
| 2848 | |||
| 2849 | // TODO(bunnei): Free pages once they are allocated in guest memory | ||
| 2850 | // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page)); | ||
| 2851 | // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0); | ||
| 2852 | // this->GetPageTableManager().Free(page); | ||
| 2853 | } | ||
| 2854 | } | ||
| 2855 | |||
| 2078 | VAddr KPageTable::GetRegionAddress(KMemoryState state) const { | 2856 | VAddr KPageTable::GetRegionAddress(KMemoryState state) const { |
| 2079 | switch (state) { | 2857 | switch (state) { |
| 2080 | case KMemoryState::Free: | 2858 | case KMemoryState::Free: |
| @@ -2101,6 +2879,7 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const { | |||
| 2101 | case KMemoryState::GeneratedCode: | 2879 | case KMemoryState::GeneratedCode: |
| 2102 | case KMemoryState::CodeOut: | 2880 | case KMemoryState::CodeOut: |
| 2103 | case KMemoryState::Coverage: | 2881 | case KMemoryState::Coverage: |
| 2882 | case KMemoryState::Insecure: | ||
| 2104 | return m_alias_code_region_start; | 2883 | return m_alias_code_region_start; |
| 2105 | case KMemoryState::Code: | 2884 | case KMemoryState::Code: |
| 2106 | case KMemoryState::CodeData: | 2885 | case KMemoryState::CodeData: |
| @@ -2136,6 +2915,7 @@ size_t KPageTable::GetRegionSize(KMemoryState state) const { | |||
| 2136 | case KMemoryState::GeneratedCode: | 2915 | case KMemoryState::GeneratedCode: |
| 2137 | case KMemoryState::CodeOut: | 2916 | case KMemoryState::CodeOut: |
| 2138 | case KMemoryState::Coverage: | 2917 | case KMemoryState::Coverage: |
| 2918 | case KMemoryState::Insecure: | ||
| 2139 | return m_alias_code_region_end - m_alias_code_region_start; | 2919 | return m_alias_code_region_end - m_alias_code_region_start; |
| 2140 | case KMemoryState::Code: | 2920 | case KMemoryState::Code: |
| 2141 | case KMemoryState::CodeData: | 2921 | case KMemoryState::CodeData: |
| @@ -2177,6 +2957,7 @@ bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const { | |||
| 2177 | case KMemoryState::GeneratedCode: | 2957 | case KMemoryState::GeneratedCode: |
| 2178 | case KMemoryState::CodeOut: | 2958 | case KMemoryState::CodeOut: |
| 2179 | case KMemoryState::Coverage: | 2959 | case KMemoryState::Coverage: |
| 2960 | case KMemoryState::Insecure: | ||
| 2180 | return is_in_region && !is_in_heap && !is_in_alias; | 2961 | return is_in_region && !is_in_heap && !is_in_alias; |
| 2181 | case KMemoryState::Normal: | 2962 | case KMemoryState::Normal: |
| 2182 | ASSERT(is_in_heap); | 2963 | ASSERT(is_in_heap); |
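The MapPhysicalMemory and UnmapPhysicalMemory hunks above drop the explicit detail::ScopeExit unmap/remap guards in favor of the ON_RESULT_FAILURE idiom, whose body only runs when the enclosing function exits with a failing Result. As a minimal, self-contained sketch of the cancel-on-success guard the old code used (MapPages, UnmapPages, and FinishBookkeeping are hypothetical stand-ins, not kernel APIs):

```cpp
#include <utility>

// Minimal cancel-on-success guard, illustrative only; the real code now uses
// the ON_RESULT_FAILURE macro, which ties cleanup to the function's Result
// instead of requiring an explicit Cancel() on the success path.
template <typename F>
class ScopeGuard {
public:
    explicit ScopeGuard(F&& f) : m_f(std::forward<F>(f)) {}
    ~ScopeGuard() {
        if (m_active) {
            m_f();
        }
    }
    ScopeGuard(const ScopeGuard&) = delete;
    ScopeGuard& operator=(const ScopeGuard&) = delete;

    // Called on the success path so the cleanup does not run.
    void Cancel() {
        m_active = false;
    }

private:
    F m_f;
    bool m_active{true};
};

// Hypothetical stand-ins for the map/unmap steps.
bool MapPages() { return true; }
void UnmapPages() {}
bool FinishBookkeeping() { return true; }

bool MapWithRollback() {
    if (!MapPages()) {
        return false;
    }
    // If anything below fails, roll the mapping back on scope exit.
    ScopeGuard unmap_guard{[] { UnmapPages(); }};

    if (!FinishBookkeeping()) {
        return false; // unmap_guard unmaps here
    }

    unmap_guard.Cancel(); // success: keep the mapping
    return true;
}

int main() {
    return MapWithRollback() ? 0 : 1;
}
```

The macro form removes the need to remember the Cancel() call on the success path, which is exactly the unmap_guard.Cancel() / remap_guard.Cancel() lines deleted in the hunks above.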
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index c6aeacd96..950850291 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include "core/hle/kernel/k_memory_layout.h" | 16 | #include "core/hle/kernel/k_memory_layout.h" |
| 17 | #include "core/hle/kernel/k_memory_manager.h" | 17 | #include "core/hle/kernel/k_memory_manager.h" |
| 18 | #include "core/hle/result.h" | 18 | #include "core/hle/result.h" |
| 19 | #include "core/memory.h" | ||
| 19 | 20 | ||
| 20 | namespace Core { | 21 | namespace Core { |
| 21 | class System; | 22 | class System; |
| @@ -23,7 +24,10 @@ class System; | |||
| 23 | 24 | ||
| 24 | namespace Kernel { | 25 | namespace Kernel { |
| 25 | 26 | ||
| 27 | class KBlockInfoManager; | ||
| 26 | class KMemoryBlockManager; | 28 | class KMemoryBlockManager; |
| 29 | class KResourceLimit; | ||
| 30 | class KSystemResource; | ||
| 27 | 31 | ||
| 28 | class KPageTable final { | 32 | class KPageTable final { |
| 29 | public: | 33 | public: |
| @@ -36,9 +40,9 @@ public: | |||
| 36 | ~KPageTable(); | 40 | ~KPageTable(); |
| 37 | 41 | ||
| 38 | Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, | 42 | Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, |
| 39 | VAddr code_addr, size_t code_size, | 43 | bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, |
| 40 | KMemoryBlockSlabManager* mem_block_slab_manager, | 44 | VAddr code_addr, size_t code_size, KSystemResource* system_resource, |
| 41 | KMemoryManager::Pool pool); | 45 | KResourceLimit* resource_limit); |
| 42 | 46 | ||
| 43 | void Finalize(); | 47 | void Finalize(); |
| 44 | 48 | ||
| @@ -74,12 +78,20 @@ public: | |||
| 74 | KMemoryState state, KMemoryPermission perm, | 78 | KMemoryState state, KMemoryPermission perm, |
| 75 | PAddr map_addr = 0); | 79 | PAddr map_addr = 0); |
| 76 | 80 | ||
| 77 | Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, | 81 | Result LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size, |
| 78 | bool is_aligned); | 82 | KMemoryPermission perm, bool is_aligned, bool check_heap); |
| 79 | Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size); | 83 | Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap); |
| 80 | 84 | ||
| 81 | Result UnlockForDeviceAddressSpace(VAddr addr, size_t size); | 85 | Result UnlockForDeviceAddressSpace(VAddr addr, size_t size); |
| 82 | 86 | ||
| 87 | Result LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size); | ||
| 88 | Result UnlockForIpcUserBuffer(VAddr address, size_t size); | ||
| 89 | |||
| 90 | Result SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, KPageTable& src_page_table, | ||
| 91 | KMemoryPermission test_perm, KMemoryState dst_state, bool send); | ||
| 92 | Result CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state); | ||
| 93 | Result CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state); | ||
| 94 | |||
| 83 | Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size); | 95 | Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size); |
| 84 | Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg); | 96 | Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg); |
| 85 | Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, | 97 | Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, |
| @@ -97,13 +109,54 @@ public: | |||
| 97 | 109 | ||
| 98 | bool CanContain(VAddr addr, size_t size, KMemoryState state) const; | 110 | bool CanContain(VAddr addr, size_t size, KMemoryState state) const; |
| 99 | 111 | ||
| 112 | protected: | ||
| 113 | struct PageLinkedList { | ||
| 114 | private: | ||
| 115 | struct Node { | ||
| 116 | Node* m_next; | ||
| 117 | std::array<u8, PageSize - sizeof(Node*)> m_buffer; | ||
| 118 | }; | ||
| 119 | |||
| 120 | public: | ||
| 121 | constexpr PageLinkedList() = default; | ||
| 122 | |||
| 123 | void Push(Node* n) { | ||
| 124 | ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize)); | ||
| 125 | n->m_next = m_root; | ||
| 126 | m_root = n; | ||
| 127 | } | ||
| 128 | |||
| 129 | void Push(Core::Memory::Memory& memory, VAddr addr) { | ||
| 130 | this->Push(memory.GetPointer<Node>(addr)); | ||
| 131 | } | ||
| 132 | |||
| 133 | Node* Peek() const { | ||
| 134 | return m_root; | ||
| 135 | } | ||
| 136 | |||
| 137 | Node* Pop() { | ||
| 138 | Node* const r = m_root; | ||
| 139 | |||
| 140 | m_root = r->m_next; | ||
| 141 | r->m_next = nullptr; | ||
| 142 | |||
| 143 | return r; | ||
| 144 | } | ||
| 145 | |||
| 146 | private: | ||
| 147 | Node* m_root{}; | ||
| 148 | }; | ||
| 149 | static_assert(std::is_trivially_destructible<PageLinkedList>::value); | ||
| 150 | |||
| 100 | private: | 151 | private: |
| 101 | enum class OperationType : u32 { | 152 | enum class OperationType : u32 { |
| 102 | Map, | 153 | Map = 0, |
| 103 | MapGroup, | 154 | MapFirst = 1, |
| 104 | Unmap, | 155 | MapGroup = 2, |
| 105 | ChangePermissions, | 156 | Unmap = 3, |
| 106 | ChangePermissionsAndRefresh, | 157 | ChangePermissions = 4, |
| 158 | ChangePermissionsAndRefresh = 5, | ||
| 159 | Separate = 6, | ||
| 107 | }; | 160 | }; |
| 108 | 161 | ||
| 109 | static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = | 162 | static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = |
| @@ -123,6 +176,7 @@ private: | |||
| 123 | OperationType operation); | 176 | OperationType operation); |
| 124 | Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation, | 177 | Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation, |
| 125 | PAddr map_addr = 0); | 178 | PAddr map_addr = 0); |
| 179 | void FinalizeUpdate(PageLinkedList* page_list); | ||
| 126 | VAddr GetRegionAddress(KMemoryState state) const; | 180 | VAddr GetRegionAddress(KMemoryState state) const; |
| 127 | size_t GetRegionSize(KMemoryState state) const; | 181 | size_t GetRegionSize(KMemoryState state) const; |
| 128 | 182 | ||
| @@ -199,6 +253,18 @@ private: | |||
| 199 | return *out != 0; | 253 | return *out != 0; |
| 200 | } | 254 | } |
| 201 | 255 | ||
| 256 | Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, VAddr address, | ||
| 257 | size_t size, KMemoryPermission test_perm, KMemoryState dst_state); | ||
| 258 | Result SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr, | ||
| 259 | KMemoryPermission test_perm, KMemoryState dst_state, | ||
| 260 | KPageTable& src_page_table, bool send); | ||
| 261 | void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address, | ||
| 262 | size_t size, KMemoryPermission prot_perm); | ||
| 263 | |||
| 264 | // HACK: These will be removed once we automatically manage page reference counts. | ||
| 265 | void HACK_OpenPages(PAddr phys_addr, size_t num_pages); | ||
| 266 | void HACK_ClosePages(VAddr virt_addr, size_t num_pages); | ||
| 267 | |||
| 202 | mutable KLightLock m_general_lock; | 268 | mutable KLightLock m_general_lock; |
| 203 | mutable KLightLock m_map_physical_memory_lock; | 269 | mutable KLightLock m_map_physical_memory_lock; |
| 204 | 270 | ||
| @@ -316,6 +382,31 @@ public: | |||
| 316 | addr + size - 1 <= m_address_space_end - 1; | 382 | addr + size - 1 <= m_address_space_end - 1; |
| 317 | } | 383 | } |
| 318 | 384 | ||
| 385 | public: | ||
| 386 | static VAddr GetLinearMappedVirtualAddress(const KMemoryLayout& layout, PAddr addr) { | ||
| 387 | return layout.GetLinearVirtualAddress(addr); | ||
| 388 | } | ||
| 389 | |||
| 390 | static PAddr GetLinearMappedPhysicalAddress(const KMemoryLayout& layout, VAddr addr) { | ||
| 391 | return layout.GetLinearPhysicalAddress(addr); | ||
| 392 | } | ||
| 393 | |||
| 394 | static VAddr GetHeapVirtualAddress(const KMemoryLayout& layout, PAddr addr) { | ||
| 395 | return GetLinearMappedVirtualAddress(layout, addr); | ||
| 396 | } | ||
| 397 | |||
| 398 | static PAddr GetHeapPhysicalAddress(const KMemoryLayout& layout, VAddr addr) { | ||
| 399 | return GetLinearMappedPhysicalAddress(layout, addr); | ||
| 400 | } | ||
| 401 | |||
| 402 | static VAddr GetPageTableVirtualAddress(const KMemoryLayout& layout, PAddr addr) { | ||
| 403 | return GetLinearMappedVirtualAddress(layout, addr); | ||
| 404 | } | ||
| 405 | |||
| 406 | static PAddr GetPageTablePhysicalAddress(const KMemoryLayout& layout, VAddr addr) { | ||
| 407 | return GetLinearMappedPhysicalAddress(layout, addr); | ||
| 408 | } | ||
| 409 | |||
| 319 | private: | 410 | private: |
| 320 | constexpr bool IsKernel() const { | 411 | constexpr bool IsKernel() const { |
| 321 | return m_is_kernel; | 412 | return m_is_kernel; |
| @@ -331,6 +422,24 @@ private: | |||
| 331 | } | 422 | } |
| 332 | 423 | ||
| 333 | private: | 424 | private: |
| 425 | class KScopedPageTableUpdater { | ||
| 426 | private: | ||
| 427 | KPageTable* m_pt{}; | ||
| 428 | PageLinkedList m_ll; | ||
| 429 | |||
| 430 | public: | ||
| 431 | explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {} | ||
| 432 | explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {} | ||
| 433 | ~KScopedPageTableUpdater() { | ||
| 434 | m_pt->FinalizeUpdate(this->GetPageList()); | ||
| 435 | } | ||
| 436 | |||
| 437 | PageLinkedList* GetPageList() { | ||
| 438 | return &m_ll; | ||
| 439 | } | ||
| 440 | }; | ||
| 441 | |||
| 442 | private: | ||
| 334 | VAddr m_address_space_start{}; | 443 | VAddr m_address_space_start{}; |
| 335 | VAddr m_address_space_end{}; | 444 | VAddr m_address_space_end{}; |
| 336 | VAddr m_heap_region_start{}; | 445 | VAddr m_heap_region_start{}; |
| @@ -347,20 +456,27 @@ private: | |||
| 347 | VAddr m_alias_code_region_start{}; | 456 | VAddr m_alias_code_region_start{}; |
| 348 | VAddr m_alias_code_region_end{}; | 457 | VAddr m_alias_code_region_end{}; |
| 349 | 458 | ||
| 350 | size_t m_mapped_physical_memory_size{}; | ||
| 351 | size_t m_max_heap_size{}; | 459 | size_t m_max_heap_size{}; |
| 352 | size_t m_max_physical_memory_size{}; | 460 | size_t m_mapped_physical_memory_size{}; |
| 461 | size_t m_mapped_unsafe_physical_memory{}; | ||
| 462 | size_t m_mapped_insecure_memory{}; | ||
| 463 | size_t m_mapped_ipc_server_memory{}; | ||
| 353 | size_t m_address_space_width{}; | 464 | size_t m_address_space_width{}; |
| 354 | 465 | ||
| 355 | KMemoryBlockManager m_memory_block_manager; | 466 | KMemoryBlockManager m_memory_block_manager; |
| 467 | u32 m_allocate_option{}; | ||
| 356 | 468 | ||
| 357 | bool m_is_kernel{}; | 469 | bool m_is_kernel{}; |
| 358 | bool m_enable_aslr{}; | 470 | bool m_enable_aslr{}; |
| 359 | bool m_enable_device_address_space_merge{}; | 471 | bool m_enable_device_address_space_merge{}; |
| 360 | 472 | ||
| 361 | KMemoryBlockSlabManager* m_memory_block_slab_manager{}; | 473 | KMemoryBlockSlabManager* m_memory_block_slab_manager{}; |
| 474 | KBlockInfoManager* m_block_info_manager{}; | ||
| 475 | KResourceLimit* m_resource_limit{}; | ||
| 362 | 476 | ||
| 363 | u32 m_heap_fill_value{}; | 477 | u32 m_heap_fill_value{}; |
| 478 | u32 m_ipc_fill_value{}; | ||
| 479 | u32 m_stack_fill_value{}; | ||
| 364 | const KMemoryRegion* m_cached_physical_heap_region{}; | 480 | const KMemoryRegion* m_cached_physical_heap_region{}; |
| 365 | 481 | ||
| 366 | KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application}; | 482 | KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application}; |
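The new PageLinkedList in this header threads freed page-table pages through themselves, so tracking pending frees needs no extra allocation, and KScopedPageTableUpdater drains that list via FinalizeUpdate when an update goes out of scope. A standalone sketch of the intrusive free-list idea, with simplified types and hypothetical statically allocated pages:

```cpp
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>

// Illustrative only: the node header lives inside the page itself, so the
// list costs nothing beyond the pages it tracks.
constexpr std::size_t kPageSize = 0x1000;

struct PageNode {
    PageNode* next;
    std::array<std::uint8_t, kPageSize - sizeof(PageNode*)> buffer;
};
static_assert(sizeof(PageNode) == kPageSize);

class PageFreeList {
public:
    void Push(PageNode* n) {
        n->next = m_root;
        m_root = n;
    }
    PageNode* Pop() {
        PageNode* const r = m_root;
        assert(r != nullptr);
        m_root = r->next;
        r->next = nullptr;
        return r;
    }
    bool Empty() const {
        return m_root == nullptr;
    }

private:
    PageNode* m_root{};
};

int main() {
    // Hypothetical pages; the real list points at pages owned by the
    // page-table manager.
    alignas(kPageSize) static PageNode pages[2];
    PageFreeList list;
    list.Push(&pages[0]);
    list.Push(&pages[1]);
    while (!list.Empty()) {
        (void)list.Pop(); // FinalizeUpdate would hand each page back here
    }
}
```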
diff --git a/src/core/hle/kernel/k_page_table_manager.h b/src/core/hle/kernel/k_page_table_manager.h new file mode 100644 index 000000000..91a45cde3 --- /dev/null +++ b/src/core/hle/kernel/k_page_table_manager.h | |||
| @@ -0,0 +1,55 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <atomic> | ||
| 7 | |||
| 8 | #include "common/common_types.h" | ||
| 9 | #include "core/hle/kernel/k_dynamic_resource_manager.h" | ||
| 10 | #include "core/hle/kernel/k_page_table_slab_heap.h" | ||
| 11 | |||
| 12 | namespace Kernel { | ||
| 13 | |||
| 14 | class KPageTableManager : public KDynamicResourceManager<impl::PageTablePage, true> { | ||
| 15 | public: | ||
| 16 | using RefCount = KPageTableSlabHeap::RefCount; | ||
| 17 | static constexpr size_t PageTableSize = KPageTableSlabHeap::PageTableSize; | ||
| 18 | |||
| 19 | public: | ||
| 20 | KPageTableManager() = default; | ||
| 21 | |||
| 22 | void Initialize(KDynamicPageManager* page_allocator, KPageTableSlabHeap* pt_heap) { | ||
| 23 | m_pt_heap = pt_heap; | ||
| 24 | |||
| 25 | static_assert(std::derived_from<KPageTableSlabHeap, DynamicSlabType>); | ||
| 26 | BaseHeap::Initialize(page_allocator, pt_heap); | ||
| 27 | } | ||
| 28 | |||
| 29 | VAddr Allocate() { | ||
| 30 | return VAddr(BaseHeap::Allocate()); | ||
| 31 | } | ||
| 32 | |||
| 33 | RefCount GetRefCount(VAddr addr) const { | ||
| 34 | return m_pt_heap->GetRefCount(addr); | ||
| 35 | } | ||
| 36 | |||
| 37 | void Open(VAddr addr, int count) { | ||
| 38 | return m_pt_heap->Open(addr, count); | ||
| 39 | } | ||
| 40 | |||
| 41 | bool Close(VAddr addr, int count) { | ||
| 42 | return m_pt_heap->Close(addr, count); | ||
| 43 | } | ||
| 44 | |||
| 45 | bool IsInPageTableHeap(VAddr addr) const { | ||
| 46 | return m_pt_heap->IsInRange(addr); | ||
| 47 | } | ||
| 48 | |||
| 49 | private: | ||
| 50 | using BaseHeap = KDynamicResourceManager<impl::PageTablePage, true>; | ||
| 51 | |||
| 52 | KPageTableSlabHeap* m_pt_heap{}; | ||
| 53 | }; | ||
| 54 | |||
| 55 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/k_page_table_slab_heap.h b/src/core/hle/kernel/k_page_table_slab_heap.h new file mode 100644 index 000000000..a9543cbd0 --- /dev/null +++ b/src/core/hle/kernel/k_page_table_slab_heap.h | |||
| @@ -0,0 +1,93 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <array> | ||
| 7 | #include <vector> | ||
| 8 | |||
| 9 | #include "common/common_types.h" | ||
| 10 | #include "core/hle/kernel/k_dynamic_slab_heap.h" | ||
| 11 | #include "core/hle/kernel/slab_helpers.h" | ||
| 12 | |||
| 13 | namespace Kernel { | ||
| 14 | |||
| 15 | namespace impl { | ||
| 16 | |||
| 17 | class PageTablePage { | ||
| 18 | public: | ||
| 19 | // Do not initialize anything. | ||
| 20 | PageTablePage() = default; | ||
| 21 | |||
| 22 | private: | ||
| 23 | std::array<u8, PageSize> m_buffer{}; | ||
| 24 | }; | ||
| 25 | static_assert(sizeof(PageTablePage) == PageSize); | ||
| 26 | |||
| 27 | } // namespace impl | ||
| 28 | |||
| 29 | class KPageTableSlabHeap : public KDynamicSlabHeap<impl::PageTablePage, true> { | ||
| 30 | public: | ||
| 31 | using RefCount = u16; | ||
| 32 | static constexpr size_t PageTableSize = sizeof(impl::PageTablePage); | ||
| 33 | static_assert(PageTableSize == PageSize); | ||
| 34 | |||
| 35 | public: | ||
| 36 | KPageTableSlabHeap() = default; | ||
| 37 | |||
| 38 | static constexpr size_t CalculateReferenceCountSize(size_t size) { | ||
| 39 | return (size / PageSize) * sizeof(RefCount); | ||
| 40 | } | ||
| 41 | |||
| 42 | void Initialize(KDynamicPageManager* page_allocator, size_t object_count, RefCount* rc) { | ||
| 43 | BaseHeap::Initialize(page_allocator, object_count); | ||
| 44 | this->Initialize(rc); | ||
| 45 | } | ||
| 46 | |||
| 47 | RefCount GetRefCount(VAddr addr) { | ||
| 48 | ASSERT(this->IsInRange(addr)); | ||
| 49 | return *this->GetRefCountPointer(addr); | ||
| 50 | } | ||
| 51 | |||
| 52 | void Open(VAddr addr, int count) { | ||
| 53 | ASSERT(this->IsInRange(addr)); | ||
| 54 | |||
| 55 | *this->GetRefCountPointer(addr) += static_cast<RefCount>(count); | ||
| 56 | |||
| 57 | ASSERT(this->GetRefCount(addr) > 0); | ||
| 58 | } | ||
| 59 | |||
| 60 | bool Close(VAddr addr, int count) { | ||
| 61 | ASSERT(this->IsInRange(addr)); | ||
| 62 | ASSERT(this->GetRefCount(addr) >= count); | ||
| 63 | |||
| 64 | *this->GetRefCountPointer(addr) -= static_cast<RefCount>(count); | ||
| 65 | return this->GetRefCount(addr) == 0; | ||
| 66 | } | ||
| 67 | |||
| 68 | bool IsInPageTableHeap(VAddr addr) const { | ||
| 69 | return this->IsInRange(addr); | ||
| 70 | } | ||
| 71 | |||
| 72 | private: | ||
| 73 | void Initialize([[maybe_unused]] RefCount* rc) { | ||
| 74 | // TODO(bunnei): Use rc once we support kernel virtual memory allocations. | ||
| 75 | const auto count = this->GetSize() / PageSize; | ||
| 76 | m_ref_counts.resize(count); | ||
| 77 | |||
| 78 | for (size_t i = 0; i < count; i++) { | ||
| 79 | m_ref_counts[i] = 0; | ||
| 80 | } | ||
| 81 | } | ||
| 82 | |||
| 83 | RefCount* GetRefCountPointer(VAddr addr) { | ||
| 84 | return m_ref_counts.data() + ((addr - this->GetAddress()) / PageSize); | ||
| 85 | } | ||
| 86 | |||
| 87 | private: | ||
| 88 | using BaseHeap = KDynamicSlabHeap<impl::PageTablePage, true>; | ||
| 89 | |||
| 90 | std::vector<RefCount> m_ref_counts; | ||
| 91 | }; | ||
| 92 | |||
| 93 | } // namespace Kernel | ||
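KPageTableSlabHeap keeps one 16-bit reference count per page-table page: Open takes references and Close reports when the count returns to zero so the caller can free the page, with KPageTableManager simply forwarding to these. A minimal standalone sketch of that bookkeeping, assuming a contiguous heap range and hypothetical addresses:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Simplified per-page reference-count table in the spirit of
// KPageTableSlabHeap; addresses and sizes are made up for the example.
class PageRefCounts {
public:
    using RefCount = std::uint16_t;
    static constexpr std::uint64_t kPageSize = 0x1000;

    PageRefCounts(std::uint64_t heap_address, std::uint64_t heap_size)
        : m_address(heap_address), m_counts(heap_size / kPageSize, 0) {}

    void Open(std::uint64_t addr, int count) {
        // Opening a page takes references on it.
        At(addr) += static_cast<RefCount>(count);
        assert(At(addr) > 0);
    }

    bool Close(std::uint64_t addr, int count) {
        // Closing drops references; the caller frees the page when this
        // returns true (the count reached zero).
        assert(At(addr) >= count);
        At(addr) -= static_cast<RefCount>(count);
        return At(addr) == 0;
    }

private:
    RefCount& At(std::uint64_t addr) {
        return m_counts[(addr - m_address) / kPageSize];
    }

    std::uint64_t m_address;
    std::vector<RefCount> m_counts;
};

int main() {
    PageRefCounts rc(/*heap_address=*/0x10000000, /*heap_size=*/0x4000);
    rc.Open(0x10001000, 1);
    rc.Open(0x10001000, 2);
    assert(!rc.Close(0x10001000, 1));
    assert(rc.Close(0x10001000, 2)); // last reference: page can be freed
}
```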
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 8c3495e5a..55a9c5fae 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp | |||
| @@ -38,7 +38,7 @@ namespace { | |||
| 38 | */ | 38 | */ |
| 39 | void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority, VAddr stack_top) { | 39 | void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority, VAddr stack_top) { |
| 40 | const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart(); | 40 | const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart(); |
| 41 | ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1)); | 41 | ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::ThreadCountMax, 1)); |
| 42 | 42 | ||
| 43 | KThread* thread = KThread::Create(system.Kernel()); | 43 | KThread* thread = KThread::Create(system.Kernel()); |
| 44 | SCOPE_EXIT({ thread->Close(); }); | 44 | SCOPE_EXIT({ thread->Close(); }); |
| @@ -124,7 +124,7 @@ void KProcess::DecrementRunningThreadCount() { | |||
| 124 | } | 124 | } |
| 125 | 125 | ||
| 126 | u64 KProcess::GetTotalPhysicalMemoryAvailable() { | 126 | u64 KProcess::GetTotalPhysicalMemoryAvailable() { |
| 127 | const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) + | 127 | const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) + |
| 128 | page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size + | 128 | page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size + |
| 129 | main_thread_stack_size}; | 129 | main_thread_stack_size}; |
| 130 | if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); | 130 | if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); |
| @@ -349,8 +349,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: | |||
| 349 | // We currently do not support process-specific system resource | 349 | // We currently do not support process-specific system resource |
| 350 | UNIMPLEMENTED_IF(system_resource_size != 0); | 350 | UNIMPLEMENTED_IF(system_resource_size != 0); |
| 351 | 351 | ||
| 352 | KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory, | 352 | KScopedResourceReservation memory_reservation( |
| 353 | code_size + system_resource_size); | 353 | resource_limit, LimitableResource::PhysicalMemoryMax, code_size + system_resource_size); |
| 354 | if (!memory_reservation.Succeeded()) { | 354 | if (!memory_reservation.Succeeded()) { |
| 355 | LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes", | 355 | LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes", |
| 356 | code_size + system_resource_size); | 356 | code_size + system_resource_size); |
| @@ -358,8 +358,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: | |||
| 358 | } | 358 | } |
| 359 | // Initialize process address space | 359 | // Initialize process address space |
| 360 | if (const Result result{page_table.InitializeForProcess( | 360 | if (const Result result{page_table.InitializeForProcess( |
| 361 | metadata.GetAddressSpaceType(), false, 0x8000000, code_size, | 361 | metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application, |
| 362 | &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)}; | 362 | 0x8000000, code_size, &kernel.GetSystemSystemResource(), resource_limit)}; |
| 363 | result.IsError()) { | 363 | result.IsError()) { |
| 364 | R_RETURN(result); | 364 | R_RETURN(result); |
| 365 | } | 365 | } |
| @@ -406,8 +406,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: | |||
| 406 | 406 | ||
| 407 | void KProcess::Run(s32 main_thread_priority, u64 stack_size) { | 407 | void KProcess::Run(s32 main_thread_priority, u64 stack_size) { |
| 408 | AllocateMainThreadStack(stack_size); | 408 | AllocateMainThreadStack(stack_size); |
| 409 | resource_limit->Reserve(LimitableResource::Threads, 1); | 409 | resource_limit->Reserve(LimitableResource::ThreadCountMax, 1); |
| 410 | resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size); | 410 | resource_limit->Reserve(LimitableResource::PhysicalMemoryMax, main_thread_stack_size); |
| 411 | 411 | ||
| 412 | const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)}; | 412 | const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)}; |
| 413 | ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError()); | 413 | ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError()); |
| @@ -442,7 +442,7 @@ void KProcess::PrepareForTermination() { | |||
| 442 | plr_address = 0; | 442 | plr_address = 0; |
| 443 | 443 | ||
| 444 | if (resource_limit) { | 444 | if (resource_limit) { |
| 445 | resource_limit->Release(LimitableResource::PhysicalMemory, | 445 | resource_limit->Release(LimitableResource::PhysicalMemoryMax, |
| 446 | main_thread_stack_size + image_size); | 446 | main_thread_stack_size + image_size); |
| 447 | } | 447 | } |
| 448 | 448 | ||
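The call sites in this file reserve memory and thread counts through KScopedResourceReservation before committing to an operation. A sketch of the reserve-then-commit-or-release pattern, using hypothetical stand-in types rather than the kernel's KResourceLimit:

```cpp
#include <cstdint>

// Hypothetical stand-in for a resource limit with reserve/release accounting.
class ResourceLimit {
public:
    explicit ResourceLimit(std::uint64_t limit) : m_limit(limit) {}
    bool Reserve(std::uint64_t amount) {
        if (m_used + amount > m_limit) {
            return false;
        }
        m_used += amount;
        return true;
    }
    void Release(std::uint64_t amount) {
        m_used -= amount;
    }

private:
    std::uint64_t m_limit;
    std::uint64_t m_used{};
};

// Reserve on construction, release on destruction unless the caller commits
// the reservation after the operation succeeds.
class ScopedReservation {
public:
    ScopedReservation(ResourceLimit& limit, std::uint64_t amount)
        : m_limit(limit), m_amount(amount), m_succeeded(limit.Reserve(amount)) {}
    ~ScopedReservation() {
        if (m_succeeded && !m_committed) {
            m_limit.Release(m_amount);
        }
    }
    bool Succeeded() const {
        return m_succeeded;
    }
    void Commit() {
        m_committed = true;
    }

private:
    ResourceLimit& m_limit;
    std::uint64_t m_amount;
    bool m_succeeded;
    bool m_committed{};
};

int main() {
    ResourceLimit physical_memory(0x1000000);
    ScopedReservation reservation(physical_memory, 0x8000);
    if (!reservation.Succeeded()) {
        return 1; // would map to ResultLimitReached in the kernel code
    }
    // ... perform the mapping; any early return releases the reservation ...
    reservation.Commit(); // keep the reservation once the mapping succeeded
    return 0;
}
```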
diff --git a/src/core/hle/kernel/k_resource_limit.cpp b/src/core/hle/kernel/k_resource_limit.cpp index 010dcf99e..b9d22b414 100644 --- a/src/core/hle/kernel/k_resource_limit.cpp +++ b/src/core/hle/kernel/k_resource_limit.cpp | |||
| @@ -159,12 +159,13 @@ KResourceLimit* CreateResourceLimitForProcess(Core::System& system, s64 physical | |||
| 159 | // TODO(bunnei): These values are the system defaults, the limits for service processes are | 159 | // TODO(bunnei): These values are the system defaults, the limits for service processes are |
| 160 | // lower. These should use the correct limit values. | 160 | // lower. These should use the correct limit values. |
| 161 | 161 | ||
| 162 | ASSERT(resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, physical_memory_size) | 162 | ASSERT(resource_limit->SetLimitValue(LimitableResource::PhysicalMemoryMax, physical_memory_size) |
| 163 | .IsSuccess()); | 163 | .IsSuccess()); |
| 164 | ASSERT(resource_limit->SetLimitValue(LimitableResource::Threads, 800).IsSuccess()); | 164 | ASSERT(resource_limit->SetLimitValue(LimitableResource::ThreadCountMax, 800).IsSuccess()); |
| 165 | ASSERT(resource_limit->SetLimitValue(LimitableResource::Events, 900).IsSuccess()); | 165 | ASSERT(resource_limit->SetLimitValue(LimitableResource::EventCountMax, 900).IsSuccess()); |
| 166 | ASSERT(resource_limit->SetLimitValue(LimitableResource::TransferMemory, 200).IsSuccess()); | 166 | ASSERT( |
| 167 | ASSERT(resource_limit->SetLimitValue(LimitableResource::Sessions, 1133).IsSuccess()); | 167 | resource_limit->SetLimitValue(LimitableResource::TransferMemoryCountMax, 200).IsSuccess()); |
| 168 | ASSERT(resource_limit->SetLimitValue(LimitableResource::SessionCountMax, 1133).IsSuccess()); | ||
| 168 | 169 | ||
| 169 | return resource_limit; | 170 | return resource_limit; |
| 170 | } | 171 | } |
diff --git a/src/core/hle/kernel/k_resource_limit.h b/src/core/hle/kernel/k_resource_limit.h index 65c98c979..2573d1b7c 100644 --- a/src/core/hle/kernel/k_resource_limit.h +++ b/src/core/hle/kernel/k_resource_limit.h | |||
| @@ -16,15 +16,8 @@ class CoreTiming; | |||
| 16 | 16 | ||
| 17 | namespace Kernel { | 17 | namespace Kernel { |
| 18 | class KernelCore; | 18 | class KernelCore; |
| 19 | enum class LimitableResource : u32 { | 19 | |
| 20 | PhysicalMemory = 0, | 20 | using LimitableResource = Svc::LimitableResource; |
| 21 | Threads = 1, | ||
| 22 | Events = 2, | ||
| 23 | TransferMemory = 3, | ||
| 24 | Sessions = 4, | ||
| 25 | |||
| 26 | Count, | ||
| 27 | }; | ||
| 28 | 21 | ||
| 29 | constexpr bool IsValidResourceType(LimitableResource type) { | 22 | constexpr bool IsValidResourceType(LimitableResource type) { |
| 30 | return type < LimitableResource::Count; | 23 | return type < LimitableResource::Count; |
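The local LimitableResource enum in this header is replaced by an alias of Svc::LimitableResource, which is why every call site in this change switches to the CountMax/Max spellings. An illustrative stand-in showing how the renamed enumerators line up with the old ones, assuming the SVC definition keeps the old ordinals (the authoritative definition lives in the Svc headers):

```cpp
#include <cstdint>

// Illustrative stand-in only, not the real Svc declaration.
enum class LimitableResource : std::uint32_t {
    PhysicalMemoryMax = 0,      // was PhysicalMemory
    ThreadCountMax = 1,         // was Threads
    EventCountMax = 2,          // was Events
    TransferMemoryCountMax = 3, // was TransferMemory
    SessionCountMax = 4,        // was Sessions
    Count,
};

constexpr bool IsValidResourceType(LimitableResource type) {
    return type < LimitableResource::Count;
}
static_assert(IsValidResourceType(LimitableResource::SessionCountMax));
```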
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index b1cabbca0..d6676904b 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp | |||
| @@ -384,7 +384,8 @@ void KScheduler::SwitchThread(KThread* next_thread) { | |||
| 384 | 384 | ||
| 385 | void KScheduler::ScheduleImpl() { | 385 | void KScheduler::ScheduleImpl() { |
| 386 | // First, clear the needs scheduling bool. | 386 | // First, clear the needs scheduling bool. |
| 387 | m_state.needs_scheduling.store(false, std::memory_order_seq_cst); | 387 | m_state.needs_scheduling.store(false, std::memory_order_relaxed); |
| 388 | std::atomic_thread_fence(std::memory_order_seq_cst); | ||
| 388 | 389 | ||
| 389 | // Load the appropriate thread pointers for scheduling. | 390 | // Load the appropriate thread pointers for scheduling. |
| 390 | KThread* const cur_thread{GetCurrentThreadPointer(kernel)}; | 391 | KThread* const cur_thread{GetCurrentThreadPointer(kernel)}; |
| @@ -400,7 +401,8 @@ void KScheduler::ScheduleImpl() { | |||
| 400 | // If there aren't, we want to check if the highest priority thread is the same as the current | 401 | // If there aren't, we want to check if the highest priority thread is the same as the current |
| 401 | // thread. | 402 | // thread. |
| 402 | if (highest_priority_thread == cur_thread) { | 403 | if (highest_priority_thread == cur_thread) { |
| 403 | // If they're the same, then we can just return. | 404 | // If they're the same, then we can just issue a memory barrier and return. |
| 405 | std::atomic_thread_fence(std::memory_order_seq_cst); | ||
| 404 | return; | 406 | return; |
| 405 | } | 407 | } |
| 406 | 408 | ||
| @@ -476,7 +478,8 @@ void KScheduler::ScheduleImplFiber() { | |||
| 476 | 478 | ||
| 477 | // We failed to successfully do the context switch, and need to retry. | 479 | // We failed to successfully do the context switch, and need to retry. |
| 478 | // Clear needs_scheduling. | 480 | // Clear needs_scheduling. |
| 479 | m_state.needs_scheduling.store(false, std::memory_order_seq_cst); | 481 | m_state.needs_scheduling.store(false, std::memory_order_relaxed); |
| 482 | std::atomic_thread_fence(std::memory_order_seq_cst); | ||
| 480 | 483 | ||
| 481 | // Refresh the highest priority thread. | 484 | // Refresh the highest priority thread. |
| 482 | highest_priority_thread = m_state.highest_priority_thread; | 485 | highest_priority_thread = m_state.highest_priority_thread; |
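
The scheduler hunks above trade a sequentially consistent store for a relaxed store followed by an explicit fence, and add a fence on the early-return path. A self-contained sketch of that store-then-fence pattern is below; the flag and thread pointer are illustrative stand-ins, not the emulator's types.

    #include <atomic>

    std::atomic<bool> needs_scheduling{true};
    std::atomic<void*> highest_priority_thread{nullptr};

    void* ScheduleSketch() {
        // Clear the flag with a cheap relaxed store, then order it against the
        // loads below with a full fence, instead of paying for a seq_cst store.
        needs_scheduling.store(false, std::memory_order_relaxed);
        std::atomic_thread_fence(std::memory_order_seq_cst);

        // Loads after the fence observe state published by other cores before
        // they set needs_scheduling again.
        return highest_priority_thread.load(std::memory_order_relaxed);
    }
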
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h index 73314b45e..129d60472 100644 --- a/src/core/hle/kernel/k_scheduler_lock.h +++ b/src/core/hle/kernel/k_scheduler_lock.h | |||
| @@ -60,6 +60,9 @@ public: | |||
| 60 | 60 | ||
| 61 | // Release an instance of the lock. | 61 | // Release an instance of the lock. |
| 62 | if ((--lock_count) == 0) { | 62 | if ((--lock_count) == 0) { |
| 63 | // Perform a memory barrier here. | ||
| 64 | std::atomic_thread_fence(std::memory_order_seq_cst); | ||
| 65 | |||
| 63 | // We're no longer going to hold the lock. Take note of what cores need scheduling. | 66 | // We're no longer going to hold the lock. Take note of what cores need scheduling. |
| 64 | const u64 cores_needing_scheduling = | 67 | const u64 cores_needing_scheduling = |
| 65 | SchedulerType::UpdateHighestPriorityThreads(kernel); | 68 | SchedulerType::UpdateHighestPriorityThreads(kernel); |
diff --git a/src/core/hle/kernel/k_session.cpp b/src/core/hle/kernel/k_session.cpp index 7a6534ac3..b6f6fe9d9 100644 --- a/src/core/hle/kernel/k_session.cpp +++ b/src/core/hle/kernel/k_session.cpp | |||
| @@ -76,7 +76,7 @@ void KSession::OnClientClosed() { | |||
| 76 | void KSession::PostDestroy(uintptr_t arg) { | 76 | void KSession::PostDestroy(uintptr_t arg) { |
| 77 | // Release the session count resource the owner process holds. | 77 | // Release the session count resource the owner process holds. |
| 78 | KProcess* owner = reinterpret_cast<KProcess*>(arg); | 78 | KProcess* owner = reinterpret_cast<KProcess*>(arg); |
| 79 | owner->GetResourceLimit()->Release(LimitableResource::Sessions, 1); | 79 | owner->GetResourceLimit()->Release(LimitableResource::SessionCountMax, 1); |
| 80 | owner->Close(); | 80 | owner->Close(); |
| 81 | } | 81 | } |
| 82 | 82 | ||
diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp index a039cc591..10cd4c43d 100644 --- a/src/core/hle/kernel/k_shared_memory.cpp +++ b/src/core/hle/kernel/k_shared_memory.cpp | |||
| @@ -14,7 +14,7 @@ namespace Kernel { | |||
| 14 | KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {} | 14 | KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {} |
| 15 | 15 | ||
| 16 | KSharedMemory::~KSharedMemory() { | 16 | KSharedMemory::~KSharedMemory() { |
| 17 | kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemory, size); | 17 | kernel.GetSystemResourceLimit()->Release(LimitableResource::PhysicalMemoryMax, size); |
| 18 | } | 18 | } |
| 19 | 19 | ||
| 20 | Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_, | 20 | Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_, |
| @@ -35,7 +35,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o | |||
| 35 | KResourceLimit* reslimit = kernel.GetSystemResourceLimit(); | 35 | KResourceLimit* reslimit = kernel.GetSystemResourceLimit(); |
| 36 | 36 | ||
| 37 | // Reserve memory for ourselves. | 37 | // Reserve memory for ourselves. |
| 38 | KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemory, | 38 | KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemoryMax, |
| 39 | size_); | 39 | size_); |
| 40 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 40 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 41 | 41 | ||
| @@ -57,7 +57,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o | |||
| 57 | 57 | ||
| 58 | void KSharedMemory::Finalize() { | 58 | void KSharedMemory::Finalize() { |
| 59 | // Release the memory reservation. | 59 | // Release the memory reservation. |
| 60 | resource_limit->Release(LimitableResource::PhysicalMemory, size); | 60 | resource_limit->Release(LimitableResource::PhysicalMemoryMax, size); |
| 61 | resource_limit->Close(); | 61 | resource_limit->Close(); |
| 62 | 62 | ||
| 63 | // Perform inherited finalization. | 63 | // Perform inherited finalization. |
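
Several hunks in this commit (shared memory above, transfer memory and the service thread below) go through a scoped reservation against a resource limit: reserve up front, release automatically unless committed. The class below is a stand-alone sketch of that idiom, not the emulator's KScopedResourceReservation.

    #include <cstddef>

    class LimitSketch {
    public:
        bool Reserve(std::size_t n) {
            if (used + n > max) {
                return false;
            }
            used += n;
            return true;
        }
        void Release(std::size_t n) {
            used -= n;
        }

    private:
        std::size_t used{0};
        std::size_t max{100};
    };

    class ScopedReservationSketch {
    public:
        ScopedReservationSketch(LimitSketch& limit_, std::size_t n)
            : limit{limit_}, count{n}, succeeded{limit_.Reserve(n)} {}

        // If the caller never commits (for example, a later step failed), the
        // reservation is handed back automatically on scope exit.
        ~ScopedReservationSketch() {
            if (succeeded && !committed) {
                limit.Release(count);
            }
        }

        bool Succeeded() const { return succeeded; }
        void Commit() { committed = true; }

    private:
        LimitSketch& limit;
        std::size_t count;
        bool succeeded;
        bool committed{false};
    };
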
diff --git a/src/core/hle/kernel/k_system_resource.cpp b/src/core/hle/kernel/k_system_resource.cpp new file mode 100644 index 000000000..4cc377a6c --- /dev/null +++ b/src/core/hle/kernel/k_system_resource.cpp | |||
| @@ -0,0 +1,26 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #include "core/hle/kernel/k_system_resource.h" | ||
| 5 | |||
| 6 | namespace Kernel { | ||
| 7 | |||
| 8 | Result KSecureSystemResource::Initialize([[maybe_unused]] size_t size, | ||
| 9 | [[maybe_unused]] KResourceLimit* resource_limit, | ||
| 10 | [[maybe_unused]] KMemoryManager::Pool pool) { | ||
| 11 | // Unimplemented | ||
| 12 | UNREACHABLE(); | ||
| 13 | } | ||
| 14 | |||
| 15 | void KSecureSystemResource::Finalize() { | ||
| 16 | // Unimplemented | ||
| 17 | UNREACHABLE(); | ||
| 18 | } | ||
| 19 | |||
| 20 | size_t KSecureSystemResource::CalculateRequiredSecureMemorySize( | ||
| 21 | [[maybe_unused]] size_t size, [[maybe_unused]] KMemoryManager::Pool pool) { | ||
| 22 | // Unimplemented | ||
| 23 | UNREACHABLE(); | ||
| 24 | } | ||
| 25 | |||
| 26 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/k_system_resource.h b/src/core/hle/kernel/k_system_resource.h new file mode 100644 index 000000000..9a991f725 --- /dev/null +++ b/src/core/hle/kernel/k_system_resource.h | |||
| @@ -0,0 +1,137 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include "common/assert.h" | ||
| 7 | #include "common/common_types.h" | ||
| 8 | #include "core/hle/kernel/k_auto_object.h" | ||
| 9 | #include "core/hle/kernel/k_dynamic_resource_manager.h" | ||
| 10 | #include "core/hle/kernel/k_memory_manager.h" | ||
| 11 | #include "core/hle/kernel/k_page_table_manager.h" | ||
| 12 | #include "core/hle/kernel/k_resource_limit.h" | ||
| 13 | #include "core/hle/kernel/slab_helpers.h" | ||
| 14 | |||
| 15 | namespace Kernel { | ||
| 16 | |||
| 17 | // NOTE: Nintendo's implementation does not have the "is_secure_resource" field, and instead uses | ||
| 18 | // virtual IsSecureResource(). | ||
| 19 | |||
| 20 | class KSystemResource : public KAutoObject { | ||
| 21 | KERNEL_AUTOOBJECT_TRAITS(KSystemResource, KAutoObject); | ||
| 22 | |||
| 23 | public: | ||
| 24 | explicit KSystemResource(KernelCore& kernel_) : KAutoObject(kernel_) {} | ||
| 25 | |||
| 26 | protected: | ||
| 27 | void SetSecureResource() { | ||
| 28 | m_is_secure_resource = true; | ||
| 29 | } | ||
| 30 | |||
| 31 | public: | ||
| 32 | virtual void Destroy() override { | ||
| 33 | UNREACHABLE_MSG("KSystemResource::Destroy() was called"); | ||
| 34 | } | ||
| 35 | |||
| 36 | bool IsSecureResource() const { | ||
| 37 | return m_is_secure_resource; | ||
| 38 | } | ||
| 39 | |||
| 40 | void SetManagers(KMemoryBlockSlabManager& mb, KBlockInfoManager& bi, KPageTableManager& pt) { | ||
| 41 | ASSERT(m_p_memory_block_slab_manager == nullptr); | ||
| 42 | ASSERT(m_p_block_info_manager == nullptr); | ||
| 43 | ASSERT(m_p_page_table_manager == nullptr); | ||
| 44 | |||
| 45 | m_p_memory_block_slab_manager = std::addressof(mb); | ||
| 46 | m_p_block_info_manager = std::addressof(bi); | ||
| 47 | m_p_page_table_manager = std::addressof(pt); | ||
| 48 | } | ||
| 49 | |||
| 50 | const KMemoryBlockSlabManager& GetMemoryBlockSlabManager() const { | ||
| 51 | return *m_p_memory_block_slab_manager; | ||
| 52 | } | ||
| 53 | const KBlockInfoManager& GetBlockInfoManager() const { | ||
| 54 | return *m_p_block_info_manager; | ||
| 55 | } | ||
| 56 | const KPageTableManager& GetPageTableManager() const { | ||
| 57 | return *m_p_page_table_manager; | ||
| 58 | } | ||
| 59 | |||
| 60 | KMemoryBlockSlabManager& GetMemoryBlockSlabManager() { | ||
| 61 | return *m_p_memory_block_slab_manager; | ||
| 62 | } | ||
| 63 | KBlockInfoManager& GetBlockInfoManager() { | ||
| 64 | return *m_p_block_info_manager; | ||
| 65 | } | ||
| 66 | KPageTableManager& GetPageTableManager() { | ||
| 67 | return *m_p_page_table_manager; | ||
| 68 | } | ||
| 69 | |||
| 70 | KMemoryBlockSlabManager* GetMemoryBlockSlabManagerPointer() { | ||
| 71 | return m_p_memory_block_slab_manager; | ||
| 72 | } | ||
| 73 | KBlockInfoManager* GetBlockInfoManagerPointer() { | ||
| 74 | return m_p_block_info_manager; | ||
| 75 | } | ||
| 76 | KPageTableManager* GetPageTableManagerPointer() { | ||
| 77 | return m_p_page_table_manager; | ||
| 78 | } | ||
| 79 | |||
| 80 | private: | ||
| 81 | KMemoryBlockSlabManager* m_p_memory_block_slab_manager{}; | ||
| 82 | KBlockInfoManager* m_p_block_info_manager{}; | ||
| 83 | KPageTableManager* m_p_page_table_manager{}; | ||
| 84 | bool m_is_secure_resource{false}; | ||
| 85 | }; | ||
| 86 | |||
| 87 | class KSecureSystemResource final | ||
| 88 | : public KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource> { | ||
| 89 | public: | ||
| 90 | explicit KSecureSystemResource(KernelCore& kernel_) | ||
| 91 | : KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel_) { | ||
| 92 | // Mark ourselves as being a secure resource. | ||
| 93 | this->SetSecureResource(); | ||
| 94 | } | ||
| 95 | |||
| 96 | Result Initialize(size_t size, KResourceLimit* resource_limit, KMemoryManager::Pool pool); | ||
| 97 | void Finalize(); | ||
| 98 | |||
| 99 | bool IsInitialized() const { | ||
| 100 | return m_is_initialized; | ||
| 101 | } | ||
| 102 | static void PostDestroy([[maybe_unused]] uintptr_t arg) {} | ||
| 103 | |||
| 104 | size_t CalculateRequiredSecureMemorySize() const { | ||
| 105 | return CalculateRequiredSecureMemorySize(m_resource_size, m_resource_pool); | ||
| 106 | } | ||
| 107 | |||
| 108 | size_t GetSize() const { | ||
| 109 | return m_resource_size; | ||
| 110 | } | ||
| 111 | size_t GetUsedSize() const { | ||
| 112 | return m_dynamic_page_manager.GetUsed() * PageSize; | ||
| 113 | } | ||
| 114 | |||
| 115 | const KDynamicPageManager& GetDynamicPageManager() const { | ||
| 116 | return m_dynamic_page_manager; | ||
| 117 | } | ||
| 118 | |||
| 119 | public: | ||
| 120 | static size_t CalculateRequiredSecureMemorySize(size_t size, KMemoryManager::Pool pool); | ||
| 121 | |||
| 122 | private: | ||
| 123 | bool m_is_initialized{}; | ||
| 124 | KMemoryManager::Pool m_resource_pool{}; | ||
| 125 | KDynamicPageManager m_dynamic_page_manager; | ||
| 126 | KMemoryBlockSlabManager m_memory_block_slab_manager; | ||
| 127 | KBlockInfoManager m_block_info_manager; | ||
| 128 | KPageTableManager m_page_table_manager; | ||
| 129 | KMemoryBlockSlabHeap m_memory_block_heap; | ||
| 130 | KBlockInfoSlabHeap m_block_info_heap; | ||
| 131 | KPageTableSlabHeap m_page_table_heap; | ||
| 132 | KResourceLimit* m_resource_limit{}; | ||
| 133 | VAddr m_resource_address{}; | ||
| 134 | size_t m_resource_size{}; | ||
| 135 | }; | ||
| 136 | |||
| 137 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index cc88d08f0..21207fe99 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp | |||
| @@ -263,9 +263,9 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_ | |||
| 263 | R_SUCCEED(); | 263 | R_SUCCEED(); |
| 264 | } | 264 | } |
| 265 | 265 | ||
| 266 | Result KThread::InitializeDummyThread(KThread* thread) { | 266 | Result KThread::InitializeDummyThread(KThread* thread, KProcess* owner) { |
| 267 | // Initialize the thread. | 267 | // Initialize the thread. |
| 268 | R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy)); | 268 | R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, owner, ThreadType::Dummy)); |
| 269 | 269 | ||
| 270 | // Initialize emulation parameters. | 270 | // Initialize emulation parameters. |
| 271 | thread->stack_parameters.disable_count = 0; | 271 | thread->stack_parameters.disable_count = 0; |
| @@ -303,7 +303,7 @@ void KThread::PostDestroy(uintptr_t arg) { | |||
| 303 | const bool resource_limit_release_hint = (arg & 1); | 303 | const bool resource_limit_release_hint = (arg & 1); |
| 304 | const s64 hint_value = (resource_limit_release_hint ? 0 : 1); | 304 | const s64 hint_value = (resource_limit_release_hint ? 0 : 1); |
| 305 | if (owner != nullptr) { | 305 | if (owner != nullptr) { |
| 306 | owner->GetResourceLimit()->Release(LimitableResource::Threads, 1, hint_value); | 306 | owner->GetResourceLimit()->Release(LimitableResource::ThreadCountMax, 1, hint_value); |
| 307 | owner->Close(); | 307 | owner->Close(); |
| 308 | } | 308 | } |
| 309 | } | 309 | } |
| @@ -1054,7 +1054,7 @@ void KThread::Exit() { | |||
| 1054 | 1054 | ||
| 1055 | // Release the thread resource hint, running thread count from parent. | 1055 | // Release the thread resource hint, running thread count from parent. |
| 1056 | if (parent != nullptr) { | 1056 | if (parent != nullptr) { |
| 1057 | parent->GetResourceLimit()->Release(Kernel::LimitableResource::Threads, 0, 1); | 1057 | parent->GetResourceLimit()->Release(Kernel::LimitableResource::ThreadCountMax, 0, 1); |
| 1058 | resource_limit_release_hint = true; | 1058 | resource_limit_release_hint = true; |
| 1059 | parent->DecrementRunningThreadCount(); | 1059 | parent->DecrementRunningThreadCount(); |
| 1060 | } | 1060 | } |
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index 30aa10c9a..f38c92bff 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h | |||
| @@ -415,7 +415,7 @@ public: | |||
| 415 | 415 | ||
| 416 | static void PostDestroy(uintptr_t arg); | 416 | static void PostDestroy(uintptr_t arg); |
| 417 | 417 | ||
| 418 | [[nodiscard]] static Result InitializeDummyThread(KThread* thread); | 418 | [[nodiscard]] static Result InitializeDummyThread(KThread* thread, KProcess* owner); |
| 419 | 419 | ||
| 420 | [[nodiscard]] static Result InitializeMainThread(Core::System& system, KThread* thread, | 420 | [[nodiscard]] static Result InitializeMainThread(Core::System& system, KThread* thread, |
| 421 | s32 virt_core); | 421 | s32 virt_core); |
diff --git a/src/core/hle/kernel/k_transfer_memory.cpp b/src/core/hle/kernel/k_transfer_memory.cpp index b0320eb73..9f34c2d46 100644 --- a/src/core/hle/kernel/k_transfer_memory.cpp +++ b/src/core/hle/kernel/k_transfer_memory.cpp | |||
| @@ -37,7 +37,7 @@ void KTransferMemory::Finalize() { | |||
| 37 | 37 | ||
| 38 | void KTransferMemory::PostDestroy(uintptr_t arg) { | 38 | void KTransferMemory::PostDestroy(uintptr_t arg) { |
| 39 | KProcess* owner = reinterpret_cast<KProcess*>(arg); | 39 | KProcess* owner = reinterpret_cast<KProcess*>(arg); |
| 40 | owner->GetResourceLimit()->Release(LimitableResource::TransferMemory, 1); | 40 | owner->GetResourceLimit()->Release(LimitableResource::TransferMemoryCountMax, 1); |
| 41 | owner->Close(); | 41 | owner->Close(); |
| 42 | } | 42 | } |
| 43 | 43 | ||
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 09c36ee09..b77723503 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -28,10 +28,12 @@ | |||
| 28 | #include "core/hle/kernel/k_handle_table.h" | 28 | #include "core/hle/kernel/k_handle_table.h" |
| 29 | #include "core/hle/kernel/k_memory_layout.h" | 29 | #include "core/hle/kernel/k_memory_layout.h" |
| 30 | #include "core/hle/kernel/k_memory_manager.h" | 30 | #include "core/hle/kernel/k_memory_manager.h" |
| 31 | #include "core/hle/kernel/k_page_buffer.h" | ||
| 31 | #include "core/hle/kernel/k_process.h" | 32 | #include "core/hle/kernel/k_process.h" |
| 32 | #include "core/hle/kernel/k_resource_limit.h" | 33 | #include "core/hle/kernel/k_resource_limit.h" |
| 33 | #include "core/hle/kernel/k_scheduler.h" | 34 | #include "core/hle/kernel/k_scheduler.h" |
| 34 | #include "core/hle/kernel/k_shared_memory.h" | 35 | #include "core/hle/kernel/k_shared_memory.h" |
| 36 | #include "core/hle/kernel/k_system_resource.h" | ||
| 35 | #include "core/hle/kernel/k_thread.h" | 37 | #include "core/hle/kernel/k_thread.h" |
| 36 | #include "core/hle/kernel/k_worker_task_manager.h" | 38 | #include "core/hle/kernel/k_worker_task_manager.h" |
| 37 | #include "core/hle/kernel/kernel.h" | 39 | #include "core/hle/kernel/kernel.h" |
| @@ -47,6 +49,11 @@ MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70)); | |||
| 47 | namespace Kernel { | 49 | namespace Kernel { |
| 48 | 50 | ||
| 49 | struct KernelCore::Impl { | 51 | struct KernelCore::Impl { |
| 52 | static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000; | ||
| 53 | static constexpr size_t SystemMemoryBlockSlabHeapSize = 10000; | ||
| 54 | static constexpr size_t BlockInfoSlabHeapSize = 4000; | ||
| 55 | static constexpr size_t ReservedDynamicPageCount = 64; | ||
| 56 | |||
| 50 | explicit Impl(Core::System& system_, KernelCore& kernel_) | 57 | explicit Impl(Core::System& system_, KernelCore& kernel_) |
| 51 | : time_manager{system_}, service_threads_manager{1, "ServiceThreadsManager"}, | 58 | : time_manager{system_}, service_threads_manager{1, "ServiceThreadsManager"}, |
| 52 | service_thread_barrier{2}, system{system_} {} | 59 | service_thread_barrier{2}, system{system_} {} |
| @@ -71,7 +78,6 @@ struct KernelCore::Impl { | |||
| 71 | // Initialize kernel memory and resources. | 78 | // Initialize kernel memory and resources. |
| 72 | InitializeSystemResourceLimit(kernel, system.CoreTiming()); | 79 | InitializeSystemResourceLimit(kernel, system.CoreTiming()); |
| 73 | InitializeMemoryLayout(); | 80 | InitializeMemoryLayout(); |
| 74 | Init::InitializeKPageBufferSlabHeap(system); | ||
| 75 | InitializeShutdownThreads(); | 81 | InitializeShutdownThreads(); |
| 76 | InitializePhysicalCores(); | 82 | InitializePhysicalCores(); |
| 77 | InitializePreemption(kernel); | 83 | InitializePreemption(kernel); |
| @@ -81,12 +87,13 @@ struct KernelCore::Impl { | |||
| 81 | const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion(); | 87 | const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion(); |
| 82 | ASSERT(pt_heap_region.GetEndAddress() != 0); | 88 | ASSERT(pt_heap_region.GetEndAddress() != 0); |
| 83 | 89 | ||
| 84 | InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize()); | 90 | InitializeResourceManagers(kernel, pt_heap_region.GetAddress(), |
| 91 | pt_heap_region.GetSize()); | ||
| 85 | } | 92 | } |
| 86 | 93 | ||
| 87 | RegisterHostThread(); | 94 | RegisterHostThread(nullptr); |
| 88 | 95 | ||
| 89 | default_service_thread = CreateServiceThread(kernel, "DefaultServiceThread"); | 96 | default_service_thread = &CreateServiceThread(kernel, "DefaultServiceThread"); |
| 90 | } | 97 | } |
| 91 | 98 | ||
| 92 | void InitializeCores() { | 99 | void InitializeCores() { |
| @@ -222,18 +229,22 @@ struct KernelCore::Impl { | |||
| 222 | const auto kernel_size{sizes.second}; | 229 | const auto kernel_size{sizes.second}; |
| 223 | 230 | ||
| 224 | // If setting the default system values fails, then something seriously wrong has occurred. | 231 | // If setting the default system values fails, then something seriously wrong has occurred. |
| 225 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, total_size) | 232 | ASSERT( |
| 233 | system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemoryMax, total_size) | ||
| 234 | .IsSuccess()); | ||
| 235 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::ThreadCountMax, 800) | ||
| 236 | .IsSuccess()); | ||
| 237 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::EventCountMax, 900) | ||
| 238 | .IsSuccess()); | ||
| 239 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::TransferMemoryCountMax, 200) | ||
| 226 | .IsSuccess()); | 240 | .IsSuccess()); |
| 227 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Threads, 800).IsSuccess()); | 241 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::SessionCountMax, 1133) |
| 228 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Events, 900).IsSuccess()); | ||
| 229 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::TransferMemory, 200) | ||
| 230 | .IsSuccess()); | 242 | .IsSuccess()); |
| 231 | ASSERT(system_resource_limit->SetLimitValue(LimitableResource::Sessions, 1133).IsSuccess()); | 243 | system_resource_limit->Reserve(LimitableResource::PhysicalMemoryMax, kernel_size); |
| 232 | system_resource_limit->Reserve(LimitableResource::PhysicalMemory, kernel_size); | ||
| 233 | 244 | ||
| 234 | // Reserve secure applet memory, introduced in firmware 5.0.0 | 245 | // Reserve secure applet memory, introduced in firmware 5.0.0 |
| 235 | constexpr u64 secure_applet_memory_size{4_MiB}; | 246 | constexpr u64 secure_applet_memory_size{4_MiB}; |
| 236 | ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemory, | 247 | ASSERT(system_resource_limit->Reserve(LimitableResource::PhysicalMemoryMax, |
| 237 | secure_applet_memory_size)); | 248 | secure_applet_memory_size)); |
| 238 | } | 249 | } |
| 239 | 250 | ||
| @@ -253,16 +264,82 @@ struct KernelCore::Impl { | |||
| 253 | system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event); | 264 | system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event); |
| 254 | } | 265 | } |
| 255 | 266 | ||
| 256 | void InitializeResourceManagers(VAddr address, size_t size) { | 267 | void InitializeResourceManagers(KernelCore& kernel, VAddr address, size_t size) { |
| 257 | dynamic_page_manager = std::make_unique<KDynamicPageManager>(); | 268 | // Ensure that the buffer is suitable for our use. |
| 258 | memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>(); | 269 | ASSERT(Common::IsAligned(address, PageSize)); |
| 270 | ASSERT(Common::IsAligned(size, PageSize)); | ||
| 271 | |||
| 272 | // Ensure that we have space for our reference counts. | ||
| 273 | const size_t rc_size = | ||
| 274 | Common::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(size), PageSize); | ||
| 275 | ASSERT(rc_size < size); | ||
| 276 | size -= rc_size; | ||
| 277 | |||
| 278 | // Initialize the resource managers' shared page manager. | ||
| 279 | resource_manager_page_manager = std::make_unique<KDynamicPageManager>(); | ||
| 280 | resource_manager_page_manager->Initialize( | ||
| 281 | address, size, std::max<size_t>(PageSize, KPageBufferSlabHeap::BufferSize)); | ||
| 282 | |||
| 283 | // Initialize the KPageBuffer slab heap. | ||
| 284 | page_buffer_slab_heap.Initialize(system); | ||
| 285 | |||
| 286 | // Initialize the fixed-size slab heaps. | ||
| 287 | app_memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>(); | ||
| 288 | sys_memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>(); | ||
| 289 | block_info_heap = std::make_unique<KBlockInfoSlabHeap>(); | ||
| 290 | app_memory_block_heap->Initialize(resource_manager_page_manager.get(), | ||
| 291 | ApplicationMemoryBlockSlabHeapSize); | ||
| 292 | sys_memory_block_heap->Initialize(resource_manager_page_manager.get(), | ||
| 293 | SystemMemoryBlockSlabHeapSize); | ||
| 294 | block_info_heap->Initialize(resource_manager_page_manager.get(), BlockInfoSlabHeapSize); | ||
| 295 | |||
| 296 | // Reserve all but a fixed number of remaining pages for the page table heap. | ||
| 297 | const size_t num_pt_pages = resource_manager_page_manager->GetCount() - | ||
| 298 | resource_manager_page_manager->GetUsed() - | ||
| 299 | ReservedDynamicPageCount; | ||
| 300 | page_table_heap = std::make_unique<KPageTableSlabHeap>(); | ||
| 301 | |||
| 302 | // TODO(bunnei): Pass in address once we support kernel virtual memory allocations. | ||
| 303 | page_table_heap->Initialize( | ||
| 304 | resource_manager_page_manager.get(), num_pt_pages, | ||
| 305 | /*GetPointer<KPageTableManager::RefCount>(address + size)*/ nullptr); | ||
| 306 | |||
| 307 | // Setup the slab managers. | ||
| 308 | KDynamicPageManager* const app_dynamic_page_manager = nullptr; | ||
| 309 | KDynamicPageManager* const sys_dynamic_page_manager = | ||
| 310 | /*KTargetSystem::IsDynamicResourceLimitsEnabled()*/ true | ||
| 311 | ? resource_manager_page_manager.get() | ||
| 312 | : nullptr; | ||
| 259 | app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>(); | 313 | app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>(); |
| 260 | 314 | sys_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>(); | |
| 261 | dynamic_page_manager->Initialize(address, size); | 315 | app_block_info_manager = std::make_unique<KBlockInfoManager>(); |
| 262 | static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000; | 316 | sys_block_info_manager = std::make_unique<KBlockInfoManager>(); |
| 263 | memory_block_heap->Initialize(dynamic_page_manager.get(), | 317 | app_page_table_manager = std::make_unique<KPageTableManager>(); |
| 264 | ApplicationMemoryBlockSlabHeapSize); | 318 | sys_page_table_manager = std::make_unique<KPageTableManager>(); |
| 265 | app_memory_block_manager->Initialize(nullptr, memory_block_heap.get()); | 319 | |
| 320 | app_memory_block_manager->Initialize(app_dynamic_page_manager, app_memory_block_heap.get()); | ||
| 321 | sys_memory_block_manager->Initialize(sys_dynamic_page_manager, sys_memory_block_heap.get()); | ||
| 322 | |||
| 323 | app_block_info_manager->Initialize(app_dynamic_page_manager, block_info_heap.get()); | ||
| 324 | sys_block_info_manager->Initialize(sys_dynamic_page_manager, block_info_heap.get()); | ||
| 325 | |||
| 326 | app_page_table_manager->Initialize(app_dynamic_page_manager, page_table_heap.get()); | ||
| 327 | sys_page_table_manager->Initialize(sys_dynamic_page_manager, page_table_heap.get()); | ||
| 328 | |||
| 329 | // Check that we have the correct number of dynamic pages available. | ||
| 330 | ASSERT(resource_manager_page_manager->GetCount() - | ||
| 331 | resource_manager_page_manager->GetUsed() == | ||
| 332 | ReservedDynamicPageCount); | ||
| 333 | |||
| 334 | // Create the application and system resources. | ||
| 335 | app_system_resource = std::make_unique<KSystemResource>(kernel); | ||
| 336 | sys_system_resource = std::make_unique<KSystemResource>(kernel); | ||
| 337 | |||
| 338 | // Set the managers for the system resources. | ||
| 339 | app_system_resource->SetManagers(*app_memory_block_manager, *app_block_info_manager, | ||
| 340 | *app_page_table_manager); | ||
| 341 | sys_system_resource->SetManagers(*sys_memory_block_manager, *sys_block_info_manager, | ||
| 342 | *sys_page_table_manager); | ||
| 266 | } | 343 | } |
| 267 | 344 | ||
| 268 | void InitializeShutdownThreads() { | 345 | void InitializeShutdownThreads() { |
| @@ -300,15 +377,18 @@ struct KernelCore::Impl { | |||
| 300 | } | 377 | } |
| 301 | 378 | ||
| 302 | // Gets the dummy KThread for the caller, allocating a new one if this is the first time | 379 | // Gets the dummy KThread for the caller, allocating a new one if this is the first time |
| 303 | KThread* GetHostDummyThread() { | 380 | KThread* GetHostDummyThread(KThread* existing_thread) { |
| 304 | auto initialize = [this](KThread* thread) { | 381 | auto initialize = [this](KThread* thread) { |
| 305 | ASSERT(KThread::InitializeDummyThread(thread).IsSuccess()); | 382 | ASSERT(KThread::InitializeDummyThread(thread, nullptr).IsSuccess()); |
| 306 | thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId())); | 383 | thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId())); |
| 307 | return thread; | 384 | return thread; |
| 308 | }; | 385 | }; |
| 309 | 386 | ||
| 310 | thread_local auto raw_thread = KThread(system.Kernel()); | 387 | thread_local KThread raw_thread{system.Kernel()}; |
| 311 | thread_local auto thread = initialize(&raw_thread); | 388 | thread_local KThread* thread = nullptr; |
| 389 | if (thread == nullptr) { | ||
| 390 | thread = (existing_thread == nullptr) ? initialize(&raw_thread) : existing_thread; | ||
| 391 | } | ||
| 312 | 392 | ||
| 313 | return thread; | 393 | return thread; |
| 314 | } | 394 | } |
| @@ -323,9 +403,9 @@ struct KernelCore::Impl { | |||
| 323 | } | 403 | } |
| 324 | 404 | ||
| 325 | /// Registers a new host thread by allocating a host thread ID for it | 405 | /// Registers a new host thread by allocating a host thread ID for it |
| 326 | void RegisterHostThread() { | 406 | void RegisterHostThread(KThread* existing_thread) { |
| 327 | [[maybe_unused]] const auto this_id = GetHostThreadId(); | 407 | [[maybe_unused]] const auto this_id = GetHostThreadId(); |
| 328 | [[maybe_unused]] const auto dummy_thread = GetHostDummyThread(); | 408 | [[maybe_unused]] const auto dummy_thread = GetHostDummyThread(existing_thread); |
| 329 | } | 409 | } |
| 330 | 410 | ||
| 331 | [[nodiscard]] u32 GetCurrentHostThreadID() { | 411 | [[nodiscard]] u32 GetCurrentHostThreadID() { |
| @@ -356,7 +436,7 @@ struct KernelCore::Impl { | |||
| 356 | KThread* GetCurrentEmuThread() { | 436 | KThread* GetCurrentEmuThread() { |
| 357 | const auto thread_id = GetCurrentHostThreadID(); | 437 | const auto thread_id = GetCurrentHostThreadID(); |
| 358 | if (thread_id >= Core::Hardware::NUM_CPU_CORES) { | 438 | if (thread_id >= Core::Hardware::NUM_CPU_CORES) { |
| 359 | return GetHostDummyThread(); | 439 | return GetHostDummyThread(nullptr); |
| 360 | } | 440 | } |
| 361 | 441 | ||
| 362 | return current_thread; | 442 | return current_thread; |
| @@ -446,6 +526,9 @@ struct KernelCore::Impl { | |||
| 446 | ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( | 526 | ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( |
| 447 | misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc)); | 527 | misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc)); |
| 448 | 528 | ||
| 529 | // Determine if we'll use extra thread resources. | ||
| 530 | const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit(); | ||
| 531 | |||
| 449 | // Setup the stack region. | 532 | // Setup the stack region. |
| 450 | constexpr size_t StackRegionSize = 14_MiB; | 533 | constexpr size_t StackRegionSize = 14_MiB; |
| 451 | constexpr size_t StackRegionAlign = KernelAslrAlignment; | 534 | constexpr size_t StackRegionAlign = KernelAslrAlignment; |
| @@ -456,7 +539,8 @@ struct KernelCore::Impl { | |||
| 456 | stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack)); | 539 | stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack)); |
| 457 | 540 | ||
| 458 | // Determine the size of the resource region. | 541 | // Determine the size of the resource region. |
| 459 | const size_t resource_region_size = memory_layout->GetResourceRegionSizeForInit(); | 542 | const size_t resource_region_size = |
| 543 | memory_layout->GetResourceRegionSizeForInit(use_extra_resources); | ||
| 460 | 544 | ||
| 461 | // Determine the size of the slab region. | 545 | // Determine the size of the slab region. |
| 462 | const size_t slab_region_size = | 546 | const size_t slab_region_size = |
| @@ -702,33 +786,31 @@ struct KernelCore::Impl { | |||
| 702 | search->second(system.ServiceManager(), server_port); | 786 | search->second(system.ServiceManager(), server_port); |
| 703 | } | 787 | } |
| 704 | 788 | ||
| 705 | std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(KernelCore& kernel, | 789 | Kernel::ServiceThread& CreateServiceThread(KernelCore& kernel, const std::string& name) { |
| 706 | const std::string& name) { | 790 | auto* ptr = new ServiceThread(kernel, name); |
| 707 | auto service_thread = std::make_shared<Kernel::ServiceThread>(kernel, name); | ||
| 708 | 791 | ||
| 709 | service_threads_manager.QueueWork( | 792 | service_threads_manager.QueueWork( |
| 710 | [this, service_thread]() { service_threads.emplace(service_thread); }); | 793 | [this, ptr]() { service_threads.emplace(ptr, std::unique_ptr<ServiceThread>(ptr)); }); |
| 711 | 794 | ||
| 712 | return service_thread; | 795 | return *ptr; |
| 713 | } | 796 | } |
| 714 | 797 | ||
| 715 | void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) { | 798 | void ReleaseServiceThread(Kernel::ServiceThread& service_thread) { |
| 716 | if (auto strong_ptr = service_thread.lock()) { | 799 | auto* ptr = &service_thread; |
| 717 | if (strong_ptr == default_service_thread.lock()) { | ||
| 718 | // Nothing to do here, the service is using default_service_thread, which will be | ||
| 719 | // released on shutdown. | ||
| 720 | return; | ||
| 721 | } | ||
| 722 | 800 | ||
| 723 | service_threads_manager.QueueWork( | 801 | if (ptr == default_service_thread) { |
| 724 | [this, strong_ptr{std::move(strong_ptr)}]() { service_threads.erase(strong_ptr); }); | 802 | // Nothing to do here, the service is using default_service_thread, which will be |
| 803 | // released on shutdown. | ||
| 804 | return; | ||
| 725 | } | 805 | } |
| 806 | |||
| 807 | service_threads_manager.QueueWork([this, ptr]() { service_threads.erase(ptr); }); | ||
| 726 | } | 808 | } |
| 727 | 809 | ||
| 728 | void ClearServiceThreads() { | 810 | void ClearServiceThreads() { |
| 729 | service_threads_manager.QueueWork([this] { | 811 | service_threads_manager.QueueWork([this] { |
| 730 | service_threads.clear(); | 812 | service_threads.clear(); |
| 731 | default_service_thread.reset(); | 813 | default_service_thread = nullptr; |
| 732 | service_thread_barrier.Sync(); | 814 | service_thread_barrier.Sync(); |
| 733 | }); | 815 | }); |
| 734 | service_thread_barrier.Sync(); | 816 | service_thread_barrier.Sync(); |
| @@ -751,6 +833,8 @@ struct KernelCore::Impl { | |||
| 751 | Init::KSlabResourceCounts slab_resource_counts{}; | 833 | Init::KSlabResourceCounts slab_resource_counts{}; |
| 752 | KResourceLimit* system_resource_limit{}; | 834 | KResourceLimit* system_resource_limit{}; |
| 753 | 835 | ||
| 836 | KPageBufferSlabHeap page_buffer_slab_heap; | ||
| 837 | |||
| 754 | std::shared_ptr<Core::Timing::EventType> preemption_event; | 838 | std::shared_ptr<Core::Timing::EventType> preemption_event; |
| 755 | 839 | ||
| 756 | // This is the kernel's handle table or supervisor handle table which | 840 | // This is the kernel's handle table or supervisor handle table which |
| @@ -776,10 +860,20 @@ struct KernelCore::Impl { | |||
| 776 | // Kernel memory management | 860 | // Kernel memory management |
| 777 | std::unique_ptr<KMemoryManager> memory_manager; | 861 | std::unique_ptr<KMemoryManager> memory_manager; |
| 778 | 862 | ||
| 779 | // Dynamic slab managers | 863 | // Resource managers |
| 780 | std::unique_ptr<KDynamicPageManager> dynamic_page_manager; | 864 | std::unique_ptr<KDynamicPageManager> resource_manager_page_manager; |
| 781 | std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap; | 865 | std::unique_ptr<KPageTableSlabHeap> page_table_heap; |
| 866 | std::unique_ptr<KMemoryBlockSlabHeap> app_memory_block_heap; | ||
| 867 | std::unique_ptr<KMemoryBlockSlabHeap> sys_memory_block_heap; | ||
| 868 | std::unique_ptr<KBlockInfoSlabHeap> block_info_heap; | ||
| 869 | std::unique_ptr<KPageTableManager> app_page_table_manager; | ||
| 870 | std::unique_ptr<KPageTableManager> sys_page_table_manager; | ||
| 782 | std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager; | 871 | std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager; |
| 872 | std::unique_ptr<KMemoryBlockSlabManager> sys_memory_block_manager; | ||
| 873 | std::unique_ptr<KBlockInfoManager> app_block_info_manager; | ||
| 874 | std::unique_ptr<KBlockInfoManager> sys_block_info_manager; | ||
| 875 | std::unique_ptr<KSystemResource> app_system_resource; | ||
| 876 | std::unique_ptr<KSystemResource> sys_system_resource; | ||
| 783 | 877 | ||
| 784 | // Shared memory for services | 878 | // Shared memory for services |
| 785 | Kernel::KSharedMemory* hid_shared_mem{}; | 879 | Kernel::KSharedMemory* hid_shared_mem{}; |
| @@ -792,8 +886,8 @@ struct KernelCore::Impl { | |||
| 792 | std::unique_ptr<KMemoryLayout> memory_layout; | 886 | std::unique_ptr<KMemoryLayout> memory_layout; |
| 793 | 887 | ||
| 794 | // Threads used for services | 888 | // Threads used for services |
| 795 | std::unordered_set<std::shared_ptr<ServiceThread>> service_threads; | 889 | std::unordered_map<ServiceThread*, std::unique_ptr<ServiceThread>> service_threads; |
| 796 | std::weak_ptr<ServiceThread> default_service_thread; | 890 | ServiceThread* default_service_thread{}; |
| 797 | Common::ThreadWorker service_threads_manager; | 891 | Common::ThreadWorker service_threads_manager; |
| 798 | Common::Barrier service_thread_barrier; | 892 | Common::Barrier service_thread_barrier; |
| 799 | 893 | ||
| @@ -1033,8 +1127,12 @@ void KernelCore::RegisterCoreThread(std::size_t core_id) { | |||
| 1033 | impl->RegisterCoreThread(core_id); | 1127 | impl->RegisterCoreThread(core_id); |
| 1034 | } | 1128 | } |
| 1035 | 1129 | ||
| 1036 | void KernelCore::RegisterHostThread() { | 1130 | void KernelCore::RegisterHostThread(KThread* existing_thread) { |
| 1037 | impl->RegisterHostThread(); | 1131 | impl->RegisterHostThread(existing_thread); |
| 1132 | |||
| 1133 | if (existing_thread != nullptr) { | ||
| 1134 | ASSERT(GetCurrentEmuThread() == existing_thread); | ||
| 1135 | } | ||
| 1038 | } | 1136 | } |
| 1039 | 1137 | ||
| 1040 | u32 KernelCore::GetCurrentHostThreadID() const { | 1138 | u32 KernelCore::GetCurrentHostThreadID() const { |
| @@ -1057,12 +1155,12 @@ const KMemoryManager& KernelCore::MemoryManager() const { | |||
| 1057 | return *impl->memory_manager; | 1155 | return *impl->memory_manager; |
| 1058 | } | 1156 | } |
| 1059 | 1157 | ||
| 1060 | KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() { | 1158 | KSystemResource& KernelCore::GetSystemSystemResource() { |
| 1061 | return *impl->app_memory_block_manager; | 1159 | return *impl->sys_system_resource; |
| 1062 | } | 1160 | } |
| 1063 | 1161 | ||
| 1064 | const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const { | 1162 | const KSystemResource& KernelCore::GetSystemSystemResource() const { |
| 1065 | return *impl->app_memory_block_manager; | 1163 | return *impl->sys_system_resource; |
| 1066 | } | 1164 | } |
| 1067 | 1165 | ||
| 1068 | Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { | 1166 | Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { |
| @@ -1109,16 +1207,28 @@ void KernelCore::Suspend(bool suspended) { | |||
| 1109 | const bool should_suspend{exception_exited || suspended}; | 1207 | const bool should_suspend{exception_exited || suspended}; |
| 1110 | const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable; | 1208 | const auto activity = should_suspend ? ProcessActivity::Paused : ProcessActivity::Runnable; |
| 1111 | 1209 | ||
| 1112 | for (auto* process : GetProcessList()) { | 1210 | std::vector<KScopedAutoObject<KThread>> process_threads; |
| 1113 | process->SetActivity(activity); | 1211 | { |
| 1212 | KScopedSchedulerLock sl{*this}; | ||
| 1213 | |||
| 1214 | if (auto* process = CurrentProcess(); process != nullptr) { | ||
| 1215 | process->SetActivity(activity); | ||
| 1216 | |||
| 1217 | if (!should_suspend) { | ||
| 1218 | // Runnable now; no need to wait. | ||
| 1219 | return; | ||
| 1220 | } | ||
| 1114 | 1221 | ||
| 1115 | if (should_suspend) { | ||
| 1116 | // Wait for execution to stop | ||
| 1117 | for (auto* thread : process->GetThreadList()) { | 1222 | for (auto* thread : process->GetThreadList()) { |
| 1118 | thread->WaitUntilSuspended(); | 1223 | process_threads.emplace_back(thread); |
| 1119 | } | 1224 | } |
| 1120 | } | 1225 | } |
| 1121 | } | 1226 | } |
| 1227 | |||
| 1228 | // Wait for execution to stop. | ||
| 1229 | for (auto& thread : process_threads) { | ||
| 1230 | thread->WaitUntilSuspended(); | ||
| 1231 | } | ||
| 1122 | } | 1232 | } |
| 1123 | 1233 | ||
| 1124 | void KernelCore::ShutdownCores() { | 1234 | void KernelCore::ShutdownCores() { |
| @@ -1150,15 +1260,15 @@ void KernelCore::ExitSVCProfile() { | |||
| 1150 | MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]); | 1260 | MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]); |
| 1151 | } | 1261 | } |
| 1152 | 1262 | ||
| 1153 | std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) { | 1263 | Kernel::ServiceThread& KernelCore::CreateServiceThread(const std::string& name) { |
| 1154 | return impl->CreateServiceThread(*this, name); | 1264 | return impl->CreateServiceThread(*this, name); |
| 1155 | } | 1265 | } |
| 1156 | 1266 | ||
| 1157 | std::weak_ptr<Kernel::ServiceThread> KernelCore::GetDefaultServiceThread() const { | 1267 | Kernel::ServiceThread& KernelCore::GetDefaultServiceThread() const { |
| 1158 | return impl->default_service_thread; | 1268 | return *impl->default_service_thread; |
| 1159 | } | 1269 | } |
| 1160 | 1270 | ||
| 1161 | void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) { | 1271 | void KernelCore::ReleaseServiceThread(Kernel::ServiceThread& service_thread) { |
| 1162 | impl->ReleaseServiceThread(service_thread); | 1272 | impl->ReleaseServiceThread(service_thread); |
| 1163 | } | 1273 | } |
| 1164 | 1274 | ||
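
The GetHostDummyThread() change above replaces eager thread_local initialization with a lazy one, so the first call can adopt a caller-provided thread instead of always building a fresh dummy. A minimal stand-alone sketch of the pattern, with an illustrative Thread type:

    struct ThreadSketch {
        int id;
    };

    ThreadSketch* GetHostDummyThreadSketch(ThreadSketch* existing_thread) {
        // Per-host-thread storage; the pointer starts null so the first call
        // decides whether to adopt the caller's thread or the local one.
        thread_local ThreadSketch raw_thread{0};
        thread_local ThreadSketch* thread = nullptr;
        if (thread == nullptr) {
            thread = (existing_thread == nullptr) ? &raw_thread : existing_thread;
        }
        return thread;
    }
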
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 4ae6b3923..2e22fe0f6 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h | |||
| @@ -34,13 +34,16 @@ class KClientPort; | |||
| 34 | class GlobalSchedulerContext; | 34 | class GlobalSchedulerContext; |
| 35 | class KAutoObjectWithListContainer; | 35 | class KAutoObjectWithListContainer; |
| 36 | class KClientSession; | 36 | class KClientSession; |
| 37 | class KDebug; | ||
| 38 | class KDynamicPageManager; | ||
| 37 | class KEvent; | 39 | class KEvent; |
| 40 | class KEventInfo; | ||
| 38 | class KHandleTable; | 41 | class KHandleTable; |
| 39 | class KLinkedListNode; | 42 | class KLinkedListNode; |
| 40 | class KMemoryBlockSlabManager; | ||
| 41 | class KMemoryLayout; | 43 | class KMemoryLayout; |
| 42 | class KMemoryManager; | 44 | class KMemoryManager; |
| 43 | class KPageBuffer; | 45 | class KPageBuffer; |
| 46 | class KPageBufferSlabHeap; | ||
| 44 | class KPort; | 47 | class KPort; |
| 45 | class KProcess; | 48 | class KProcess; |
| 46 | class KResourceLimit; | 49 | class KResourceLimit; |
| @@ -51,6 +54,7 @@ class KSession; | |||
| 51 | class KSessionRequest; | 54 | class KSessionRequest; |
| 52 | class KSharedMemory; | 55 | class KSharedMemory; |
| 53 | class KSharedMemoryInfo; | 56 | class KSharedMemoryInfo; |
| 57 | class KSecureSystemResource; | ||
| 54 | class KThread; | 58 | class KThread; |
| 55 | class KThreadLocalPage; | 59 | class KThreadLocalPage; |
| 56 | class KTransferMemory; | 60 | class KTransferMemory; |
| @@ -236,7 +240,7 @@ public: | |||
| 236 | void RegisterCoreThread(std::size_t core_id); | 240 | void RegisterCoreThread(std::size_t core_id); |
| 237 | 241 | ||
| 238 | /// Register the current thread as a non CPU core thread. | 242 | /// Register the current thread as a non CPU core thread. |
| 239 | void RegisterHostThread(); | 243 | void RegisterHostThread(KThread* existing_thread = nullptr); |
| 240 | 244 | ||
| 241 | /// Gets the virtual memory manager for the kernel. | 245 | /// Gets the virtual memory manager for the kernel. |
| 242 | KMemoryManager& MemoryManager(); | 246 | KMemoryManager& MemoryManager(); |
| @@ -244,11 +248,11 @@ public: | |||
| 244 | /// Gets the virtual memory manager for the kernel. | 248 | /// Gets the virtual memory manager for the kernel. |
| 245 | const KMemoryManager& MemoryManager() const; | 249 | const KMemoryManager& MemoryManager() const; |
| 246 | 250 | ||
| 247 | /// Gets the application memory block manager for the kernel. | 251 | /// Gets the system resource manager. |
| 248 | KMemoryBlockSlabManager& GetApplicationMemoryBlockManager(); | 252 | KSystemResource& GetSystemSystemResource(); |
| 249 | 253 | ||
| 250 | /// Gets the application memory block manager for the kernel. | 254 | /// Gets the system resource manager. |
| 251 | const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const; | 255 | const KSystemResource& GetSystemSystemResource() const; |
| 252 | 256 | ||
| 253 | /// Gets the shared memory object for HID services. | 257 | /// Gets the shared memory object for HID services. |
| 254 | Kernel::KSharedMemory& GetHidSharedMem(); | 258 | Kernel::KSharedMemory& GetHidSharedMem(); |
| @@ -305,24 +309,24 @@ public: | |||
| 305 | * See GetDefaultServiceThread. | 309 | * See GetDefaultServiceThread. |
| 306 | * @param name String name for the ServerSession creating this thread, used for debug | 310 | * @param name String name for the ServerSession creating this thread, used for debug |
| 307 | * purposes. | 311 | * purposes. |
| 308 | * @returns The a weak pointer newly created service thread. | 312 | * @returns A reference to the newly created service thread. |
| 309 | */ | 313 | */ |
| 310 | std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name); | 314 | Kernel::ServiceThread& CreateServiceThread(const std::string& name); |
| 311 | 315 | ||
| 312 | /** | 316 | /** |
| 313 | * Gets the default host service thread, which executes HLE service requests. Unless service | 317 | * Gets the default host service thread, which executes HLE service requests. Unless service |
| 314 | * requests need to block on the host, the default service thread should be used in favor of | 318 | * requests need to block on the host, the default service thread should be used in favor of |
| 315 | * creating a new service thread. | 319 | * creating a new service thread. |
| 316 | * @returns The a weak pointer for the default service thread. | 320 | * @returns A reference to the default service thread. |
| 317 | */ | 321 | */ |
| 318 | std::weak_ptr<Kernel::ServiceThread> GetDefaultServiceThread() const; | 322 | Kernel::ServiceThread& GetDefaultServiceThread() const; |
| 319 | 323 | ||
| 320 | /** | 324 | /** |
| 321 | * Releases a HLE service thread, instructing KernelCore to free it. This should be called when | 325 | * Releases a HLE service thread, instructing KernelCore to free it. This should be called when |
| 322 | * the ServerSession associated with the thread is destroyed. | 326 | * the ServerSession associated with the thread is destroyed. |
| 323 | * @param service_thread Service thread to release. | 327 | * @param service_thread Service thread to release. |
| 324 | */ | 328 | */ |
| 325 | void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread); | 329 | void ReleaseServiceThread(Kernel::ServiceThread& service_thread); |
| 326 | 330 | ||
| 327 | /// Workaround for single-core mode when preempting threads while idle. | 331 | /// Workaround for single-core mode when preempting threads while idle. |
| 328 | bool IsPhantomModeForSingleCore() const; | 332 | bool IsPhantomModeForSingleCore() const; |
| @@ -364,6 +368,12 @@ public: | |||
| 364 | return slab_heap_container->thread_local_page; | 368 | return slab_heap_container->thread_local_page; |
| 365 | } else if constexpr (std::is_same_v<T, KSessionRequest>) { | 369 | } else if constexpr (std::is_same_v<T, KSessionRequest>) { |
| 366 | return slab_heap_container->session_request; | 370 | return slab_heap_container->session_request; |
| 371 | } else if constexpr (std::is_same_v<T, KSecureSystemResource>) { | ||
| 372 | return slab_heap_container->secure_system_resource; | ||
| 373 | } else if constexpr (std::is_same_v<T, KEventInfo>) { | ||
| 374 | return slab_heap_container->event_info; | ||
| 375 | } else if constexpr (std::is_same_v<T, KDebug>) { | ||
| 376 | return slab_heap_container->debug; | ||
| 367 | } | 377 | } |
| 368 | } | 378 | } |
| 369 | 379 | ||
| @@ -427,6 +437,9 @@ private: | |||
| 427 | KSlabHeap<KPageBuffer> page_buffer; | 437 | KSlabHeap<KPageBuffer> page_buffer; |
| 428 | KSlabHeap<KThreadLocalPage> thread_local_page; | 438 | KSlabHeap<KThreadLocalPage> thread_local_page; |
| 429 | KSlabHeap<KSessionRequest> session_request; | 439 | KSlabHeap<KSessionRequest> session_request; |
| 440 | KSlabHeap<KSecureSystemResource> secure_system_resource; | ||
| 441 | KSlabHeap<KEventInfo> event_info; | ||
| 442 | KSlabHeap<KDebug> debug; | ||
| 430 | }; | 443 | }; |
| 431 | 444 | ||
| 432 | std::unique_ptr<SlabHeapContainer> slab_heap_container; | 445 | std::unique_ptr<SlabHeapContainer> slab_heap_container; |
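
The GetSlabHeap() additions in kernel.h extend a compile-time dispatch over object types. The fragment below sketches the same if constexpr technique with two placeholder types; the real container covers many more.

    #include <type_traits>

    struct KEventSketch {};
    struct KDebugSketch {};

    template <typename T>
    struct SlabHeapSketch {};

    struct SlabHeapContainerSketch {
        SlabHeapSketch<KEventSketch> event;
        SlabHeapSketch<KDebugSketch> debug;
    };

    template <typename T>
    SlabHeapSketch<T>& GetSlabHeapSketch(SlabHeapContainerSketch& container) {
        if constexpr (std::is_same_v<T, KEventSketch>) {
            return container.event;
        } else if constexpr (std::is_same_v<T, KDebugSketch>) {
            return container.debug;
        } else {
            // Instantiating with an unsupported type fails at compile time.
            static_assert(!sizeof(T*), "type has no slab heap");
        }
    }
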
diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp index c8fe42537..f5c2ab23f 100644 --- a/src/core/hle/kernel/service_thread.cpp +++ b/src/core/hle/kernel/service_thread.cpp | |||
| @@ -36,11 +36,12 @@ public: | |||
| 36 | private: | 36 | private: |
| 37 | KernelCore& kernel; | 37 | KernelCore& kernel; |
| 38 | 38 | ||
| 39 | std::jthread m_thread; | 39 | std::jthread m_host_thread; |
| 40 | std::mutex m_session_mutex; | 40 | std::mutex m_session_mutex; |
| 41 | std::map<KServerSession*, std::shared_ptr<SessionRequestManager>> m_sessions; | 41 | std::map<KServerSession*, std::shared_ptr<SessionRequestManager>> m_sessions; |
| 42 | KEvent* m_wakeup_event; | 42 | KEvent* m_wakeup_event; |
| 43 | KProcess* m_process; | 43 | KProcess* m_process; |
| 44 | KThread* m_thread; | ||
| 44 | std::atomic<bool> m_shutdown_requested; | 45 | std::atomic<bool> m_shutdown_requested; |
| 45 | const std::string m_service_name; | 46 | const std::string m_service_name; |
| 46 | }; | 47 | }; |
| @@ -132,7 +133,7 @@ void ServiceThread::Impl::SessionClosed(KServerSession* server_session, | |||
| 132 | void ServiceThread::Impl::LoopProcess() { | 133 | void ServiceThread::Impl::LoopProcess() { |
| 133 | Common::SetCurrentThreadName(m_service_name.c_str()); | 134 | Common::SetCurrentThreadName(m_service_name.c_str()); |
| 134 | 135 | ||
| 135 | kernel.RegisterHostThread(); | 136 | kernel.RegisterHostThread(m_thread); |
| 136 | 137 | ||
| 137 | while (!m_shutdown_requested.load()) { | 138 | while (!m_shutdown_requested.load()) { |
| 138 | WaitAndProcessImpl(); | 139 | WaitAndProcessImpl(); |
| @@ -160,7 +161,7 @@ ServiceThread::Impl::~Impl() { | |||
| 160 | // Shut down the processing thread. | 161 | // Shut down the processing thread. |
| 161 | m_shutdown_requested.store(true); | 162 | m_shutdown_requested.store(true); |
| 162 | m_wakeup_event->Signal(); | 163 | m_wakeup_event->Signal(); |
| 163 | m_thread.join(); | 164 | m_host_thread.join(); |
| 164 | 165 | ||
| 165 | // Lock mutex. | 166 | // Lock mutex. |
| 166 | m_session_mutex.lock(); | 167 | m_session_mutex.lock(); |
| @@ -177,6 +178,9 @@ ServiceThread::Impl::~Impl() { | |||
| 177 | m_wakeup_event->GetReadableEvent().Close(); | 178 | m_wakeup_event->GetReadableEvent().Close(); |
| 178 | m_wakeup_event->Close(); | 179 | m_wakeup_event->Close(); |
| 179 | 180 | ||
| 181 | // Close thread. | ||
| 182 | m_thread->Close(); | ||
| 183 | |||
| 180 | // Close process. | 184 | // Close process. |
| 181 | m_process->Close(); | 185 | m_process->Close(); |
| 182 | } | 186 | } |
| @@ -189,7 +193,7 @@ ServiceThread::Impl::Impl(KernelCore& kernel_, const std::string& service_name) | |||
| 189 | KProcess::ProcessType::KernelInternal, kernel.GetSystemResourceLimit()); | 193 | KProcess::ProcessType::KernelInternal, kernel.GetSystemResourceLimit()); |
| 190 | 194 | ||
| 191 | // Reserve a new event from the process resource limit | 195 | // Reserve a new event from the process resource limit |
| 192 | KScopedResourceReservation event_reservation(m_process, LimitableResource::Events); | 196 | KScopedResourceReservation event_reservation(m_process, LimitableResource::EventCountMax); |
| 193 | ASSERT(event_reservation.Succeeded()); | 197 | ASSERT(event_reservation.Succeeded()); |
| 194 | 198 | ||
| 195 | // Initialize event. | 199 | // Initialize event. |
| @@ -199,11 +203,19 @@ ServiceThread::Impl::Impl(KernelCore& kernel_, const std::string& service_name) | |||
| 199 | // Commit the event reservation. | 203 | // Commit the event reservation. |
| 200 | event_reservation.Commit(); | 204 | event_reservation.Commit(); |
| 201 | 205 | ||
| 202 | // Register the event. | 206 | // Reserve a new thread from the process resource limit |
| 203 | KEvent::Register(kernel, m_wakeup_event); | 207 | KScopedResourceReservation thread_reservation(m_process, LimitableResource::ThreadCountMax); |
| 208 | ASSERT(thread_reservation.Succeeded()); | ||
| 209 | |||
| 210 | // Initialize thread. | ||
| 211 | m_thread = KThread::Create(kernel); | ||
| 212 | ASSERT(KThread::InitializeDummyThread(m_thread, m_process).IsSuccess()); | ||
| 213 | |||
| 214 | // Commit the thread reservation. | ||
| 215 | thread_reservation.Commit(); | ||
| 204 | 216 | ||
| 205 | // Start thread. | 217 | // Start thread. |
| 206 | m_thread = std::jthread([this] { LoopProcess(); }); | 218 | m_host_thread = std::jthread([this] { LoopProcess(); }); |
| 207 | } | 219 | } |
| 208 | 220 | ||
| 209 | ServiceThread::ServiceThread(KernelCore& kernel, const std::string& name) | 221 | ServiceThread::ServiceThread(KernelCore& kernel, const std::string& name) |
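
With this change the service thread keeps two distinct thread objects, a host std::jthread (m_host_thread) and an emulated dummy KThread (m_thread), which is why the old single m_thread name is split. The sketch below shows only the host-thread shutdown ordering the destructor relies on (set the flag, wake the loop, join, then free per-object resources); the types and names are illustrative, not the emulator's.

    #include <atomic>
    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    class ServiceWorkerSketch {
    public:
        ServiceWorkerSketch() : m_host_thread{[this] { Loop(); }} {}

        ~ServiceWorkerSketch() {
            // Request shutdown, wake the loop, and join the host thread before
            // releasing anything the loop might still touch.
            m_shutdown_requested.store(true);
            m_wakeup.notify_one();
            m_host_thread.join();
        }

    private:
        void Loop() {
            std::unique_lock lock{m_mutex};
            while (!m_shutdown_requested.load()) {
                m_wakeup.wait_for(lock, std::chrono::milliseconds{100});
            }
        }

        std::mutex m_mutex;
        std::condition_variable m_wakeup;
        std::atomic<bool> m_shutdown_requested{false};
        std::jthread m_host_thread;
    };
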
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h index 06b51e919..0228ce188 100644 --- a/src/core/hle/kernel/slab_helpers.h +++ b/src/core/hle/kernel/slab_helpers.h | |||
| @@ -53,6 +53,84 @@ public: | |||
| 53 | }; | 53 | }; |
| 54 | 54 | ||
| 55 | template <typename Derived, typename Base> | 55 | template <typename Derived, typename Base> |
| 56 | class KAutoObjectWithSlabHeap : public Base { | ||
| 57 | static_assert(std::is_base_of<KAutoObject, Base>::value); | ||
| 58 | |||
| 59 | private: | ||
| 60 | static Derived* Allocate(KernelCore& kernel) { | ||
| 61 | return kernel.SlabHeap<Derived>().Allocate(kernel); | ||
| 62 | } | ||
| 63 | |||
| 64 | static void Free(KernelCore& kernel, Derived* obj) { | ||
| 65 | kernel.SlabHeap<Derived>().Free(obj); | ||
| 66 | } | ||
| 67 | |||
| 68 | public: | ||
| 69 | explicit KAutoObjectWithSlabHeap(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {} | ||
| 70 | virtual ~KAutoObjectWithSlabHeap() = default; | ||
| 71 | |||
| 72 | virtual void Destroy() override { | ||
| 73 | const bool is_initialized = this->IsInitialized(); | ||
| 74 | uintptr_t arg = 0; | ||
| 75 | if (is_initialized) { | ||
| 76 | arg = this->GetPostDestroyArgument(); | ||
| 77 | this->Finalize(); | ||
| 78 | } | ||
| 79 | Free(kernel, static_cast<Derived*>(this)); | ||
| 80 | if (is_initialized) { | ||
| 81 | Derived::PostDestroy(arg); | ||
| 82 | } | ||
| 83 | } | ||
| 84 | |||
| 85 | virtual bool IsInitialized() const { | ||
| 86 | return true; | ||
| 87 | } | ||
| 88 | virtual uintptr_t GetPostDestroyArgument() const { | ||
| 89 | return 0; | ||
| 90 | } | ||
| 91 | |||
| 92 | size_t GetSlabIndex() const { | ||
| 93 | return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this)); | ||
| 94 | } | ||
| 95 | |||
| 96 | public: | ||
| 97 | static void InitializeSlabHeap(KernelCore& kernel, void* memory, size_t memory_size) { | ||
| 98 | kernel.SlabHeap<Derived>().Initialize(memory, memory_size); | ||
| 99 | } | ||
| 100 | |||
| 101 | static Derived* Create(KernelCore& kernel) { | ||
| 102 | Derived* obj = Allocate(kernel); | ||
| 103 | if (obj != nullptr) { | ||
| 104 | KAutoObject::Create(obj); | ||
| 105 | } | ||
| 106 | return obj; | ||
| 107 | } | ||
| 108 | |||
| 109 | static size_t GetObjectSize(KernelCore& kernel) { | ||
| 110 | return kernel.SlabHeap<Derived>().GetObjectSize(); | ||
| 111 | } | ||
| 112 | |||
| 113 | static size_t GetSlabHeapSize(KernelCore& kernel) { | ||
| 114 | return kernel.SlabHeap<Derived>().GetSlabHeapSize(); | ||
| 115 | } | ||
| 116 | |||
| 117 | static size_t GetPeakIndex(KernelCore& kernel) { | ||
| 118 | return kernel.SlabHeap<Derived>().GetPeakIndex(); | ||
| 119 | } | ||
| 120 | |||
| 121 | static uintptr_t GetSlabHeapAddress(KernelCore& kernel) { | ||
| 122 | return kernel.SlabHeap<Derived>().GetSlabHeapAddress(); | ||
| 123 | } | ||
| 124 | |||
| 125 | static size_t GetNumRemaining(KernelCore& kernel) { | ||
| 126 | return kernel.SlabHeap<Derived>().GetNumRemaining(); | ||
| 127 | } | ||
| 128 | |||
| 129 | protected: | ||
| 130 | KernelCore& kernel; | ||
| 131 | }; | ||
| 132 | |||
| 133 | template <typename Derived, typename Base> | ||
| 56 | class KAutoObjectWithSlabHeapAndContainer : public Base { | 134 | class KAutoObjectWithSlabHeapAndContainer : public Base { |
| 57 | static_assert(std::is_base_of<KAutoObjectWithList, Base>::value); | 135 | static_assert(std::is_base_of<KAutoObjectWithList, Base>::value); |
| 58 | 136 | ||
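For orientation, the new KAutoObjectWithSlabHeap helper mirrors the existing KAutoObjectWithSlabHeapAndContainer pattern just below it, but skips registration in the global object container. A hypothetical derived type would plug into it roughly as sketched here; KFoo and its members are illustrative only and not part of this change, while KAutoObject, KernelCore, Result and ResultSuccess are existing kernel types.

    // Sketch: a slab-backed kernel object that opts out of the global container.
    class KFoo final : public KAutoObjectWithSlabHeap<KFoo, KAutoObject> {
    public:
        explicit KFoo(KernelCore& kernel_)
            : KAutoObjectWithSlabHeap<KFoo, KAutoObject>(kernel_) {}

        Result Initialize() {
            m_initialized = true;
            return ResultSuccess;
        }

        bool IsInitialized() const override {
            return m_initialized;
        }
        void Finalize() {
            // Release per-object resources here; Destroy() above calls this
            // before returning the slot to the slab heap.
        }
        static void PostDestroy(uintptr_t) {}

    private:
        bool m_initialized{};
    };

    // Typical lifetime, matching other slab-backed objects:
    //   KFoo* foo = KFoo::Create(kernel);   // Allocate() from the slab heap
    //   R_TRY(foo->Initialize());
    //   ...
    //   foo->Close();                       // Destroy() finalizes and frees the slot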
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 4c819f4b6..9962ad171 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp | |||
| @@ -267,7 +267,7 @@ Result CreateSession(Core::System& system, Handle* out_server, Handle* out_clien | |||
| 267 | 267 | ||
| 268 | // Reserve a new session from the process resource limit. | 268 | // Reserve a new session from the process resource limit. |
| 269 | // FIXME: LimitableResource_SessionCountMax | 269 | // FIXME: LimitableResource_SessionCountMax |
| 270 | KScopedResourceReservation session_reservation(&process, LimitableResource::Sessions); | 270 | KScopedResourceReservation session_reservation(&process, LimitableResource::SessionCountMax); |
| 271 | if (session_reservation.Succeeded()) { | 271 | if (session_reservation.Succeeded()) { |
| 272 | session = T::Create(system.Kernel()); | 272 | session = T::Create(system.Kernel()); |
| 273 | } else { | 273 | } else { |
| @@ -298,7 +298,7 @@ Result CreateSession(Core::System& system, Handle* out_server, Handle* out_clien | |||
| 298 | 298 | ||
| 299 | // We successfully allocated a session, so add the object we allocated to the resource | 299 | // We successfully allocated a session, so add the object we allocated to the resource |
| 300 | // limit. | 300 | // limit. |
| 301 | // system.Kernel().GetSystemResourceLimit().Reserve(LimitableResource::Sessions, 1); | 301 | // system.Kernel().GetSystemResourceLimit().Reserve(LimitableResource::SessionCountMax, 1); |
| 302 | } | 302 | } |
| 303 | 303 | ||
| 304 | // Check that we successfully created a session. | 304 | // Check that we successfully created a session. |
| @@ -656,27 +656,12 @@ static Result ArbitrateUnlock32(Core::System& system, u32 address) { | |||
| 656 | return ArbitrateUnlock(system, address); | 656 | return ArbitrateUnlock(system, address); |
| 657 | } | 657 | } |
| 658 | 658 | ||
| 659 | enum class BreakType : u32 { | ||
| 660 | Panic = 0, | ||
| 661 | AssertionFailed = 1, | ||
| 662 | PreNROLoad = 3, | ||
| 663 | PostNROLoad = 4, | ||
| 664 | PreNROUnload = 5, | ||
| 665 | PostNROUnload = 6, | ||
| 666 | CppException = 7, | ||
| 667 | }; | ||
| 668 | |||
| 669 | struct BreakReason { | ||
| 670 | union { | ||
| 671 | u32 raw; | ||
| 672 | BitField<0, 30, BreakType> break_type; | ||
| 673 | BitField<31, 1, u32> signal_debugger; | ||
| 674 | }; | ||
| 675 | }; | ||
| 676 | |||
| 677 | /// Break program execution | 659 | /// Break program execution |
| 678 | static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) { | 660 | static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) { |
| 679 | BreakReason break_reason{reason}; | 661 | BreakReason break_reason = |
| 662 | static_cast<BreakReason>(reason & ~static_cast<u32>(BreakReason::NotificationOnlyFlag)); | ||
| 663 | bool notification_only = (reason & static_cast<u32>(BreakReason::NotificationOnlyFlag)) != 0; | ||
| 664 | |||
| 680 | bool has_dumped_buffer{}; | 665 | bool has_dumped_buffer{}; |
| 681 | std::vector<u8> debug_buffer; | 666 | std::vector<u8> debug_buffer; |
| 682 | 667 | ||
| @@ -705,57 +690,56 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) { | |||
| 705 | } | 690 | } |
| 706 | has_dumped_buffer = true; | 691 | has_dumped_buffer = true; |
| 707 | }; | 692 | }; |
| 708 | switch (break_reason.break_type) { | 693 | switch (break_reason) { |
| 709 | case BreakType::Panic: | 694 | case BreakReason::Panic: |
| 710 | LOG_CRITICAL(Debug_Emulated, "Signalling debugger, PANIC! info1=0x{:016X}, info2=0x{:016X}", | 695 | LOG_CRITICAL(Debug_Emulated, "Userspace PANIC! info1=0x{:016X}, info2=0x{:016X}", info1, |
| 711 | info1, info2); | 696 | info2); |
| 712 | handle_debug_buffer(info1, info2); | 697 | handle_debug_buffer(info1, info2); |
| 713 | break; | 698 | break; |
| 714 | case BreakType::AssertionFailed: | 699 | case BreakReason::Assert: |
| 715 | LOG_CRITICAL(Debug_Emulated, | 700 | LOG_CRITICAL(Debug_Emulated, "Userspace Assertion failed! info1=0x{:016X}, info2=0x{:016X}", |
| 716 | "Signalling debugger, Assertion failed! info1=0x{:016X}, info2=0x{:016X}", | ||
| 717 | info1, info2); | 701 | info1, info2); |
| 718 | handle_debug_buffer(info1, info2); | 702 | handle_debug_buffer(info1, info2); |
| 719 | break; | 703 | break; |
| 720 | case BreakType::PreNROLoad: | 704 | case BreakReason::User: |
| 721 | LOG_WARNING( | 705 | LOG_WARNING(Debug_Emulated, "Userspace Break! 0x{:016X} with size 0x{:016X}", info1, info2); |
| 722 | Debug_Emulated, | 706 | handle_debug_buffer(info1, info2); |
| 723 | "Signalling debugger, Attempting to load an NRO at 0x{:016X} with size 0x{:016X}", | ||
| 724 | info1, info2); | ||
| 725 | break; | 707 | break; |
| 726 | case BreakType::PostNROLoad: | 708 | case BreakReason::PreLoadDll: |
| 727 | LOG_WARNING(Debug_Emulated, | 709 | LOG_INFO(Debug_Emulated, |
| 728 | "Signalling debugger, Loaded an NRO at 0x{:016X} with size 0x{:016X}", info1, | 710 | "Userspace Attempting to load an NRO at 0x{:016X} with size 0x{:016X}", info1, |
| 729 | info2); | 711 | info2); |
| 730 | break; | 712 | break; |
| 731 | case BreakType::PreNROUnload: | 713 | case BreakReason::PostLoadDll: |
| 732 | LOG_WARNING( | 714 | LOG_INFO(Debug_Emulated, "Userspace Loaded an NRO at 0x{:016X} with size 0x{:016X}", info1, |
| 733 | Debug_Emulated, | 715 | info2); |
| 734 | "Signalling debugger, Attempting to unload an NRO at 0x{:016X} with size 0x{:016X}", | ||
| 735 | info1, info2); | ||
| 736 | break; | 716 | break; |
| 737 | case BreakType::PostNROUnload: | 717 | case BreakReason::PreUnloadDll: |
| 738 | LOG_WARNING(Debug_Emulated, | 718 | LOG_INFO(Debug_Emulated, |
| 739 | "Signalling debugger, Unloaded an NRO at 0x{:016X} with size 0x{:016X}", info1, | 719 | "Userspace Attempting to unload an NRO at 0x{:016X} with size 0x{:016X}", info1, |
| 740 | info2); | 720 | info2); |
| 741 | break; | 721 | break; |
| 742 | case BreakType::CppException: | 722 | case BreakReason::PostUnloadDll: |
| 723 | LOG_INFO(Debug_Emulated, "Userspace Unloaded an NRO at 0x{:016X} with size 0x{:016X}", | ||
| 724 | info1, info2); | ||
| 725 | break; | ||
| 726 | case BreakReason::CppException: | ||
| 743 | LOG_CRITICAL(Debug_Emulated, "Signalling debugger. Uncaught C++ exception encountered."); | 727 | LOG_CRITICAL(Debug_Emulated, "Signalling debugger. Uncaught C++ exception encountered."); |
| 744 | break; | 728 | break; |
| 745 | default: | 729 | default: |
| 746 | LOG_WARNING( | 730 | LOG_WARNING( |
| 747 | Debug_Emulated, | 731 | Debug_Emulated, |
| 748 | "Signalling debugger, Unknown break reason {}, info1=0x{:016X}, info2=0x{:016X}", | 732 | "Signalling debugger, Unknown break reason {:#X}, info1=0x{:016X}, info2=0x{:016X}", |
| 749 | static_cast<u32>(break_reason.break_type.Value()), info1, info2); | 733 | reason, info1, info2); |
| 750 | handle_debug_buffer(info1, info2); | 734 | handle_debug_buffer(info1, info2); |
| 751 | break; | 735 | break; |
| 752 | } | 736 | } |
| 753 | 737 | ||
| 754 | system.GetReporter().SaveSvcBreakReport( | 738 | system.GetReporter().SaveSvcBreakReport(reason, notification_only, info1, info2, |
| 755 | static_cast<u32>(break_reason.break_type.Value()), break_reason.signal_debugger.As<bool>(), | 739 | has_dumped_buffer ? std::make_optional(debug_buffer) |
| 756 | info1, info2, has_dumped_buffer ? std::make_optional(debug_buffer) : std::nullopt); | 740 | : std::nullopt); |
| 757 | 741 | ||
| 758 | if (!break_reason.signal_debugger) { | 742 | if (!notification_only) { |
| 759 | LOG_CRITICAL( | 743 | LOG_CRITICAL( |
| 760 | Debug_Emulated, | 744 | Debug_Emulated, |
| 761 | "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}", | 745 | "Emulated program broke execution! reason=0x{:016X}, info1=0x{:016X}, info2=0x{:016X}", |
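As a quick reference for the Break rework above: the raw reason word now carries the enumerated BreakReason in its low bits and a notification-only flag in bit 31. A minimal, self-contained sketch of that decoding (kNotificationOnlyFlag mirrors BreakReason::NotificationOnlyFlag added to svc_types.h further down in this change):

    #include <cstdint>

    // Sketch: split an svcBreak reason word the same way the reworked handler does.
    constexpr std::uint32_t kNotificationOnlyFlag = 0x80000000u;

    struct DecodedBreak {
        std::uint32_t reason;    // BreakReason value with the flag bit cleared
        bool notification_only;  // true when the caller only signals the debugger
    };

    constexpr DecodedBreak DecodeBreakReason(std::uint32_t raw) {
        return DecodedBreak{raw & ~kNotificationOnlyFlag, (raw & kNotificationOnlyFlag) != 0};
    }

    static_assert(DecodeBreakReason(0x80000000u | 3u).notification_only);
    static_assert(DecodeBreakReason(0x80000000u | 3u).reason == 3u); // PreLoadDll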
| @@ -1716,13 +1700,13 @@ static Result QueryProcessMemory(Core::System& system, VAddr memory_info_address | |||
| 1716 | auto& memory{system.Memory()}; | 1700 | auto& memory{system.Memory()}; |
| 1717 | const auto memory_info{process->PageTable().QueryInfo(address).GetSvcMemoryInfo()}; | 1701 | const auto memory_info{process->PageTable().QueryInfo(address).GetSvcMemoryInfo()}; |
| 1718 | 1702 | ||
| 1719 | memory.Write64(memory_info_address + 0x00, memory_info.addr); | 1703 | memory.Write64(memory_info_address + 0x00, memory_info.base_address); |
| 1720 | memory.Write64(memory_info_address + 0x08, memory_info.size); | 1704 | memory.Write64(memory_info_address + 0x08, memory_info.size); |
| 1721 | memory.Write32(memory_info_address + 0x10, static_cast<u32>(memory_info.state) & 0xff); | 1705 | memory.Write32(memory_info_address + 0x10, static_cast<u32>(memory_info.state) & 0xff); |
| 1722 | memory.Write32(memory_info_address + 0x14, static_cast<u32>(memory_info.attr)); | 1706 | memory.Write32(memory_info_address + 0x14, static_cast<u32>(memory_info.attribute)); |
| 1723 | memory.Write32(memory_info_address + 0x18, static_cast<u32>(memory_info.perm)); | 1707 | memory.Write32(memory_info_address + 0x18, static_cast<u32>(memory_info.permission)); |
| 1724 | memory.Write32(memory_info_address + 0x1c, memory_info.ipc_refcount); | 1708 | memory.Write32(memory_info_address + 0x1c, memory_info.ipc_count); |
| 1725 | memory.Write32(memory_info_address + 0x20, memory_info.device_refcount); | 1709 | memory.Write32(memory_info_address + 0x20, memory_info.device_count); |
| 1726 | memory.Write32(memory_info_address + 0x24, 0); | 1710 | memory.Write32(memory_info_address + 0x24, 0); |
| 1727 | 1711 | ||
| 1728 | // Page info appears to be currently unused by the kernel and is always set to zero. | 1712 | // Page info appears to be currently unused by the kernel and is always set to zero. |
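The offsets written above line up with the Svc::MemoryInfo struct this change introduces in svc_types.h. A small compile-time cross-check, purely illustrative, assuming yuzu's usual include-from-src convention:

    #include <cstddef>
    #include "core/hle/kernel/svc_types.h"

    // Sketch: QueryProcessMemory writes fields at these offsets, matching Svc::MemoryInfo.
    static_assert(offsetof(Kernel::Svc::MemoryInfo, base_address) == 0x00);
    static_assert(offsetof(Kernel::Svc::MemoryInfo, size) == 0x08);
    static_assert(offsetof(Kernel::Svc::MemoryInfo, state) == 0x10);
    static_assert(offsetof(Kernel::Svc::MemoryInfo, attribute) == 0x14);
    static_assert(offsetof(Kernel::Svc::MemoryInfo, permission) == 0x18);
    static_assert(offsetof(Kernel::Svc::MemoryInfo, ipc_count) == 0x1c);
    static_assert(offsetof(Kernel::Svc::MemoryInfo, device_count) == 0x20);
    static_assert(sizeof(Kernel::Svc::MemoryInfo) == 0x28);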
| @@ -1943,7 +1927,7 @@ static Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry | |||
| 1943 | 1927 | ||
| 1944 | // Reserve a new thread from the process resource limit (waiting up to 100ms). | 1928 | // Reserve a new thread from the process resource limit (waiting up to 100ms). |
| 1945 | KScopedResourceReservation thread_reservation( | 1929 | KScopedResourceReservation thread_reservation( |
| 1946 | kernel.CurrentProcess(), LimitableResource::Threads, 1, | 1930 | kernel.CurrentProcess(), LimitableResource::ThreadCountMax, 1, |
| 1947 | system.CoreTiming().GetGlobalTimeNs().count() + 100000000); | 1931 | system.CoreTiming().GetGlobalTimeNs().count() + 100000000); |
| 1948 | if (!thread_reservation.Succeeded()) { | 1932 | if (!thread_reservation.Succeeded()) { |
| 1949 | LOG_ERROR(Kernel_SVC, "Could not reserve a new thread"); | 1933 | LOG_ERROR(Kernel_SVC, "Could not reserve a new thread"); |
| @@ -2247,7 +2231,7 @@ static u64 GetSystemTick(Core::System& system) { | |||
| 2247 | auto& core_timing = system.CoreTiming(); | 2231 | auto& core_timing = system.CoreTiming(); |
| 2248 | 2232 | ||
| 2249 | // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick) | 2233 | // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick) |
| 2250 | const u64 result{system.CoreTiming().GetClockTicks()}; | 2234 | const u64 result{core_timing.GetClockTicks()}; |
| 2251 | 2235 | ||
| 2252 | if (!system.Kernel().IsMulticore()) { | 2236 | if (!system.Kernel().IsMulticore()) { |
| 2253 | core_timing.AddTicks(400U); | 2237 | core_timing.AddTicks(400U); |
| @@ -2344,7 +2328,7 @@ static Result CreateTransferMemory(Core::System& system, Handle* out, VAddr addr | |||
| 2344 | 2328 | ||
| 2345 | // Reserve a new transfer memory from the process resource limit. | 2329 | // Reserve a new transfer memory from the process resource limit. |
| 2346 | KScopedResourceReservation trmem_reservation(kernel.CurrentProcess(), | 2330 | KScopedResourceReservation trmem_reservation(kernel.CurrentProcess(), |
| 2347 | LimitableResource::TransferMemory); | 2331 | LimitableResource::TransferMemoryCountMax); |
| 2348 | R_UNLESS(trmem_reservation.Succeeded(), ResultLimitReached); | 2332 | R_UNLESS(trmem_reservation.Succeeded(), ResultLimitReached); |
| 2349 | 2333 | ||
| 2350 | // Create the transfer memory. | 2334 | // Create the transfer memory. |
| @@ -2496,7 +2480,7 @@ static Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_r | |||
| 2496 | 2480 | ||
| 2497 | // Reserve a new event from the process resource limit | 2481 | // Reserve a new event from the process resource limit |
| 2498 | KScopedResourceReservation event_reservation(kernel.CurrentProcess(), | 2482 | KScopedResourceReservation event_reservation(kernel.CurrentProcess(), |
| 2499 | LimitableResource::Events); | 2483 | LimitableResource::EventCountMax); |
| 2500 | R_UNLESS(event_reservation.Succeeded(), ResultLimitReached); | 2484 | R_UNLESS(event_reservation.Succeeded(), ResultLimitReached); |
| 2501 | 2485 | ||
| 2502 | // Create a new event. | 2486 | // Create a new event. |
| @@ -2539,11 +2523,6 @@ static Result CreateEvent32(Core::System& system, Handle* out_write, Handle* out | |||
| 2539 | static Result GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) { | 2523 | static Result GetProcessInfo(Core::System& system, u64* out, Handle process_handle, u32 type) { |
| 2540 | LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type); | 2524 | LOG_DEBUG(Kernel_SVC, "called, handle=0x{:08X}, type=0x{:X}", process_handle, type); |
| 2541 | 2525 | ||
| 2542 | // This function currently only allows retrieving a process' status. | ||
| 2543 | enum class InfoType { | ||
| 2544 | Status, | ||
| 2545 | }; | ||
| 2546 | |||
| 2547 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); | 2526 | const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); |
| 2548 | KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle); | 2527 | KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle); |
| 2549 | if (process.IsNull()) { | 2528 | if (process.IsNull()) { |
| @@ -2552,9 +2531,9 @@ static Result GetProcessInfo(Core::System& system, u64* out, Handle process_hand | |||
| 2552 | return ResultInvalidHandle; | 2531 | return ResultInvalidHandle; |
| 2553 | } | 2532 | } |
| 2554 | 2533 | ||
| 2555 | const auto info_type = static_cast<InfoType>(type); | 2534 | const auto info_type = static_cast<ProcessInfoType>(type); |
| 2556 | if (info_type != InfoType::Status) { | 2535 | if (info_type != ProcessInfoType::ProcessState) { |
| 2557 | LOG_ERROR(Kernel_SVC, "Expected info_type to be Status but got {} instead", type); | 2536 | LOG_ERROR(Kernel_SVC, "Expected info_type to be ProcessState but got {} instead", type); |
| 2558 | return ResultInvalidEnumValue; | 2537 | return ResultInvalidEnumValue; |
| 2559 | } | 2538 | } |
| 2560 | 2539 | ||
diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h index f27cade33..b7ca53085 100644 --- a/src/core/hle/kernel/svc_results.h +++ b/src/core/hle/kernel/svc_results.h | |||
| @@ -37,6 +37,7 @@ constexpr Result ResultInvalidState{ErrorModule::Kernel, 125}; | |||
| 37 | constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126}; | 37 | constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126}; |
| 38 | constexpr Result ResultPortClosed{ErrorModule::Kernel, 131}; | 38 | constexpr Result ResultPortClosed{ErrorModule::Kernel, 131}; |
| 39 | constexpr Result ResultLimitReached{ErrorModule::Kernel, 132}; | 39 | constexpr Result ResultLimitReached{ErrorModule::Kernel, 132}; |
| 40 | constexpr Result ResultOutOfAddressSpace{ErrorModule::Kernel, 259}; | ||
| 40 | constexpr Result ResultInvalidId{ErrorModule::Kernel, 519}; | 41 | constexpr Result ResultInvalidId{ErrorModule::Kernel, 519}; |
| 41 | 42 | ||
| 42 | } // namespace Kernel | 43 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h index abb9847fe..33eebcef6 100644 --- a/src/core/hle/kernel/svc_types.h +++ b/src/core/hle/kernel/svc_types.h | |||
| @@ -8,6 +8,8 @@ | |||
| 8 | 8 | ||
| 9 | namespace Kernel::Svc { | 9 | namespace Kernel::Svc { |
| 10 | 10 | ||
| 11 | using Handle = u32; | ||
| 12 | |||
| 11 | enum class MemoryState : u32 { | 13 | enum class MemoryState : u32 { |
| 12 | Free = 0x00, | 14 | Free = 0x00, |
| 13 | Io = 0x01, | 15 | Io = 0x01, |
| @@ -22,8 +24,8 @@ enum class MemoryState : u32 { | |||
| 22 | Ipc = 0x0A, | 24 | Ipc = 0x0A, |
| 23 | Stack = 0x0B, | 25 | Stack = 0x0B, |
| 24 | ThreadLocal = 0x0C, | 26 | ThreadLocal = 0x0C, |
| 25 | Transferred = 0x0D, | 27 | Transfered = 0x0D, |
| 26 | SharedTransferred = 0x0E, | 28 | SharedTransfered = 0x0E, |
| 27 | SharedCode = 0x0F, | 29 | SharedCode = 0x0F, |
| 28 | Inaccessible = 0x10, | 30 | Inaccessible = 0x10, |
| 29 | NonSecureIpc = 0x11, | 31 | NonSecureIpc = 0x11, |
| @@ -32,6 +34,7 @@ enum class MemoryState : u32 { | |||
| 32 | GeneratedCode = 0x14, | 34 | GeneratedCode = 0x14, |
| 33 | CodeOut = 0x15, | 35 | CodeOut = 0x15, |
| 34 | Coverage = 0x16, | 36 | Coverage = 0x16, |
| 37 | Insecure = 0x17, | ||
| 35 | }; | 38 | }; |
| 36 | DECLARE_ENUM_FLAG_OPERATORS(MemoryState); | 39 | DECLARE_ENUM_FLAG_OPERATORS(MemoryState); |
| 37 | 40 | ||
| @@ -54,17 +57,6 @@ enum class MemoryPermission : u32 { | |||
| 54 | }; | 57 | }; |
| 55 | DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission); | 58 | DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission); |
| 56 | 59 | ||
| 57 | struct MemoryInfo { | ||
| 58 | u64 addr{}; | ||
| 59 | u64 size{}; | ||
| 60 | MemoryState state{}; | ||
| 61 | MemoryAttribute attr{}; | ||
| 62 | MemoryPermission perm{}; | ||
| 63 | u32 ipc_refcount{}; | ||
| 64 | u32 device_refcount{}; | ||
| 65 | u32 padding{}; | ||
| 66 | }; | ||
| 67 | |||
| 68 | enum class SignalType : u32 { | 60 | enum class SignalType : u32 { |
| 69 | Signal = 0, | 61 | Signal = 0, |
| 70 | SignalAndIncrementIfEqual = 1, | 62 | SignalAndIncrementIfEqual = 1, |
| @@ -83,6 +75,13 @@ enum class YieldType : s64 { | |||
| 83 | ToAnyThread = -2, | 75 | ToAnyThread = -2, |
| 84 | }; | 76 | }; |
| 85 | 77 | ||
| 78 | enum class ThreadExitReason : u32 { | ||
| 79 | ExitThread = 0, | ||
| 80 | TerminateThread = 1, | ||
| 81 | ExitProcess = 2, | ||
| 82 | TerminateProcess = 3, | ||
| 83 | }; | ||
| 84 | |||
| 86 | enum class ThreadActivity : u32 { | 85 | enum class ThreadActivity : u32 { |
| 87 | Runnable = 0, | 86 | Runnable = 0, |
| 88 | Paused = 1, | 87 | Paused = 1, |
| @@ -108,6 +107,489 @@ enum class ProcessState : u32 { | |||
| 108 | DebugBreak = 7, | 107 | DebugBreak = 7, |
| 109 | }; | 108 | }; |
| 110 | 109 | ||
| 110 | enum class ProcessExitReason : u32 { | ||
| 111 | ExitProcess = 0, | ||
| 112 | TerminateProcess = 1, | ||
| 113 | Exception = 2, | ||
| 114 | }; | ||
| 115 | |||
| 111 | constexpr inline size_t ThreadLocalRegionSize = 0x200; | 116 | constexpr inline size_t ThreadLocalRegionSize = 0x200; |
| 112 | 117 | ||
| 118 | struct PageInfo { | ||
| 119 | u32 flags; | ||
| 120 | }; | ||
| 121 | |||
| 122 | // Info Types. | ||
| 123 | enum class InfoType : u32 { | ||
| 124 | CoreMask = 0, | ||
| 125 | PriorityMask = 1, | ||
| 126 | AliasRegionAddress = 2, | ||
| 127 | AliasRegionSize = 3, | ||
| 128 | HeapRegionAddress = 4, | ||
| 129 | HeapRegionSize = 5, | ||
| 130 | TotalMemorySize = 6, | ||
| 131 | UsedMemorySize = 7, | ||
| 132 | DebuggerAttached = 8, | ||
| 133 | ResourceLimit = 9, | ||
| 134 | IdleTickCount = 10, | ||
| 135 | RandomEntropy = 11, | ||
| 136 | AslrRegionAddress = 12, | ||
| 137 | AslrRegionSize = 13, | ||
| 138 | StackRegionAddress = 14, | ||
| 139 | StackRegionSize = 15, | ||
| 140 | SystemResourceSizeTotal = 16, | ||
| 141 | SystemResourceSizeUsed = 17, | ||
| 142 | ProgramId = 18, | ||
| 143 | InitialProcessIdRange = 19, | ||
| 144 | UserExceptionContextAddress = 20, | ||
| 145 | TotalNonSystemMemorySize = 21, | ||
| 146 | UsedNonSystemMemorySize = 22, | ||
| 147 | IsApplication = 23, | ||
| 148 | FreeThreadCount = 24, | ||
| 149 | ThreadTickCount = 25, | ||
| 150 | IsSvcPermitted = 26, | ||
| 151 | |||
| 152 | MesosphereMeta = 65000, | ||
| 153 | MesosphereCurrentProcess = 65001, | ||
| 154 | }; | ||
| 155 | |||
| 156 | enum class BreakReason : u32 { | ||
| 157 | Panic = 0, | ||
| 158 | Assert = 1, | ||
| 159 | User = 2, | ||
| 160 | PreLoadDll = 3, | ||
| 161 | PostLoadDll = 4, | ||
| 162 | PreUnloadDll = 5, | ||
| 163 | PostUnloadDll = 6, | ||
| 164 | CppException = 7, | ||
| 165 | |||
| 166 | NotificationOnlyFlag = 0x80000000, | ||
| 167 | }; | ||
| 168 | |||
| 169 | enum class DebugEvent : u32 { | ||
| 170 | CreateProcess = 0, | ||
| 171 | CreateThread = 1, | ||
| 172 | ExitProcess = 2, | ||
| 173 | ExitThread = 3, | ||
| 174 | Exception = 4, | ||
| 175 | }; | ||
| 176 | |||
| 177 | enum class DebugThreadParam : u32 { | ||
| 178 | Priority = 0, | ||
| 179 | State = 1, | ||
| 180 | IdealCore = 2, | ||
| 181 | CurrentCore = 3, | ||
| 182 | AffinityMask = 4, | ||
| 183 | }; | ||
| 184 | |||
| 185 | enum class DebugException : u32 { | ||
| 186 | UndefinedInstruction = 0, | ||
| 187 | InstructionAbort = 1, | ||
| 188 | DataAbort = 2, | ||
| 189 | AlignmentFault = 3, | ||
| 190 | DebuggerAttached = 4, | ||
| 191 | BreakPoint = 5, | ||
| 192 | UserBreak = 6, | ||
| 193 | DebuggerBreak = 7, | ||
| 194 | UndefinedSystemCall = 8, | ||
| 195 | MemorySystemError = 9, | ||
| 196 | }; | ||
| 197 | |||
| 198 | enum class DebugEventFlag : u32 { | ||
| 199 | Stopped = (1u << 0), | ||
| 200 | }; | ||
| 201 | |||
| 202 | enum class BreakPointType : u32 { | ||
| 203 | HardwareInstruction = 0, | ||
| 204 | HardwareData = 1, | ||
| 205 | }; | ||
| 206 | |||
| 207 | enum class HardwareBreakPointRegisterName : u32 { | ||
| 208 | I0 = 0, | ||
| 209 | I1 = 1, | ||
| 210 | I2 = 2, | ||
| 211 | I3 = 3, | ||
| 212 | I4 = 4, | ||
| 213 | I5 = 5, | ||
| 214 | I6 = 6, | ||
| 215 | I7 = 7, | ||
| 216 | I8 = 8, | ||
| 217 | I9 = 9, | ||
| 218 | I10 = 10, | ||
| 219 | I11 = 11, | ||
| 220 | I12 = 12, | ||
| 221 | I13 = 13, | ||
| 222 | I14 = 14, | ||
| 223 | I15 = 15, | ||
| 224 | D0 = 16, | ||
| 225 | D1 = 17, | ||
| 226 | D2 = 18, | ||
| 227 | D3 = 19, | ||
| 228 | D4 = 20, | ||
| 229 | D5 = 21, | ||
| 230 | D6 = 22, | ||
| 231 | D7 = 23, | ||
| 232 | D8 = 24, | ||
| 233 | D9 = 25, | ||
| 234 | D10 = 26, | ||
| 235 | D11 = 27, | ||
| 236 | D12 = 28, | ||
| 237 | D13 = 29, | ||
| 238 | D14 = 30, | ||
| 239 | D15 = 31, | ||
| 240 | }; | ||
| 241 | |||
| 242 | namespace lp64 { | ||
| 243 | struct LastThreadContext { | ||
| 244 | u64 fp; | ||
| 245 | u64 sp; | ||
| 246 | u64 lr; | ||
| 247 | u64 pc; | ||
| 248 | }; | ||
| 249 | |||
| 250 | struct PhysicalMemoryInfo { | ||
| 251 | PAddr physical_address; | ||
| 252 | u64 virtual_address; | ||
| 253 | u64 size; | ||
| 254 | }; | ||
| 255 | |||
| 256 | struct DebugInfoCreateProcess { | ||
| 257 | u64 program_id; | ||
| 258 | u64 process_id; | ||
| 259 | std::array<char, 0xC> name; | ||
| 260 | u32 flags; | ||
| 261 | u64 user_exception_context_address; // 5.0.0+ | ||
| 262 | }; | ||
| 263 | |||
| 264 | struct DebugInfoCreateThread { | ||
| 265 | u64 thread_id; | ||
| 266 | u64 tls_address; | ||
| 267 | // Removed in 11.0.0 u64 entrypoint; | ||
| 268 | }; | ||
| 269 | |||
| 270 | struct DebugInfoExitProcess { | ||
| 271 | ProcessExitReason reason; | ||
| 272 | }; | ||
| 273 | |||
| 274 | struct DebugInfoExitThread { | ||
| 275 | ThreadExitReason reason; | ||
| 276 | }; | ||
| 277 | |||
| 278 | struct DebugInfoUndefinedInstructionException { | ||
| 279 | u32 insn; | ||
| 280 | }; | ||
| 281 | |||
| 282 | struct DebugInfoDataAbortException { | ||
| 283 | u64 address; | ||
| 284 | }; | ||
| 285 | |||
| 286 | struct DebugInfoAlignmentFaultException { | ||
| 287 | u64 address; | ||
| 288 | }; | ||
| 289 | |||
| 290 | struct DebugInfoBreakPointException { | ||
| 291 | BreakPointType type; | ||
| 292 | u64 address; | ||
| 293 | }; | ||
| 294 | |||
| 295 | struct DebugInfoUserBreakException { | ||
| 296 | BreakReason break_reason; | ||
| 297 | u64 address; | ||
| 298 | u64 size; | ||
| 299 | }; | ||
| 300 | |||
| 301 | struct DebugInfoDebuggerBreakException { | ||
| 302 | std::array<u64, 4> active_thread_ids; | ||
| 303 | }; | ||
| 304 | |||
| 305 | struct DebugInfoUndefinedSystemCallException { | ||
| 306 | u32 id; | ||
| 307 | }; | ||
| 308 | |||
| 309 | union DebugInfoSpecificException { | ||
| 310 | DebugInfoUndefinedInstructionException undefined_instruction; | ||
| 311 | DebugInfoDataAbortException data_abort; | ||
| 312 | DebugInfoAlignmentFaultException alignment_fault; | ||
| 313 | DebugInfoBreakPointException break_point; | ||
| 314 | DebugInfoUserBreakException user_break; | ||
| 315 | DebugInfoDebuggerBreakException debugger_break; | ||
| 316 | DebugInfoUndefinedSystemCallException undefined_system_call; | ||
| 317 | u64 raw; | ||
| 318 | }; | ||
| 319 | |||
| 320 | struct DebugInfoException { | ||
| 321 | DebugException type; | ||
| 322 | u64 address; | ||
| 323 | DebugInfoSpecificException specific; | ||
| 324 | }; | ||
| 325 | |||
| 326 | union DebugInfo { | ||
| 327 | DebugInfoCreateProcess create_process; | ||
| 328 | DebugInfoCreateThread create_thread; | ||
| 329 | DebugInfoExitProcess exit_process; | ||
| 330 | DebugInfoExitThread exit_thread; | ||
| 331 | DebugInfoException exception; | ||
| 332 | }; | ||
| 333 | |||
| 334 | struct DebugEventInfo { | ||
| 335 | DebugEvent type; | ||
| 336 | u32 flags; | ||
| 337 | u64 thread_id; | ||
| 338 | DebugInfo info; | ||
| 339 | }; | ||
| 340 | static_assert(sizeof(DebugEventInfo) >= 0x40); | ||
| 341 | |||
| 342 | struct SecureMonitorArguments { | ||
| 343 | std::array<u64, 8> r; | ||
| 344 | }; | ||
| 345 | static_assert(sizeof(SecureMonitorArguments) == 0x40); | ||
| 346 | } // namespace lp64 | ||
| 347 | |||
| 348 | namespace ilp32 { | ||
| 349 | struct LastThreadContext { | ||
| 350 | u32 fp; | ||
| 351 | u32 sp; | ||
| 352 | u32 lr; | ||
| 353 | u32 pc; | ||
| 354 | }; | ||
| 355 | |||
| 356 | struct PhysicalMemoryInfo { | ||
| 357 | PAddr physical_address; | ||
| 358 | u32 virtual_address; | ||
| 359 | u32 size; | ||
| 360 | }; | ||
| 361 | |||
| 362 | struct DebugInfoCreateProcess { | ||
| 363 | u64 program_id; | ||
| 364 | u64 process_id; | ||
| 365 | std::array<char, 0xC> name; | ||
| 366 | u32 flags; | ||
| 367 | u32 user_exception_context_address; // 5.0.0+ | ||
| 368 | }; | ||
| 369 | |||
| 370 | struct DebugInfoCreateThread { | ||
| 371 | u64 thread_id; | ||
| 372 | u32 tls_address; | ||
| 373 | // Removed in 11.0.0 u32 entrypoint; | ||
| 374 | }; | ||
| 375 | |||
| 376 | struct DebugInfoExitProcess { | ||
| 377 | ProcessExitReason reason; | ||
| 378 | }; | ||
| 379 | |||
| 380 | struct DebugInfoExitThread { | ||
| 381 | ThreadExitReason reason; | ||
| 382 | }; | ||
| 383 | |||
| 384 | struct DebugInfoUndefinedInstructionException { | ||
| 385 | u32 insn; | ||
| 386 | }; | ||
| 387 | |||
| 388 | struct DebugInfoDataAbortException { | ||
| 389 | u32 address; | ||
| 390 | }; | ||
| 391 | |||
| 392 | struct DebugInfoAlignmentFaultException { | ||
| 393 | u32 address; | ||
| 394 | }; | ||
| 395 | |||
| 396 | struct DebugInfoBreakPointException { | ||
| 397 | BreakPointType type; | ||
| 398 | u32 address; | ||
| 399 | }; | ||
| 400 | |||
| 401 | struct DebugInfoUserBreakException { | ||
| 402 | BreakReason break_reason; | ||
| 403 | u32 address; | ||
| 404 | u32 size; | ||
| 405 | }; | ||
| 406 | |||
| 407 | struct DebugInfoDebuggerBreakException { | ||
| 408 | std::array<u64, 4> active_thread_ids; | ||
| 409 | }; | ||
| 410 | |||
| 411 | struct DebugInfoUndefinedSystemCallException { | ||
| 412 | u32 id; | ||
| 413 | }; | ||
| 414 | |||
| 415 | union DebugInfoSpecificException { | ||
| 416 | DebugInfoUndefinedInstructionException undefined_instruction; | ||
| 417 | DebugInfoDataAbortException data_abort; | ||
| 418 | DebugInfoAlignmentFaultException alignment_fault; | ||
| 419 | DebugInfoBreakPointException break_point; | ||
| 420 | DebugInfoUserBreakException user_break; | ||
| 421 | DebugInfoDebuggerBreakException debugger_break; | ||
| 422 | DebugInfoUndefinedSystemCallException undefined_system_call; | ||
| 423 | u64 raw; | ||
| 424 | }; | ||
| 425 | |||
| 426 | struct DebugInfoException { | ||
| 427 | DebugException type; | ||
| 428 | u32 address; | ||
| 429 | DebugInfoSpecificException specific; | ||
| 430 | }; | ||
| 431 | |||
| 432 | union DebugInfo { | ||
| 433 | DebugInfoCreateProcess create_process; | ||
| 434 | DebugInfoCreateThread create_thread; | ||
| 435 | DebugInfoExitProcess exit_process; | ||
| 436 | DebugInfoExitThread exit_thread; | ||
| 437 | DebugInfoException exception; | ||
| 438 | }; | ||
| 439 | |||
| 440 | struct DebugEventInfo { | ||
| 441 | DebugEvent type; | ||
| 442 | u32 flags; | ||
| 443 | u64 thread_id; | ||
| 444 | DebugInfo info; | ||
| 445 | }; | ||
| 446 | |||
| 447 | struct SecureMonitorArguments { | ||
| 448 | std::array<u32, 8> r; | ||
| 449 | }; | ||
| 450 | static_assert(sizeof(SecureMonitorArguments) == 0x20); | ||
| 451 | } // namespace ilp32 | ||
| 452 | |||
| 453 | struct ThreadContext { | ||
| 454 | std::array<u64, 29> r; | ||
| 455 | u64 fp; | ||
| 456 | u64 lr; | ||
| 457 | u64 sp; | ||
| 458 | u64 pc; | ||
| 459 | u32 pstate; | ||
| 460 | u32 padding; | ||
| 461 | std::array<u128, 32> v; | ||
| 462 | u32 fpcr; | ||
| 463 | u32 fpsr; | ||
| 464 | u64 tpidr; | ||
| 465 | }; | ||
| 466 | static_assert(sizeof(ThreadContext) == 0x320); | ||
| 467 | |||
| 468 | struct MemoryInfo { | ||
| 469 | u64 base_address; | ||
| 470 | u64 size; | ||
| 471 | MemoryState state; | ||
| 472 | MemoryAttribute attribute; | ||
| 473 | MemoryPermission permission; | ||
| 474 | u32 ipc_count; | ||
| 475 | u32 device_count; | ||
| 476 | u32 padding; | ||
| 477 | }; | ||
| 478 | |||
| 479 | enum class LimitableResource : u32 { | ||
| 480 | PhysicalMemoryMax = 0, | ||
| 481 | ThreadCountMax = 1, | ||
| 482 | EventCountMax = 2, | ||
| 483 | TransferMemoryCountMax = 3, | ||
| 484 | SessionCountMax = 4, | ||
| 485 | Count, | ||
| 486 | }; | ||
| 487 | |||
| 488 | enum class IoPoolType : u32 { | ||
| 489 | // Not supported. | ||
| 490 | Count = 0, | ||
| 491 | }; | ||
| 492 | |||
| 493 | enum class MemoryMapping : u32 { | ||
| 494 | IoRegister = 0, | ||
| 495 | Uncached = 1, | ||
| 496 | Memory = 2, | ||
| 497 | }; | ||
| 498 | |||
| 499 | enum class KernelDebugType : u32 { | ||
| 500 | Thread = 0, | ||
| 501 | ThreadCallStack = 1, | ||
| 502 | KernelObject = 2, | ||
| 503 | Handle_ = 3, | ||
| 504 | Memory = 4, | ||
| 505 | PageTable = 5, | ||
| 506 | CpuUtilization = 6, | ||
| 507 | Process = 7, | ||
| 508 | SuspendProcess = 8, | ||
| 509 | ResumeProcess = 9, | ||
| 510 | Port = 10, | ||
| 511 | }; | ||
| 512 | |||
| 513 | enum class KernelTraceState : u32 { | ||
| 514 | Disabled = 0, | ||
| 515 | Enabled = 1, | ||
| 516 | }; | ||
| 517 | |||
| 518 | enum class CodeMemoryOperation : u32 { | ||
| 519 | Map = 0, | ||
| 520 | MapToOwner = 1, | ||
| 521 | Unmap = 2, | ||
| 522 | UnmapFromOwner = 3, | ||
| 523 | }; | ||
| 524 | |||
| 525 | enum class InterruptType : u32 { | ||
| 526 | Edge = 0, | ||
| 527 | Level = 1, | ||
| 528 | }; | ||
| 529 | |||
| 530 | enum class DeviceName { | ||
| 531 | Afi = 0, | ||
| 532 | Avpc = 1, | ||
| 533 | Dc = 2, | ||
| 534 | Dcb = 3, | ||
| 535 | Hc = 4, | ||
| 536 | Hda = 5, | ||
| 537 | Isp2 = 6, | ||
| 538 | MsencNvenc = 7, | ||
| 539 | Nv = 8, | ||
| 540 | Nv2 = 9, | ||
| 541 | Ppcs = 10, | ||
| 542 | Sata = 11, | ||
| 543 | Vi = 12, | ||
| 544 | Vic = 13, | ||
| 545 | XusbHost = 14, | ||
| 546 | XusbDev = 15, | ||
| 547 | Tsec = 16, | ||
| 548 | Ppcs1 = 17, | ||
| 549 | Dc1 = 18, | ||
| 550 | Sdmmc1a = 19, | ||
| 551 | Sdmmc2a = 20, | ||
| 552 | Sdmmc3a = 21, | ||
| 553 | Sdmmc4a = 22, | ||
| 554 | Isp2b = 23, | ||
| 555 | Gpu = 24, | ||
| 556 | Gpub = 25, | ||
| 557 | Ppcs2 = 26, | ||
| 558 | Nvdec = 27, | ||
| 559 | Ape = 28, | ||
| 560 | Se = 29, | ||
| 561 | Nvjpg = 30, | ||
| 562 | Hc1 = 31, | ||
| 563 | Se1 = 32, | ||
| 564 | Axiap = 33, | ||
| 565 | Etr = 34, | ||
| 566 | Tsecb = 35, | ||
| 567 | Tsec1 = 36, | ||
| 568 | Tsecb1 = 37, | ||
| 569 | Nvdec1 = 38, | ||
| 570 | Count, | ||
| 571 | }; | ||
| 572 | |||
| 573 | enum class SystemInfoType : u32 { | ||
| 574 | TotalPhysicalMemorySize = 0, | ||
| 575 | UsedPhysicalMemorySize = 1, | ||
| 576 | InitialProcessIdRange = 2, | ||
| 577 | }; | ||
| 578 | |||
| 579 | enum class ProcessInfoType : u32 { | ||
| 580 | ProcessState = 0, | ||
| 581 | }; | ||
| 582 | |||
| 583 | struct CreateProcessParameter { | ||
| 584 | std::array<char, 12> name; | ||
| 585 | u32 version; | ||
| 586 | u64 program_id; | ||
| 587 | u64 code_address; | ||
| 588 | s32 code_num_pages; | ||
| 589 | u32 flags; | ||
| 590 | Handle reslimit; | ||
| 591 | s32 system_resource_num_pages; | ||
| 592 | }; | ||
| 593 | static_assert(sizeof(CreateProcessParameter) == 0x30); | ||
| 594 | |||
| 113 | } // namespace Kernel::Svc | 595 | } // namespace Kernel::Svc |
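As a sanity check on the ThreadContext definition added above, the asserted 0x320 size breaks down as follows (assuming the usual 8-byte u64 and 16-byte u128):

    // r0..r28        : 29 * 8  = 232
    // fp, lr, sp, pc :  4 * 8  =  32
    // pstate, padding:  2 * 4  =   8
    // v0..v31        : 32 * 16 = 512
    // fpcr, fpsr     :  2 * 4  =   8
    // tpidr          :  1 * 8  =   8
    //                    total = 800 = 0x320
    static_assert(29 * 8 + 4 * 8 + 2 * 4 + 32 * 16 + 2 * 4 + 8 == 0x320);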
diff --git a/src/core/hle/result.h b/src/core/hle/result.h index ef4b2d417..56c990728 100644 --- a/src/core/hle/result.h +++ b/src/core/hle/result.h | |||
| @@ -423,16 +423,17 @@ constexpr void UpdateCurrentResultReference<const Result>(Result result_referenc | |||
| 423 | } // namespace ResultImpl | 423 | } // namespace ResultImpl |
| 424 | 424 | ||
| 425 | #define DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(COUNTER_VALUE) \ | 425 | #define DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(COUNTER_VALUE) \ |
| 426 | [[maybe_unused]] constexpr bool HasPrevRef_##COUNTER_VALUE = \ | 426 | [[maybe_unused]] constexpr bool CONCAT2(HasPrevRef_, COUNTER_VALUE) = \ |
| 427 | std::same_as<decltype(__TmpCurrentResultReference), Result&>; \ | 427 | std::same_as<decltype(__TmpCurrentResultReference), Result&>; \ |
| 428 | [[maybe_unused]] auto& PrevRef_##COUNTER_VALUE = __TmpCurrentResultReference; \ | 428 | [[maybe_unused]] Result CONCAT2(PrevRef_, COUNTER_VALUE) = __TmpCurrentResultReference; \ |
| 429 | [[maybe_unused]] Result __tmp_result_##COUNTER_VALUE = ResultSuccess; \ | 429 | [[maybe_unused]] Result CONCAT2(__tmp_result_, COUNTER_VALUE) = ResultSuccess; \ |
| 430 | Result& __TmpCurrentResultReference = \ | 430 | Result& __TmpCurrentResultReference = CONCAT2(HasPrevRef_, COUNTER_VALUE) \ |
| 431 | HasPrevRef_##COUNTER_VALUE ? PrevRef_##COUNTER_VALUE : __tmp_result_##COUNTER_VALUE | 431 | ? CONCAT2(PrevRef_, COUNTER_VALUE) \ |
| 432 | : CONCAT2(__tmp_result_, COUNTER_VALUE) | ||
| 432 | 433 | ||
| 433 | #define ON_RESULT_RETURN_IMPL(...) \ | 434 | #define ON_RESULT_RETURN_IMPL(...) \ |
| 434 | static_assert(std::same_as<decltype(__TmpCurrentResultReference), Result&>); \ | 435 | static_assert(std::same_as<decltype(__TmpCurrentResultReference), Result&>); \ |
| 435 | auto RESULT_GUARD_STATE_##__COUNTER__ = \ | 436 | auto CONCAT2(RESULT_GUARD_STATE_, __COUNTER__) = \ |
| 436 | ResultImpl::ResultReferenceForScopedResultGuard<__VA_ARGS__>( \ | 437 | ResultImpl::ResultReferenceForScopedResultGuard<__VA_ARGS__>( \ |
| 437 | __TmpCurrentResultReference) + \ | 438 | __TmpCurrentResultReference) + \ |
| 438 | [&]() | 439 | [&]() |
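The switch to CONCAT2 in result.h matters most for the __COUNTER__ case: the ## operator does not macro-expand its operands, so RESULT_GUARD_STATE_##__COUNTER__ always produced the single literal identifier RESULT_GUARD_STATE___COUNTER__. A standalone illustration of the two-level expansion trick; the macro is spelled out here as MY_CONCAT2, while yuzu's own CONCAT2 is defined in its common headers:

    // Sketch: two-level concatenation so __COUNTER__ expands before pasting.
    #define CONCAT2_IMPL(x, y) x##y
    #define MY_CONCAT2(x, y) CONCAT2_IMPL(x, y)

    #define DIRECT_GUARD   int guard_##__COUNTER__               // always 'guard___COUNTER__'
    #define EXPANDED_GUARD int MY_CONCAT2(guard_, __COUNTER__)   // 'guard_0', 'guard_1', ...

    void Demo() {
        [[maybe_unused]] EXPANDED_GUARD = 1;  // distinct identifier per expansion
        [[maybe_unused]] EXPANDED_GUARD = 2;  // no redefinition, unlike DIRECT_GUARD
    }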
diff --git a/src/core/hle/service/kernel_helpers.cpp b/src/core/hle/service/kernel_helpers.cpp index af133af93..42991928e 100644 --- a/src/core/hle/service/kernel_helpers.cpp +++ b/src/core/hle/service/kernel_helpers.cpp | |||
| @@ -31,7 +31,7 @@ ServiceContext::~ServiceContext() { | |||
| 31 | Kernel::KEvent* ServiceContext::CreateEvent(std::string&& name) { | 31 | Kernel::KEvent* ServiceContext::CreateEvent(std::string&& name) { |
| 32 | // Reserve a new event from the process resource limit | 32 | // Reserve a new event from the process resource limit |
| 33 | Kernel::KScopedResourceReservation event_reservation(process, | 33 | Kernel::KScopedResourceReservation event_reservation(process, |
| 34 | Kernel::LimitableResource::Events); | 34 | Kernel::LimitableResource::EventCountMax); |
| 35 | if (!event_reservation.Succeeded()) { | 35 | if (!event_reservation.Succeeded()) { |
| 36 | LOG_CRITICAL(Service, "Resource limit reached!"); | 36 | LOG_CRITICAL(Service, "Resource limit reached!"); |
| 37 | return {}; | 37 | return {}; |
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index 44388655d..fa29db758 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp | |||
| @@ -126,10 +126,12 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) | |||
| 126 | LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); | 126 | LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); |
| 127 | return result; | 127 | return result; |
| 128 | } | 128 | } |
| 129 | bool is_out_io{}; | ||
| 129 | ASSERT(system.CurrentProcess() | 130 | ASSERT(system.CurrentProcess() |
| 130 | ->PageTable() | 131 | ->PageTable() |
| 131 | .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size, | 132 | .LockForMapDeviceAddressSpace(&is_out_io, handle_description->address, |
| 132 | Kernel::KMemoryPermission::None, true) | 133 | handle_description->size, |
| 134 | Kernel::KMemoryPermission::None, true, false) | ||
| 133 | .IsSuccess()); | 135 | .IsSuccess()); |
| 134 | std::memcpy(output.data(), ¶ms, sizeof(params)); | 136 | std::memcpy(output.data(), ¶ms, sizeof(params)); |
| 135 | return result; | 137 | return result; |
diff --git a/src/core/hle/service/sm/sm_controller.cpp b/src/core/hle/service/sm/sm_controller.cpp index 69e0fe808..1cf9dd1c4 100644 --- a/src/core/hle/service/sm/sm_controller.cpp +++ b/src/core/hle/service/sm/sm_controller.cpp | |||
| @@ -34,8 +34,8 @@ void Controller::CloneCurrentObject(Kernel::HLERequestContext& ctx) { | |||
| 34 | // once this is a proper process | 34 | // once this is a proper process |
| 35 | 35 | ||
| 36 | // Reserve a new session from the process resource limit. | 36 | // Reserve a new session from the process resource limit. |
| 37 | Kernel::KScopedResourceReservation session_reservation(&process, | 37 | Kernel::KScopedResourceReservation session_reservation( |
| 38 | Kernel::LimitableResource::Sessions); | 38 | &process, Kernel::LimitableResource::SessionCountMax); |
| 39 | ASSERT(session_reservation.Succeeded()); | 39 | ASSERT(session_reservation.Succeeded()); |
| 40 | 40 | ||
| 41 | // Create the session. | 41 | // Create the session. |
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index f9794dfe4..4a2f2c1fd 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp | |||
| @@ -631,47 +631,40 @@ void Maxwell3D::ProcessDeferredDraw() { | |||
| 631 | Instance, | 631 | Instance, |
| 632 | }; | 632 | }; |
| 633 | DrawMode draw_mode{DrawMode::Undefined}; | 633 | DrawMode draw_mode{DrawMode::Undefined}; |
| 634 | u32 instance_count = 1; | ||
| 635 | |||
| 636 | u32 index = 0; | ||
| 637 | u32 method = 0; | ||
| 638 | u32 method_count = static_cast<u32>(deferred_draw_method.size()); | 634 | u32 method_count = static_cast<u32>(deferred_draw_method.size()); |
| 639 | for (; index < method_count && | 635 | u32 method = deferred_draw_method[method_count - 1]; |
| 640 | (method = deferred_draw_method[index]) != MAXWELL3D_REG_INDEX(draw.begin); | 636 | if (MAXWELL3D_REG_INDEX(draw.end) != method) { |
| 641 | ++index) | ||
| 642 | ; | ||
| 643 | |||
| 644 | if (MAXWELL3D_REG_INDEX(draw.begin) != method) { | ||
| 645 | return; | ||
| 646 | } | ||
| 647 | |||
| 648 | // The minimum number of methods for drawing must be greater than or equal to | ||
| 649 | // 3[draw.begin->vertex(index)count(first)->draw.end] to avoid errors in index mode drawing | ||
| 650 | if ((method_count - index) < 3) { | ||
| 651 | return; | 637 | return; |
| 652 | } | 638 | } |
| 653 | draw_mode = (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Subsequent) || | 639 | draw_mode = (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Subsequent) || |
| 654 | (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Unchanged) | 640 | (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Unchanged) |
| 655 | ? DrawMode::Instance | 641 | ? DrawMode::Instance |
| 656 | : DrawMode::General; | 642 | : DrawMode::General; |
| 657 | 643 | u32 instance_count = 0; | |
| 658 | // Drawing will only begin with draw.begin or index_buffer method, other methods directly | ||
| 659 | // clear | ||
| 660 | if (draw_mode == DrawMode::Undefined) { | ||
| 661 | deferred_draw_method.clear(); | ||
| 662 | return; | ||
| 663 | } | ||
| 664 | |||
| 665 | if (draw_mode == DrawMode::Instance) { | 644 | if (draw_mode == DrawMode::Instance) { |
| 666 | ASSERT_MSG(deferred_draw_method.size() % 4 == 0, "Instance mode method size error"); | 645 | u32 vertex_buffer_count = 0; |
| 667 | instance_count = static_cast<u32>(method_count - index) / 4; | 646 | u32 index_buffer_count = 0; |
| 647 | for (u32 index = 0; index < method_count; ++index) { | ||
| 648 | method = deferred_draw_method[index]; | ||
| 649 | if (method == MAXWELL3D_REG_INDEX(vertex_buffer.count)) { | ||
| 650 | instance_count = ++vertex_buffer_count; | ||
| 651 | } else if (method == MAXWELL3D_REG_INDEX(index_buffer.count)) { | ||
| 652 | instance_count = ++index_buffer_count; | ||
| 653 | } | ||
| 654 | } | ||
| 655 | ASSERT_MSG(!(vertex_buffer_count && index_buffer_count), | ||
| 656 | "Instance both indexed and direct?"); | ||
| 668 | } else { | 657 | } else { |
| 669 | method = deferred_draw_method[index + 1]; | 658 | instance_count = 1; |
| 670 | if (MAXWELL3D_REG_INDEX(draw_inline_index) == method || | 659 | for (u32 index = 0; index < method_count; ++index) { |
| 671 | MAXWELL3D_REG_INDEX(inline_index_2x16.even) == method || | 660 | method = deferred_draw_method[index]; |
| 672 | MAXWELL3D_REG_INDEX(inline_index_4x8.index0) == method) { | 661 | if (MAXWELL3D_REG_INDEX(draw_inline_index) == method || |
| 673 | regs.index_buffer.count = static_cast<u32>(inline_index_draw_indexes.size() / 4); | 662 | MAXWELL3D_REG_INDEX(inline_index_2x16.even) == method || |
| 674 | regs.index_buffer.format = Regs::IndexFormat::UnsignedInt; | 663 | MAXWELL3D_REG_INDEX(inline_index_4x8.index0) == method) { |
| 664 | regs.index_buffer.count = static_cast<u32>(inline_index_draw_indexes.size() / 4); | ||
| 665 | regs.index_buffer.format = Regs::IndexFormat::UnsignedInt; | ||
| 666 | break; | ||
| 667 | } | ||
| 675 | } | 668 | } |
| 676 | } | 669 | } |
| 677 | 670 | ||
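In isolation, the reworked instance counting above reduces to: walk the deferred method list, count writes to vertex_buffer.count and index_buffer.count, and take whichever is non-zero as the instance count, since a deferred batch must not mix the two. A hedged sketch with placeholder method IDs standing in for the real MAXWELL3D_REG_INDEX values:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Placeholders for MAXWELL3D_REG_INDEX(vertex_buffer.count) and
    // MAXWELL3D_REG_INDEX(index_buffer.count); the real values come from the engine regs.
    constexpr std::uint32_t kVertexBufferCount = 1;
    constexpr std::uint32_t kIndexBufferCount = 2;

    std::uint32_t CountInstances(const std::vector<std::uint32_t>& deferred_methods) {
        std::uint32_t vertex_writes = 0;
        std::uint32_t index_writes = 0;
        for (const std::uint32_t method : deferred_methods) {
            if (method == kVertexBufferCount) {
                ++vertex_writes;
            } else if (method == kIndexBufferCount) {
                ++index_writes;
            }
        }
        // Mirrors the ASSERT_MSG in the diff: either indexed or direct, never both.
        assert(!(vertex_writes != 0 && index_writes != 0));
        return vertex_writes != 0 ? vertex_writes : index_writes;
    }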
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index 384350dbd..8c8dfcca6 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp | |||
| @@ -45,7 +45,7 @@ MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 | |||
| 45 | kind_valus.fill(PTEKind::INVALID); | 45 | kind_valus.fill(PTEKind::INVALID); |
| 46 | big_kinds.resize(big_page_table_size / 32, kind_valus); | 46 | big_kinds.resize(big_page_table_size / 32, kind_valus); |
| 47 | entries.resize(page_table_size / 32, 0); | 47 | entries.resize(page_table_size / 32, 0); |
| 48 | kinds.resize(big_page_table_size / 32, kind_valus); | 48 | kinds.resize(page_table_size / 32, kind_valus); |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | MemoryManager::~MemoryManager() = default; | 51 | MemoryManager::~MemoryManager() = default; |