Diffstat (limited to 'src')
49 files changed, 2879 insertions, 833 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 113e663b5..f6e082c36 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
| @@ -190,11 +190,13 @@ add_library(core STATIC | |||
| 190 | hle/kernel/k_code_memory.h | 190 | hle/kernel/k_code_memory.h |
| 191 | hle/kernel/k_condition_variable.cpp | 191 | hle/kernel/k_condition_variable.cpp |
| 192 | hle/kernel/k_condition_variable.h | 192 | hle/kernel/k_condition_variable.h |
| 193 | hle/kernel/k_debug.h | ||
| 193 | hle/kernel/k_dynamic_page_manager.h | 194 | hle/kernel/k_dynamic_page_manager.h |
| 194 | hle/kernel/k_dynamic_resource_manager.h | 195 | hle/kernel/k_dynamic_resource_manager.h |
| 195 | hle/kernel/k_dynamic_slab_heap.h | 196 | hle/kernel/k_dynamic_slab_heap.h |
| 196 | hle/kernel/k_event.cpp | 197 | hle/kernel/k_event.cpp |
| 197 | hle/kernel/k_event.h | 198 | hle/kernel/k_event.h |
| 199 | hle/kernel/k_event_info.h | ||
| 198 | hle/kernel/k_handle_table.cpp | 200 | hle/kernel/k_handle_table.cpp |
| 199 | hle/kernel/k_handle_table.h | 201 | hle/kernel/k_handle_table.h |
| 200 | hle/kernel/k_interrupt_manager.cpp | 202 | hle/kernel/k_interrupt_manager.cpp |
| @@ -222,6 +224,8 @@ add_library(core STATIC | |||
| 222 | hle/kernel/k_page_group.h | 224 | hle/kernel/k_page_group.h |
| 223 | hle/kernel/k_page_table.cpp | 225 | hle/kernel/k_page_table.cpp |
| 224 | hle/kernel/k_page_table.h | 226 | hle/kernel/k_page_table.h |
| 227 | hle/kernel/k_page_table_manager.h | ||
| 228 | hle/kernel/k_page_table_slab_heap.h | ||
| 225 | hle/kernel/k_port.cpp | 229 | hle/kernel/k_port.cpp |
| 226 | hle/kernel/k_port.h | 230 | hle/kernel/k_port.h |
| 227 | hle/kernel/k_priority_queue.h | 231 | hle/kernel/k_priority_queue.h |
| @@ -254,6 +258,8 @@ add_library(core STATIC | |||
| 254 | hle/kernel/k_synchronization_object.cpp | 258 | hle/kernel/k_synchronization_object.cpp |
| 255 | hle/kernel/k_synchronization_object.h | 259 | hle/kernel/k_synchronization_object.h |
| 256 | hle/kernel/k_system_control.h | 260 | hle/kernel/k_system_control.h |
| 261 | hle/kernel/k_system_resource.cpp | ||
| 262 | hle/kernel/k_system_resource.h | ||
| 257 | hle/kernel/k_thread.cpp | 263 | hle/kernel/k_thread.cpp |
| 258 | hle/kernel/k_thread.h | 264 | hle/kernel/k_thread.h |
| 259 | hle/kernel/k_thread_local_page.cpp | 265 | hle/kernel/k_thread_local_page.cpp |
diff --git a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
index fe375769e..4b717d091 100644
--- a/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
+++ b/src/core/hle/kernel/board/nintendo/nx/k_system_control.h
| @@ -9,6 +9,10 @@ namespace Kernel::Board::Nintendo::Nx { | |||
| 9 | 9 | ||
| 10 | class KSystemControl { | 10 | class KSystemControl { |
| 11 | public: | 11 | public: |
| 12 | // This can be overridden as needed. | ||
| 13 | static constexpr size_t SecureAppletMemorySize = 4 * 1024 * 1024; // 4_MB | ||
| 14 | |||
| 15 | public: | ||
| 12 | class Init { | 16 | class Init { |
| 13 | public: | 17 | public: |
| 14 | // Initialization. | 18 | // Initialization. |
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index fd354d484..06010b8d1 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
| @@ -27,16 +27,12 @@ namespace Kernel { | |||
| 27 | 27 | ||
| 28 | SessionRequestHandler::SessionRequestHandler(KernelCore& kernel_, const char* service_name_, | 28 | SessionRequestHandler::SessionRequestHandler(KernelCore& kernel_, const char* service_name_, |
| 29 | ServiceThreadType thread_type) | 29 | ServiceThreadType thread_type) |
| 30 | : kernel{kernel_} { | 30 | : kernel{kernel_}, service_thread{thread_type == ServiceThreadType::CreateNew |
| 31 | if (thread_type == ServiceThreadType::CreateNew) { | 31 | ? kernel.CreateServiceThread(service_name_) |
| 32 | service_thread = kernel.CreateServiceThread(service_name_); | 32 | : kernel.GetDefaultServiceThread()} {} |
| 33 | } else { | ||
| 34 | service_thread = kernel.GetDefaultServiceThread(); | ||
| 35 | } | ||
| 36 | } | ||
| 37 | 33 | ||
| 38 | SessionRequestHandler::~SessionRequestHandler() { | 34 | SessionRequestHandler::~SessionRequestHandler() { |
| 39 | kernel.ReleaseServiceThread(service_thread.lock()); | 35 | kernel.ReleaseServiceThread(service_thread); |
| 40 | } | 36 | } |
| 41 | 37 | ||
| 42 | void SessionRequestHandler::AcceptSession(KServerPort* server_port) { | 38 | void SessionRequestHandler::AcceptSession(KServerPort* server_port) { |
| @@ -49,7 +45,7 @@ void SessionRequestHandler::AcceptSession(KServerPort* server_port) { | |||
| 49 | void SessionRequestHandler::RegisterSession(KServerSession* server_session, | 45 | void SessionRequestHandler::RegisterSession(KServerSession* server_session, |
| 50 | std::shared_ptr<SessionRequestManager> manager) { | 46 | std::shared_ptr<SessionRequestManager> manager) { |
| 51 | manager->SetSessionHandler(shared_from_this()); | 47 | manager->SetSessionHandler(shared_from_this()); |
| 52 | service_thread.lock()->RegisterServerSession(server_session, manager); | 48 | service_thread.RegisterServerSession(server_session, manager); |
| 53 | server_session->Close(); | 49 | server_session->Close(); |
| 54 | } | 50 | } |
| 55 | 51 | ||
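Note on the constructor change above: the handler previously stored a std::weak_ptr<ServiceThread> and had to lock() it on every use; it now binds a ServiceThread reference once in the initializer list, on the assumption that the kernel keeps the thread alive for the handler's lifetime (it is obtained from CreateServiceThread or GetDefaultServiceThread and released in the destructor). A minimal standalone sketch of the two patterns, with illustrative names that are not yuzu's API:

    // Old pattern: every call must lock() the weak_ptr and check for expiry.
    // New pattern: a reference is bound once, mirroring the ternary in the
    // rewritten SessionRequestHandler constructor. All types here are toys.
    #include <iostream>
    #include <memory>
    #include <string>

    struct ServiceThread {
        explicit ServiceThread(std::string name_) : name(std::move(name_)) {}
        void RegisterServerSession(int session_id) {
            std::cout << name << ": registered session " << session_id << '\n';
        }
        std::string name;
    };

    struct HandlerWithWeakPtr {
        std::weak_ptr<ServiceThread> service_thread;
        void Register(int session_id) {
            if (auto thread = service_thread.lock()) {
                thread->RegisterServerSession(session_id);
            }
        }
    };

    struct HandlerWithReference {
        explicit HandlerWithReference(ServiceThread& thread) : service_thread{thread} {}
        void Register(int session_id) {
            service_thread.RegisterServerSession(session_id);
        }
        ServiceThread& service_thread;
    };

    int main() {
        auto owned = std::make_shared<ServiceThread>("audio");
        HandlerWithWeakPtr old_style{owned};
        old_style.Register(1);

        ServiceThread default_thread{"default"};
        HandlerWithReference new_style{default_thread};
        new_style.Register(2);
    }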
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index 67da8e7e1..d87be72d6 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
| @@ -82,13 +82,13 @@ public: | |||
| 82 | void RegisterSession(KServerSession* server_session, | 82 | void RegisterSession(KServerSession* server_session, |
| 83 | std::shared_ptr<SessionRequestManager> manager); | 83 | std::shared_ptr<SessionRequestManager> manager); |
| 84 | 84 | ||
| 85 | std::weak_ptr<ServiceThread> GetServiceThread() const { | 85 | ServiceThread& GetServiceThread() const { |
| 86 | return service_thread; | 86 | return service_thread; |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | protected: | 89 | protected: |
| 90 | KernelCore& kernel; | 90 | KernelCore& kernel; |
| 91 | std::weak_ptr<ServiceThread> service_thread; | 91 | ServiceThread& service_thread; |
| 92 | }; | 92 | }; |
| 93 | 93 | ||
| 94 | using SessionRequestHandlerWeakPtr = std::weak_ptr<SessionRequestHandler>; | 94 | using SessionRequestHandlerWeakPtr = std::weak_ptr<SessionRequestHandler>; |
| @@ -154,7 +154,7 @@ public: | |||
| 154 | session_handler = std::move(handler); | 154 | session_handler = std::move(handler); |
| 155 | } | 155 | } |
| 156 | 156 | ||
| 157 | std::weak_ptr<ServiceThread> GetServiceThread() const { | 157 | ServiceThread& GetServiceThread() const { |
| 158 | return session_handler->GetServiceThread(); | 158 | return session_handler->GetServiceThread(); |
| 159 | } | 159 | } |
| 160 | 160 | ||
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 477e4e407..aa2dddcc6 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
| @@ -10,7 +10,9 @@ | |||
| 10 | #include "core/hardware_properties.h" | 10 | #include "core/hardware_properties.h" |
| 11 | #include "core/hle/kernel/init/init_slab_setup.h" | 11 | #include "core/hle/kernel/init/init_slab_setup.h" |
| 12 | #include "core/hle/kernel/k_code_memory.h" | 12 | #include "core/hle/kernel/k_code_memory.h" |
| 13 | #include "core/hle/kernel/k_debug.h" | ||
| 13 | #include "core/hle/kernel/k_event.h" | 14 | #include "core/hle/kernel/k_event.h" |
| 15 | #include "core/hle/kernel/k_event_info.h" | ||
| 14 | #include "core/hle/kernel/k_memory_layout.h" | 16 | #include "core/hle/kernel/k_memory_layout.h" |
| 15 | #include "core/hle/kernel/k_memory_manager.h" | 17 | #include "core/hle/kernel/k_memory_manager.h" |
| 16 | #include "core/hle/kernel/k_page_buffer.h" | 18 | #include "core/hle/kernel/k_page_buffer.h" |
| @@ -22,6 +24,7 @@ | |||
| 22 | #include "core/hle/kernel/k_shared_memory.h" | 24 | #include "core/hle/kernel/k_shared_memory.h" |
| 23 | #include "core/hle/kernel/k_shared_memory_info.h" | 25 | #include "core/hle/kernel/k_shared_memory_info.h" |
| 24 | #include "core/hle/kernel/k_system_control.h" | 26 | #include "core/hle/kernel/k_system_control.h" |
| 27 | #include "core/hle/kernel/k_system_resource.h" | ||
| 25 | #include "core/hle/kernel/k_thread.h" | 28 | #include "core/hle/kernel/k_thread.h" |
| 26 | #include "core/hle/kernel/k_thread_local_page.h" | 29 | #include "core/hle/kernel/k_thread_local_page.h" |
| 27 | #include "core/hle/kernel/k_transfer_memory.h" | 30 | #include "core/hle/kernel/k_transfer_memory.h" |
| @@ -44,7 +47,10 @@ namespace Kernel::Init { | |||
| 44 | HANDLER(KThreadLocalPage, \ | 47 | HANDLER(KThreadLocalPage, \ |
| 45 | (SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \ | 48 | (SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \ |
| 46 | ##__VA_ARGS__) \ | 49 | ##__VA_ARGS__) \ |
| 47 | HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) | 50 | HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) \ |
| 51 | HANDLER(KEventInfo, (SLAB_COUNT(KThread) + SLAB_COUNT(KDebug)), ##__VA_ARGS__) \ | ||
| 52 | HANDLER(KDebug, (SLAB_COUNT(KDebug)), ##__VA_ARGS__) \ | ||
| 53 | HANDLER(KSecureSystemResource, (SLAB_COUNT(KProcess)), ##__VA_ARGS__) | ||
| 48 | 54 | ||
| 49 | namespace { | 55 | namespace { |
| 50 | 56 | ||
| @@ -73,8 +79,20 @@ constexpr size_t SlabCountKResourceLimit = 5; | |||
| 73 | constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES; | 79 | constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES; |
| 74 | constexpr size_t SlabCountKIoPool = 1; | 80 | constexpr size_t SlabCountKIoPool = 1; |
| 75 | constexpr size_t SlabCountKIoRegion = 6; | 81 | constexpr size_t SlabCountKIoRegion = 6; |
| 82 | constexpr size_t SlabcountKSessionRequestMappings = 40; | ||
| 76 | 83 | ||
| 77 | constexpr size_t SlabCountExtraKThread = 160; | 84 | constexpr size_t SlabCountExtraKThread = (1024 + 256 + 256) - SlabCountKThread; |
| 85 | |||
| 86 | namespace test { | ||
| 87 | |||
| 88 | static_assert(KernelPageBufferHeapSize == | ||
| 89 | 2 * PageSize + (SlabCountKProcess + SlabCountKThread + | ||
| 90 | (SlabCountKProcess + SlabCountKThread) / 8) * | ||
| 91 | PageSize); | ||
| 92 | static_assert(KernelPageBufferAdditionalSize == | ||
| 93 | (SlabCountExtraKThread + (SlabCountExtraKThread / 8)) * PageSize); | ||
| 94 | |||
| 95 | } // namespace test | ||
| 78 | 96 | ||
| 79 | /// Helper function to translate from the slab virtual address to the reserved location in physical | 97 | /// Helper function to translate from the slab virtual address to the reserved location in physical |
| 80 | /// memory. | 98 | /// memory. |
| @@ -109,7 +127,7 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd | |||
| 109 | } | 127 | } |
| 110 | 128 | ||
| 111 | size_t CalculateSlabHeapGapSize() { | 129 | size_t CalculateSlabHeapGapSize() { |
| 112 | constexpr size_t KernelSlabHeapGapSize = 2_MiB - 296_KiB; | 130 | constexpr size_t KernelSlabHeapGapSize = 2_MiB - 320_KiB; |
| 113 | static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax); | 131 | static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax); |
| 114 | return KernelSlabHeapGapSize; | 132 | return KernelSlabHeapGapSize; |
| 115 | } | 133 | } |
| @@ -134,6 +152,7 @@ KSlabResourceCounts KSlabResourceCounts::CreateDefault() { | |||
| 134 | .num_KDebug = SlabCountKDebug, | 152 | .num_KDebug = SlabCountKDebug, |
| 135 | .num_KIoPool = SlabCountKIoPool, | 153 | .num_KIoPool = SlabCountKIoPool, |
| 136 | .num_KIoRegion = SlabCountKIoRegion, | 154 | .num_KIoRegion = SlabCountKIoRegion, |
| 155 | .num_KSessionRequestMappings = SlabcountKSessionRequestMappings, | ||
| 137 | }; | 156 | }; |
| 138 | } | 157 | } |
| 139 | 158 | ||
| @@ -164,29 +183,6 @@ size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) { | |||
| 164 | return size; | 183 | return size; |
| 165 | } | 184 | } |
| 166 | 185 | ||
| 167 | void InitializeKPageBufferSlabHeap(Core::System& system) { | ||
| 168 | auto& kernel = system.Kernel(); | ||
| 169 | |||
| 170 | const auto& counts = kernel.SlabResourceCounts(); | ||
| 171 | const size_t num_pages = | ||
| 172 | counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8; | ||
| 173 | const size_t slab_size = num_pages * PageSize; | ||
| 174 | |||
| 175 | // Reserve memory from the system resource limit. | ||
| 176 | ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size)); | ||
| 177 | |||
| 178 | // Allocate memory for the slab. | ||
| 179 | constexpr auto AllocateOption = KMemoryManager::EncodeOption( | ||
| 180 | KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront); | ||
| 181 | const PAddr slab_address = | ||
| 182 | kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption); | ||
| 183 | ASSERT(slab_address != 0); | ||
| 184 | |||
| 185 | // Initialize the slabheap. | ||
| 186 | KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address), | ||
| 187 | slab_size); | ||
| 188 | } | ||
| 189 | |||
| 190 | void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { | 186 | void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { |
| 191 | auto& kernel = system.Kernel(); | 187 | auto& kernel = system.Kernel(); |
| 192 | 188 | ||
| @@ -258,3 +254,29 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { | |||
| 258 | } | 254 | } |
| 259 | 255 | ||
| 260 | } // namespace Kernel::Init | 256 | } // namespace Kernel::Init |
| 257 | |||
| 258 | namespace Kernel { | ||
| 259 | |||
| 260 | void KPageBufferSlabHeap::Initialize(Core::System& system) { | ||
| 261 | auto& kernel = system.Kernel(); | ||
| 262 | const auto& counts = kernel.SlabResourceCounts(); | ||
| 263 | const size_t num_pages = | ||
| 264 | counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8; | ||
| 265 | const size_t slab_size = num_pages * PageSize; | ||
| 266 | |||
| 267 | // Reserve memory from the system resource limit. | ||
| 268 | ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size)); | ||
| 269 | |||
| 270 | // Allocate memory for the slab. | ||
| 271 | constexpr auto AllocateOption = KMemoryManager::EncodeOption( | ||
| 272 | KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront); | ||
| 273 | const PAddr slab_address = | ||
| 274 | kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption); | ||
| 275 | ASSERT(slab_address != 0); | ||
| 276 | |||
| 277 | // Initialize the slabheap. | ||
| 278 | KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address), | ||
| 279 | slab_size); | ||
| 280 | } | ||
| 281 | |||
| 282 | } // namespace Kernel | ||
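Note on the new test namespace above: it ties KernelPageBufferHeapSize and KernelPageBufferAdditionalSize to the slab counts, and SlabCountExtraKThread is now derived from a 1536-thread budget (1024 + 256 + 256) rather than being a literal 160. The arithmetic is reproduced below as a standalone sketch; the counts are assumed typical values (the real constants are defined elsewhere in this file and in the board headers), so the printed sizes are illustrative only:

    #include <cstddef>
    #include <cstdio>

    constexpr std::size_t PageSize = 0x1000;        // assumed 4 KiB pages
    constexpr std::size_t SlabCountKProcess = 80;   // assumed typical value
    constexpr std::size_t SlabCountKThread = 800;   // assumed typical value
    constexpr std::size_t SlabCountExtraKThread = (1024 + 256 + 256) - SlabCountKThread;

    // One page buffer per process/thread plus a 1/8 margin plus two spare
    // pages: the quantity the first static_assert in namespace test pins down.
    constexpr std::size_t KernelPageBufferHeapSize =
        2 * PageSize + (SlabCountKProcess + SlabCountKThread +
                        (SlabCountKProcess + SlabCountKThread) / 8) *
                           PageSize;

    // The additional region covers only the extra threads, with the same margin.
    constexpr std::size_t KernelPageBufferAdditionalSize =
        (SlabCountExtraKThread + SlabCountExtraKThread / 8) * PageSize;

    int main() {
        std::printf("page buffer heap size:       0x%zx bytes\n", KernelPageBufferHeapSize);
        std::printf("page buffer additional size: 0x%zx bytes\n", KernelPageBufferAdditionalSize);
    }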
diff --git a/src/core/hle/kernel/init/init_slab_setup.h b/src/core/hle/kernel/init/init_slab_setup.h
index 13be63c87..5e22821bc 100644
--- a/src/core/hle/kernel/init/init_slab_setup.h
+++ b/src/core/hle/kernel/init/init_slab_setup.h
| @@ -33,11 +33,11 @@ struct KSlabResourceCounts { | |||
| 33 | size_t num_KDebug; | 33 | size_t num_KDebug; |
| 34 | size_t num_KIoPool; | 34 | size_t num_KIoPool; |
| 35 | size_t num_KIoRegion; | 35 | size_t num_KIoRegion; |
| 36 | size_t num_KSessionRequestMappings; | ||
| 36 | }; | 37 | }; |
| 37 | 38 | ||
| 38 | void InitializeSlabResourceCounts(KernelCore& kernel); | 39 | void InitializeSlabResourceCounts(KernelCore& kernel); |
| 39 | size_t CalculateTotalSlabHeapSize(const KernelCore& kernel); | 40 | size_t CalculateTotalSlabHeapSize(const KernelCore& kernel); |
| 40 | void InitializeKPageBufferSlabHeap(Core::System& system); | ||
| 41 | void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout); | 41 | void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout); |
| 42 | 42 | ||
| 43 | } // namespace Kernel::Init | 43 | } // namespace Kernel::Init |
diff --git a/src/core/hle/kernel/k_class_token.cpp b/src/core/hle/kernel/k_class_token.cpp
index 10265c23c..a850db3c4 100644
--- a/src/core/hle/kernel/k_class_token.cpp
+++ b/src/core/hle/kernel/k_class_token.cpp
| @@ -16,6 +16,7 @@ | |||
| 16 | #include "core/hle/kernel/k_session.h" | 16 | #include "core/hle/kernel/k_session.h" |
| 17 | #include "core/hle/kernel/k_shared_memory.h" | 17 | #include "core/hle/kernel/k_shared_memory.h" |
| 18 | #include "core/hle/kernel/k_synchronization_object.h" | 18 | #include "core/hle/kernel/k_synchronization_object.h" |
| 19 | #include "core/hle/kernel/k_system_resource.h" | ||
| 19 | #include "core/hle/kernel/k_thread.h" | 20 | #include "core/hle/kernel/k_thread.h" |
| 20 | #include "core/hle/kernel/k_transfer_memory.h" | 21 | #include "core/hle/kernel/k_transfer_memory.h" |
| 21 | 22 | ||
| @@ -119,4 +120,6 @@ static_assert(std::is_final_v<KTransferMemory> && std::is_base_of_v<KAutoObject, | |||
| 119 | // static_assert(std::is_final_v<KCodeMemory> && | 120 | // static_assert(std::is_final_v<KCodeMemory> && |
| 120 | // std::is_base_of_v<KAutoObject, KCodeMemory>); | 121 | // std::is_base_of_v<KAutoObject, KCodeMemory>); |
| 121 | 122 | ||
| 123 | static_assert(std::is_base_of_v<KAutoObject, KSystemResource>); | ||
| 124 | |||
| 122 | } // namespace Kernel | 125 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/k_class_token.h b/src/core/hle/kernel/k_class_token.h
index ab20e00ff..e75b1c035 100644
--- a/src/core/hle/kernel/k_class_token.h
+++ b/src/core/hle/kernel/k_class_token.h
| @@ -10,6 +10,8 @@ namespace Kernel { | |||
| 10 | 10 | ||
| 11 | class KAutoObject; | 11 | class KAutoObject; |
| 12 | 12 | ||
| 13 | class KSystemResource; | ||
| 14 | |||
| 13 | class KClassTokenGenerator { | 15 | class KClassTokenGenerator { |
| 14 | public: | 16 | public: |
| 15 | using TokenBaseType = u16; | 17 | using TokenBaseType = u16; |
| @@ -58,7 +60,7 @@ private: | |||
| 58 | if constexpr (std::is_same<T, KAutoObject>::value) { | 60 | if constexpr (std::is_same<T, KAutoObject>::value) { |
| 59 | static_assert(T::ObjectType == ObjectType::KAutoObject); | 61 | static_assert(T::ObjectType == ObjectType::KAutoObject); |
| 60 | return 0; | 62 | return 0; |
| 61 | } else if constexpr (!std::is_final<T>::value) { | 63 | } else if constexpr (!std::is_final<T>::value && !std::same_as<T, KSystemResource>) { |
| 62 | static_assert(ObjectType::BaseClassesStart <= T::ObjectType && | 64 | static_assert(ObjectType::BaseClassesStart <= T::ObjectType && |
| 63 | T::ObjectType < ObjectType::BaseClassesEnd); | 65 | T::ObjectType < ObjectType::BaseClassesEnd); |
| 64 | constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) - | 66 | constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) - |
| @@ -108,6 +110,8 @@ public: | |||
| 108 | KSessionRequest, | 110 | KSessionRequest, |
| 109 | KCodeMemory, | 111 | KCodeMemory, |
| 110 | 112 | ||
| 113 | KSystemResource, | ||
| 114 | |||
| 111 | // NOTE: True order for these has not been determined yet. | 115 | // NOTE: True order for these has not been determined yet. |
| 112 | KAlpha, | 116 | KAlpha, |
| 113 | KBeta, | 117 | KBeta, |
diff --git a/src/core/hle/kernel/k_debug.h b/src/core/hle/kernel/k_debug.h
new file mode 100644
index 000000000..e3a0689c8
--- /dev/null
+++ b/src/core/hle/kernel/k_debug.h
| @@ -0,0 +1,20 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include "core/hle/kernel/k_auto_object.h" | ||
| 7 | #include "core/hle/kernel/slab_helpers.h" | ||
| 8 | |||
| 9 | namespace Kernel { | ||
| 10 | |||
| 11 | class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KAutoObjectWithList> { | ||
| 12 | KERNEL_AUTOOBJECT_TRAITS(KDebug, KAutoObject); | ||
| 13 | |||
| 14 | public: | ||
| 15 | explicit KDebug(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {} | ||
| 16 | |||
| 17 | static void PostDestroy([[maybe_unused]] uintptr_t arg) {} | ||
| 18 | }; | ||
| 19 | |||
| 20 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h
index 9076c8fa3..ac80d60a1 100644
--- a/src/core/hle/kernel/k_dynamic_page_manager.h
+++ b/src/core/hle/kernel/k_dynamic_page_manager.h
| @@ -3,6 +3,8 @@ | |||
| 3 | 3 | ||
| 4 | #pragma once | 4 | #pragma once |
| 5 | 5 | ||
| 6 | #include <vector> | ||
| 7 | |||
| 6 | #include "common/alignment.h" | 8 | #include "common/alignment.h" |
| 7 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 8 | #include "core/hle/kernel/k_page_bitmap.h" | 10 | #include "core/hle/kernel/k_page_bitmap.h" |
| @@ -33,28 +35,36 @@ public: | |||
| 33 | return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address)); | 35 | return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address)); |
| 34 | } | 36 | } |
| 35 | 37 | ||
| 36 | Result Initialize(VAddr addr, size_t sz) { | 38 | Result Initialize(VAddr memory, size_t size, size_t align) { |
| 37 | // We need to have positive size. | 39 | // We need to have positive size. |
| 38 | R_UNLESS(sz > 0, ResultOutOfMemory); | 40 | R_UNLESS(size > 0, ResultOutOfMemory); |
| 39 | m_backing_memory.resize(sz); | 41 | m_backing_memory.resize(size); |
| 42 | |||
| 43 | // Set addresses. | ||
| 44 | m_address = memory; | ||
| 45 | m_aligned_address = Common::AlignDown(memory, align); | ||
| 40 | 46 | ||
| 41 | // Calculate management overhead. | 47 | // Calculate extents. |
| 42 | const size_t management_size = | 48 | const size_t managed_size = m_address + size - m_aligned_address; |
| 43 | KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer)); | 49 | const size_t overhead_size = Common::AlignUp( |
| 44 | const size_t allocatable_size = sz - management_size; | 50 | KPageBitmap::CalculateManagementOverheadSize(managed_size / sizeof(PageBuffer)), |
| 51 | sizeof(PageBuffer)); | ||
| 52 | R_UNLESS(overhead_size < size, ResultOutOfMemory); | ||
| 45 | 53 | ||
| 46 | // Set tracking fields. | 54 | // Set tracking fields. |
| 47 | m_address = addr; | 55 | m_size = Common::AlignDown(size - overhead_size, sizeof(PageBuffer)); |
| 48 | m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer)); | 56 | m_count = m_size / sizeof(PageBuffer); |
| 49 | m_count = allocatable_size / sizeof(PageBuffer); | ||
| 50 | R_UNLESS(m_count > 0, ResultOutOfMemory); | ||
| 51 | 57 | ||
| 52 | // Clear the management region. | 58 | // Clear the management region. |
| 53 | u64* management_ptr = GetPointer<u64>(m_address + allocatable_size); | 59 | u64* management_ptr = GetPointer<u64>(m_address + size - overhead_size); |
| 54 | std::memset(management_ptr, 0, management_size); | 60 | std::memset(management_ptr, 0, overhead_size); |
| 55 | 61 | ||
| 56 | // Initialize the bitmap. | 62 | // Initialize the bitmap. |
| 57 | m_page_bitmap.Initialize(management_ptr, m_count); | 63 | const size_t allocatable_region_size = |
| 64 | (m_address + size - overhead_size) - m_aligned_address; | ||
| 65 | ASSERT(allocatable_region_size >= sizeof(PageBuffer)); | ||
| 66 | |||
| 67 | m_page_bitmap.Initialize(management_ptr, allocatable_region_size / sizeof(PageBuffer)); | ||
| 58 | 68 | ||
| 59 | // Free the pages to the bitmap. | 69 | // Free the pages to the bitmap. |
| 60 | for (size_t i = 0; i < m_count; i++) { | 70 | for (size_t i = 0; i < m_count; i++) { |
| @@ -62,7 +72,8 @@ public: | |||
| 62 | std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize); | 72 | std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize); |
| 63 | 73 | ||
| 64 | // Set the bit for the free page. | 74 | // Set the bit for the free page. |
| 65 | m_page_bitmap.SetBit(i); | 75 | m_page_bitmap.SetBit((m_address + (i * sizeof(PageBuffer)) - m_aligned_address) / |
| 76 | sizeof(PageBuffer)); | ||
| 66 | } | 77 | } |
| 67 | 78 | ||
| 68 | R_SUCCEED(); | 79 | R_SUCCEED(); |
| @@ -101,7 +112,28 @@ public: | |||
| 101 | m_page_bitmap.ClearBit(offset); | 112 | m_page_bitmap.ClearBit(offset); |
| 102 | m_peak = std::max(m_peak, (++m_used)); | 113 | m_peak = std::max(m_peak, (++m_used)); |
| 103 | 114 | ||
| 104 | return GetPointer<PageBuffer>(m_address) + offset; | 115 | return GetPointer<PageBuffer>(m_aligned_address) + offset; |
| 116 | } | ||
| 117 | |||
| 118 | PageBuffer* Allocate(size_t count) { | ||
| 119 | // Take the lock. | ||
| 120 | // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. | ||
| 121 | KScopedSpinLock lk(m_lock); | ||
| 122 | |||
| 123 | // Find a random free block. | ||
| 124 | s64 soffset = m_page_bitmap.FindFreeRange(count); | ||
| 125 | if (soffset < 0) [[likely]] { | ||
| 126 | return nullptr; | ||
| 127 | } | ||
| 128 | |||
| 129 | const size_t offset = static_cast<size_t>(soffset); | ||
| 130 | |||
| 131 | // Update our tracking. | ||
| 132 | m_page_bitmap.ClearRange(offset, count); | ||
| 133 | m_used += count; | ||
| 134 | m_peak = std::max(m_peak, m_used); | ||
| 135 | |||
| 136 | return GetPointer<PageBuffer>(m_aligned_address) + offset; | ||
| 105 | } | 137 | } |
| 106 | 138 | ||
| 107 | void Free(PageBuffer* pb) { | 139 | void Free(PageBuffer* pb) { |
| @@ -113,7 +145,7 @@ public: | |||
| 113 | KScopedSpinLock lk(m_lock); | 145 | KScopedSpinLock lk(m_lock); |
| 114 | 146 | ||
| 115 | // Set the bit for the free page. | 147 | // Set the bit for the free page. |
| 116 | size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_address) / sizeof(PageBuffer); | 148 | size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_aligned_address) / sizeof(PageBuffer); |
| 117 | m_page_bitmap.SetBit(offset); | 149 | m_page_bitmap.SetBit(offset); |
| 118 | 150 | ||
| 119 | // Decrement our used count. | 151 | // Decrement our used count. |
| @@ -127,6 +159,7 @@ private: | |||
| 127 | size_t m_peak{}; | 159 | size_t m_peak{}; |
| 128 | size_t m_count{}; | 160 | size_t m_count{}; |
| 129 | VAddr m_address{}; | 161 | VAddr m_address{}; |
| 162 | VAddr m_aligned_address{}; | ||
| 130 | size_t m_size{}; | 163 | size_t m_size{}; |
| 131 | 164 | ||
| 132 | // TODO(bunnei): Back by host memory until we emulate kernel virtual address space. | 165 | // TODO(bunnei): Back by host memory until we emulate kernel virtual address space. |
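Note on the KDynamicPageManager changes above: Initialize() now records both the heap's start address and an aligned-down base, and every bitmap bit is computed relative to the aligned base, so Allocate(), the new Allocate(count), and Free() all share the same offset formula. A standalone sketch of that index arithmetic, with made-up addresses:

    #include <cassert>
    #include <cstdio>

    constexpr unsigned long long PageSize = 0x1000;

    constexpr unsigned long long AlignDown(unsigned long long value, unsigned long long align) {
        return value & ~(align - 1);
    }

    int main() {
        // Addresses here are made up for illustration.
        const unsigned long long address = 0x10003000ULL;  // start of the backing memory
        const unsigned long long align = 0x10000ULL;       // requested alignment
        const unsigned long long aligned_address = AlignDown(address, align);  // 0x10000000

        // Page i of the heap lives at address + i * PageSize, but its bitmap bit
        // is computed relative to aligned_address -- the same expression the
        // rewritten Initialize() loop and Free() now use.
        for (unsigned long long i = 0; i < 4; ++i) {
            const unsigned long long page_addr = address + i * PageSize;
            const unsigned long long bit = (page_addr - aligned_address) / PageSize;
            std::printf("page %llu at 0x%llx -> bit %llu\n", i, page_addr, bit);
            assert(aligned_address + bit * PageSize == page_addr);
        }
    }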
diff --git a/src/core/hle/kernel/k_dynamic_resource_manager.h b/src/core/hle/kernel/k_dynamic_resource_manager.h
index 1ce517e8e..b6a27d648 100644
--- a/src/core/hle/kernel/k_dynamic_resource_manager.h
+++ b/src/core/hle/kernel/k_dynamic_resource_manager.h
| @@ -6,6 +6,7 @@ | |||
| 6 | #include "common/common_funcs.h" | 6 | #include "common/common_funcs.h" |
| 7 | #include "core/hle/kernel/k_dynamic_slab_heap.h" | 7 | #include "core/hle/kernel/k_dynamic_slab_heap.h" |
| 8 | #include "core/hle/kernel/k_memory_block.h" | 8 | #include "core/hle/kernel/k_memory_block.h" |
| 9 | #include "core/hle/kernel/k_page_group.h" | ||
| 9 | 10 | ||
| 10 | namespace Kernel { | 11 | namespace Kernel { |
| 11 | 12 | ||
| @@ -51,8 +52,10 @@ private: | |||
| 51 | DynamicSlabType* m_slab_heap{}; | 52 | DynamicSlabType* m_slab_heap{}; |
| 52 | }; | 53 | }; |
| 53 | 54 | ||
| 55 | class KBlockInfoManager : public KDynamicResourceManager<KBlockInfo> {}; | ||
| 54 | class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {}; | 56 | class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {}; |
| 55 | 57 | ||
| 58 | using KBlockInfoSlabHeap = typename KBlockInfoManager::DynamicSlabType; | ||
| 56 | using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType; | 59 | using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType; |
| 57 | 60 | ||
| 58 | } // namespace Kernel | 61 | } // namespace Kernel |
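Note on KBlockInfoManager above: it can be a one-line subclass because KDynamicResourceManager supplies the typed allocate/free interface over a dynamic slab heap it does not own. A simplified toy version of that pattern, for illustration only (this is not yuzu's implementation):

    #include <array>
    #include <cstddef>
    #include <cstdio>

    // A toy fixed-capacity slab heap: hands out slots from an index free-list.
    template <typename T, std::size_t N>
    class ToySlabHeap {
    public:
        ToySlabHeap() {
            for (std::size_t i = 0; i < N; ++i) {
                free_list[i] = i;
            }
            free_count = N;
        }
        T* Allocate() {
            if (free_count == 0) {
                return nullptr;
            }
            return &slots[free_list[--free_count]];
        }
        void Free(T* obj) {
            free_list[free_count++] = static_cast<std::size_t>(obj - slots.data());
        }

    private:
        std::array<T, N> slots{};
        std::array<std::size_t, N> free_list{};
        std::size_t free_count{};
    };

    // The "resource manager" is just a typed front end over a heap it does not
    // own, which is the role KDynamicResourceManager plays.
    template <typename T>
    class ToyResourceManager {
    public:
        void Initialize(ToySlabHeap<T, 16>* heap_) {
            heap = heap_;
        }
        T* Allocate() {
            return heap->Allocate();
        }
        void Free(T* obj) {
            heap->Free(obj);
        }

    private:
        ToySlabHeap<T, 16>* heap{};
    };

    struct BlockInfo {
        int data{};
    };

    // Adding a new manager type is then a one-liner, as in the diff above.
    class ToyBlockInfoManager : public ToyResourceManager<BlockInfo> {};

    int main() {
        ToySlabHeap<BlockInfo, 16> heap;
        ToyBlockInfoManager manager;
        manager.Initialize(&heap);

        BlockInfo* info = manager.Allocate();
        info->data = 42;
        std::printf("allocated block info with data %d\n", info->data);
        manager.Free(info);
    }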
diff --git a/src/core/hle/kernel/k_event_info.h b/src/core/hle/kernel/k_event_info.h
new file mode 100644
index 000000000..25b3ff594
--- /dev/null
+++ b/src/core/hle/kernel/k_event_info.h
| @@ -0,0 +1,64 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <array> | ||
| 7 | |||
| 8 | #include <boost/intrusive/list.hpp> | ||
| 9 | |||
| 10 | #include "core/hle/kernel/slab_helpers.h" | ||
| 11 | #include "core/hle/kernel/svc_types.h" | ||
| 12 | |||
| 13 | namespace Kernel { | ||
| 14 | |||
| 15 | class KEventInfo : public KSlabAllocated<KEventInfo>, public boost::intrusive::list_base_hook<> { | ||
| 16 | public: | ||
| 17 | struct InfoCreateThread { | ||
| 18 | u32 thread_id{}; | ||
| 19 | uintptr_t tls_address{}; | ||
| 20 | }; | ||
| 21 | |||
| 22 | struct InfoExitProcess { | ||
| 23 | Svc::ProcessExitReason reason{}; | ||
| 24 | }; | ||
| 25 | |||
| 26 | struct InfoExitThread { | ||
| 27 | Svc::ThreadExitReason reason{}; | ||
| 28 | }; | ||
| 29 | |||
| 30 | struct InfoException { | ||
| 31 | Svc::DebugException exception_type{}; | ||
| 32 | s32 exception_data_count{}; | ||
| 33 | uintptr_t exception_address{}; | ||
| 34 | std::array<uintptr_t, 4> exception_data{}; | ||
| 35 | }; | ||
| 36 | |||
| 37 | struct InfoSystemCall { | ||
| 38 | s64 tick{}; | ||
| 39 | s32 id{}; | ||
| 40 | }; | ||
| 41 | |||
| 42 | public: | ||
| 43 | KEventInfo() = default; | ||
| 44 | ~KEventInfo() = default; | ||
| 45 | |||
| 46 | public: | ||
| 47 | Svc::DebugEvent event{}; | ||
| 48 | u32 thread_id{}; | ||
| 49 | u32 flags{}; | ||
| 50 | bool is_attached{}; | ||
| 51 | bool continue_flag{}; | ||
| 52 | bool ignore_continue{}; | ||
| 53 | bool close_once{}; | ||
| 54 | union { | ||
| 55 | InfoCreateThread create_thread; | ||
| 56 | InfoExitProcess exit_process; | ||
| 57 | InfoExitThread exit_thread; | ||
| 58 | InfoException exception; | ||
| 59 | InfoSystemCall system_call; | ||
| 60 | } info{}; | ||
| 61 | KThread* debug_thread{}; | ||
| 62 | }; | ||
| 63 | |||
| 64 | } // namespace Kernel | ||
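Note on the new KEventInfo above: it derives from both KSlabAllocated<KEventInfo> and boost::intrusive::list_base_hook<>, so debug-event records can be linked into a queue without any per-node allocation beyond the slab itself. A minimal sketch of the intrusive-list half of that design, using a stand-in record type rather than the real class:

    #include <cstdio>

    #include <boost/intrusive/list.hpp>

    // Stand-in for a slab-allocated event record; the default list_base_hook
    // embeds the link pointers directly in the object.
    struct EventInfo : public boost::intrusive::list_base_hook<> {
        int event_id{};
    };

    int main() {
        // In the kernel these records would come from the KEventInfo slab; plain
        // locals are enough to show the linking behaviour.
        EventInfo a;
        a.event_id = 1;
        EventInfo b;
        b.event_id = 2;

        boost::intrusive::list<EventInfo> queue;  // non-owning linked list
        queue.push_back(a);
        queue.push_back(b);

        while (!queue.empty()) {
            EventInfo& info = queue.front();
            queue.pop_front();  // unlink only; the record's storage is untouched
            std::printf("dequeued event %d\n", info.event_id);
        }
    }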
diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp
index e830ca46e..1c7a766c8 100644
--- a/src/core/hle/kernel/k_handle_table.cpp
+++ b/src/core/hle/kernel/k_handle_table.cpp
| @@ -5,14 +5,11 @@ | |||
| 5 | 5 | ||
| 6 | namespace Kernel { | 6 | namespace Kernel { |
| 7 | 7 | ||
| 8 | KHandleTable::KHandleTable(KernelCore& kernel_) : kernel{kernel_} {} | ||
| 9 | KHandleTable::~KHandleTable() = default; | ||
| 10 | |||
| 11 | Result KHandleTable::Finalize() { | 8 | Result KHandleTable::Finalize() { |
| 12 | // Get the table and clear our record of it. | 9 | // Get the table and clear our record of it. |
| 13 | u16 saved_table_size = 0; | 10 | u16 saved_table_size = 0; |
| 14 | { | 11 | { |
| 15 | KScopedDisableDispatch dd(kernel); | 12 | KScopedDisableDispatch dd{m_kernel}; |
| 16 | KScopedSpinLock lk(m_lock); | 13 | KScopedSpinLock lk(m_lock); |
| 17 | 14 | ||
| 18 | std::swap(m_table_size, saved_table_size); | 15 | std::swap(m_table_size, saved_table_size); |
| @@ -25,28 +22,28 @@ Result KHandleTable::Finalize() { | |||
| 25 | } | 22 | } |
| 26 | } | 23 | } |
| 27 | 24 | ||
| 28 | return ResultSuccess; | 25 | R_SUCCEED(); |
| 29 | } | 26 | } |
| 30 | 27 | ||
| 31 | bool KHandleTable::Remove(Handle handle) { | 28 | bool KHandleTable::Remove(Handle handle) { |
| 32 | // Don't allow removal of a pseudo-handle. | 29 | // Don't allow removal of a pseudo-handle. |
| 33 | if (Svc::IsPseudoHandle(handle)) { | 30 | if (Svc::IsPseudoHandle(handle)) [[unlikely]] { |
| 34 | return false; | 31 | return false; |
| 35 | } | 32 | } |
| 36 | 33 | ||
| 37 | // Handles must not have reserved bits set. | 34 | // Handles must not have reserved bits set. |
| 38 | const auto handle_pack = HandlePack(handle); | 35 | const auto handle_pack = HandlePack(handle); |
| 39 | if (handle_pack.reserved != 0) { | 36 | if (handle_pack.reserved != 0) [[unlikely]] { |
| 40 | return false; | 37 | return false; |
| 41 | } | 38 | } |
| 42 | 39 | ||
| 43 | // Find the object and free the entry. | 40 | // Find the object and free the entry. |
| 44 | KAutoObject* obj = nullptr; | 41 | KAutoObject* obj = nullptr; |
| 45 | { | 42 | { |
| 46 | KScopedDisableDispatch dd(kernel); | 43 | KScopedDisableDispatch dd{m_kernel}; |
| 47 | KScopedSpinLock lk(m_lock); | 44 | KScopedSpinLock lk(m_lock); |
| 48 | 45 | ||
| 49 | if (this->IsValidHandle(handle)) { | 46 | if (this->IsValidHandle(handle)) [[likely]] { |
| 50 | const auto index = handle_pack.index; | 47 | const auto index = handle_pack.index; |
| 51 | 48 | ||
| 52 | obj = m_objects[index]; | 49 | obj = m_objects[index]; |
| @@ -57,13 +54,13 @@ bool KHandleTable::Remove(Handle handle) { | |||
| 57 | } | 54 | } |
| 58 | 55 | ||
| 59 | // Close the object. | 56 | // Close the object. |
| 60 | kernel.UnregisterInUseObject(obj); | 57 | m_kernel.UnregisterInUseObject(obj); |
| 61 | obj->Close(); | 58 | obj->Close(); |
| 62 | return true; | 59 | return true; |
| 63 | } | 60 | } |
| 64 | 61 | ||
| 65 | Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) { | 62 | Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) { |
| 66 | KScopedDisableDispatch dd(kernel); | 63 | KScopedDisableDispatch dd{m_kernel}; |
| 67 | KScopedSpinLock lk(m_lock); | 64 | KScopedSpinLock lk(m_lock); |
| 68 | 65 | ||
| 69 | // Never exceed our capacity. | 66 | // Never exceed our capacity. |
| @@ -82,22 +79,22 @@ Result KHandleTable::Add(Handle* out_handle, KAutoObject* obj) { | |||
| 82 | *out_handle = EncodeHandle(static_cast<u16>(index), linear_id); | 79 | *out_handle = EncodeHandle(static_cast<u16>(index), linear_id); |
| 83 | } | 80 | } |
| 84 | 81 | ||
| 85 | return ResultSuccess; | 82 | R_SUCCEED(); |
| 86 | } | 83 | } |
| 87 | 84 | ||
| 88 | Result KHandleTable::Reserve(Handle* out_handle) { | 85 | Result KHandleTable::Reserve(Handle* out_handle) { |
| 89 | KScopedDisableDispatch dd(kernel); | 86 | KScopedDisableDispatch dd{m_kernel}; |
| 90 | KScopedSpinLock lk(m_lock); | 87 | KScopedSpinLock lk(m_lock); |
| 91 | 88 | ||
| 92 | // Never exceed our capacity. | 89 | // Never exceed our capacity. |
| 93 | R_UNLESS(m_count < m_table_size, ResultOutOfHandles); | 90 | R_UNLESS(m_count < m_table_size, ResultOutOfHandles); |
| 94 | 91 | ||
| 95 | *out_handle = EncodeHandle(static_cast<u16>(this->AllocateEntry()), this->AllocateLinearId()); | 92 | *out_handle = EncodeHandle(static_cast<u16>(this->AllocateEntry()), this->AllocateLinearId()); |
| 96 | return ResultSuccess; | 93 | R_SUCCEED(); |
| 97 | } | 94 | } |
| 98 | 95 | ||
| 99 | void KHandleTable::Unreserve(Handle handle) { | 96 | void KHandleTable::Unreserve(Handle handle) { |
| 100 | KScopedDisableDispatch dd(kernel); | 97 | KScopedDisableDispatch dd{m_kernel}; |
| 101 | KScopedSpinLock lk(m_lock); | 98 | KScopedSpinLock lk(m_lock); |
| 102 | 99 | ||
| 103 | // Unpack the handle. | 100 | // Unpack the handle. |
| @@ -108,7 +105,7 @@ void KHandleTable::Unreserve(Handle handle) { | |||
| 108 | ASSERT(reserved == 0); | 105 | ASSERT(reserved == 0); |
| 109 | ASSERT(linear_id != 0); | 106 | ASSERT(linear_id != 0); |
| 110 | 107 | ||
| 111 | if (index < m_table_size) { | 108 | if (index < m_table_size) [[likely]] { |
| 112 | // NOTE: This code does not check the linear id. | 109 | // NOTE: This code does not check the linear id. |
| 113 | ASSERT(m_objects[index] == nullptr); | 110 | ASSERT(m_objects[index] == nullptr); |
| 114 | this->FreeEntry(index); | 111 | this->FreeEntry(index); |
| @@ -116,7 +113,7 @@ void KHandleTable::Unreserve(Handle handle) { | |||
| 116 | } | 113 | } |
| 117 | 114 | ||
| 118 | void KHandleTable::Register(Handle handle, KAutoObject* obj) { | 115 | void KHandleTable::Register(Handle handle, KAutoObject* obj) { |
| 119 | KScopedDisableDispatch dd(kernel); | 116 | KScopedDisableDispatch dd{m_kernel}; |
| 120 | KScopedSpinLock lk(m_lock); | 117 | KScopedSpinLock lk(m_lock); |
| 121 | 118 | ||
| 122 | // Unpack the handle. | 119 | // Unpack the handle. |
| @@ -127,7 +124,7 @@ void KHandleTable::Register(Handle handle, KAutoObject* obj) { | |||
| 127 | ASSERT(reserved == 0); | 124 | ASSERT(reserved == 0); |
| 128 | ASSERT(linear_id != 0); | 125 | ASSERT(linear_id != 0); |
| 129 | 126 | ||
| 130 | if (index < m_table_size) { | 127 | if (index < m_table_size) [[likely]] { |
| 131 | // Set the entry. | 128 | // Set the entry. |
| 132 | ASSERT(m_objects[index] == nullptr); | 129 | ASSERT(m_objects[index] == nullptr); |
| 133 | 130 | ||
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index 0864a737c..65cae3b27 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
| @@ -21,33 +21,38 @@ namespace Kernel { | |||
| 21 | class KernelCore; | 21 | class KernelCore; |
| 22 | 22 | ||
| 23 | class KHandleTable { | 23 | class KHandleTable { |
| 24 | public: | ||
| 25 | YUZU_NON_COPYABLE(KHandleTable); | 24 | YUZU_NON_COPYABLE(KHandleTable); |
| 26 | YUZU_NON_MOVEABLE(KHandleTable); | 25 | YUZU_NON_MOVEABLE(KHandleTable); |
| 27 | 26 | ||
| 27 | public: | ||
| 28 | static constexpr size_t MaxTableSize = 1024; | 28 | static constexpr size_t MaxTableSize = 1024; |
| 29 | 29 | ||
| 30 | explicit KHandleTable(KernelCore& kernel_); | 30 | public: |
| 31 | ~KHandleTable(); | 31 | explicit KHandleTable(KernelCore& kernel) : m_kernel(kernel) {} |
| 32 | 32 | ||
| 33 | Result Initialize(s32 size) { | 33 | Result Initialize(s32 size) { |
| 34 | // Check that the table size is valid. | ||
| 34 | R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory); | 35 | R_UNLESS(size <= static_cast<s32>(MaxTableSize), ResultOutOfMemory); |
| 35 | 36 | ||
| 37 | // Lock. | ||
| 38 | KScopedDisableDispatch dd{m_kernel}; | ||
| 39 | KScopedSpinLock lk(m_lock); | ||
| 40 | |||
| 36 | // Initialize all fields. | 41 | // Initialize all fields. |
| 37 | m_max_count = 0; | 42 | m_max_count = 0; |
| 38 | m_table_size = static_cast<u16>((size <= 0) ? MaxTableSize : size); | 43 | m_table_size = static_cast<s16>((size <= 0) ? MaxTableSize : size); |
| 39 | m_next_linear_id = MinLinearId; | 44 | m_next_linear_id = MinLinearId; |
| 40 | m_count = 0; | 45 | m_count = 0; |
| 41 | m_free_head_index = -1; | 46 | m_free_head_index = -1; |
| 42 | 47 | ||
| 43 | // Free all entries. | 48 | // Free all entries. |
| 44 | for (s16 i = 0; i < static_cast<s16>(m_table_size); ++i) { | 49 | for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) { |
| 45 | m_objects[i] = nullptr; | 50 | m_objects[i] = nullptr; |
| 46 | m_entry_infos[i].next_free_index = i - 1; | 51 | m_entry_infos[i].next_free_index = static_cast<s16>(i - 1); |
| 47 | m_free_head_index = i; | 52 | m_free_head_index = i; |
| 48 | } | 53 | } |
| 49 | 54 | ||
| 50 | return ResultSuccess; | 55 | R_SUCCEED(); |
| 51 | } | 56 | } |
| 52 | 57 | ||
| 53 | size_t GetTableSize() const { | 58 | size_t GetTableSize() const { |
| @@ -66,13 +71,13 @@ public: | |||
| 66 | template <typename T = KAutoObject> | 71 | template <typename T = KAutoObject> |
| 67 | KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const { | 72 | KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const { |
| 68 | // Lock and look up in table. | 73 | // Lock and look up in table. |
| 69 | KScopedDisableDispatch dd(kernel); | 74 | KScopedDisableDispatch dd{m_kernel}; |
| 70 | KScopedSpinLock lk(m_lock); | 75 | KScopedSpinLock lk(m_lock); |
| 71 | 76 | ||
| 72 | if constexpr (std::is_same_v<T, KAutoObject>) { | 77 | if constexpr (std::is_same_v<T, KAutoObject>) { |
| 73 | return this->GetObjectImpl(handle); | 78 | return this->GetObjectImpl(handle); |
| 74 | } else { | 79 | } else { |
| 75 | if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) { | 80 | if (auto* obj = this->GetObjectImpl(handle); obj != nullptr) [[likely]] { |
| 76 | return obj->DynamicCast<T*>(); | 81 | return obj->DynamicCast<T*>(); |
| 77 | } else { | 82 | } else { |
| 78 | return nullptr; | 83 | return nullptr; |
| @@ -85,13 +90,13 @@ public: | |||
| 85 | // Handle pseudo-handles. | 90 | // Handle pseudo-handles. |
| 86 | if constexpr (std::derived_from<KProcess, T>) { | 91 | if constexpr (std::derived_from<KProcess, T>) { |
| 87 | if (handle == Svc::PseudoHandle::CurrentProcess) { | 92 | if (handle == Svc::PseudoHandle::CurrentProcess) { |
| 88 | auto* const cur_process = kernel.CurrentProcess(); | 93 | auto* const cur_process = m_kernel.CurrentProcess(); |
| 89 | ASSERT(cur_process != nullptr); | 94 | ASSERT(cur_process != nullptr); |
| 90 | return cur_process; | 95 | return cur_process; |
| 91 | } | 96 | } |
| 92 | } else if constexpr (std::derived_from<KThread, T>) { | 97 | } else if constexpr (std::derived_from<KThread, T>) { |
| 93 | if (handle == Svc::PseudoHandle::CurrentThread) { | 98 | if (handle == Svc::PseudoHandle::CurrentThread) { |
| 94 | auto* const cur_thread = GetCurrentThreadPointer(kernel); | 99 | auto* const cur_thread = GetCurrentThreadPointer(m_kernel); |
| 95 | ASSERT(cur_thread != nullptr); | 100 | ASSERT(cur_thread != nullptr); |
| 96 | return cur_thread; | 101 | return cur_thread; |
| 97 | } | 102 | } |
| @@ -100,6 +105,37 @@ public: | |||
| 100 | return this->template GetObjectWithoutPseudoHandle<T>(handle); | 105 | return this->template GetObjectWithoutPseudoHandle<T>(handle); |
| 101 | } | 106 | } |
| 102 | 107 | ||
| 108 | KScopedAutoObject<KAutoObject> GetObjectForIpcWithoutPseudoHandle(Handle handle) const { | ||
| 109 | // Lock and look up in table. | ||
| 110 | KScopedDisableDispatch dd{m_kernel}; | ||
| 111 | KScopedSpinLock lk(m_lock); | ||
| 112 | |||
| 113 | return this->GetObjectImpl(handle); | ||
| 114 | } | ||
| 115 | |||
| 116 | KScopedAutoObject<KAutoObject> GetObjectForIpc(Handle handle, KThread* cur_thread) const { | ||
| 117 | // Handle pseudo-handles. | ||
| 118 | ASSERT(cur_thread != nullptr); | ||
| 119 | if (handle == Svc::PseudoHandle::CurrentProcess) { | ||
| 120 | auto* const cur_process = | ||
| 121 | static_cast<KAutoObject*>(static_cast<void*>(cur_thread->GetOwnerProcess())); | ||
| 122 | ASSERT(cur_process != nullptr); | ||
| 123 | return cur_process; | ||
| 124 | } | ||
| 125 | if (handle == Svc::PseudoHandle::CurrentThread) { | ||
| 126 | return static_cast<KAutoObject*>(cur_thread); | ||
| 127 | } | ||
| 128 | |||
| 129 | return GetObjectForIpcWithoutPseudoHandle(handle); | ||
| 130 | } | ||
| 131 | |||
| 132 | KScopedAutoObject<KAutoObject> GetObjectByIndex(Handle* out_handle, size_t index) const { | ||
| 133 | KScopedDisableDispatch dd{m_kernel}; | ||
| 134 | KScopedSpinLock lk(m_lock); | ||
| 135 | |||
| 136 | return this->GetObjectByIndexImpl(out_handle, index); | ||
| 137 | } | ||
| 138 | |||
| 103 | Result Reserve(Handle* out_handle); | 139 | Result Reserve(Handle* out_handle); |
| 104 | void Unreserve(Handle handle); | 140 | void Unreserve(Handle handle); |
| 105 | 141 | ||
| @@ -112,7 +148,7 @@ public: | |||
| 112 | size_t num_opened; | 148 | size_t num_opened; |
| 113 | { | 149 | { |
| 114 | // Lock the table. | 150 | // Lock the table. |
| 115 | KScopedDisableDispatch dd(kernel); | 151 | KScopedDisableDispatch dd{m_kernel}; |
| 116 | KScopedSpinLock lk(m_lock); | 152 | KScopedSpinLock lk(m_lock); |
| 117 | for (num_opened = 0; num_opened < num_handles; num_opened++) { | 153 | for (num_opened = 0; num_opened < num_handles; num_opened++) { |
| 118 | // Get the current handle. | 154 | // Get the current handle. |
| @@ -120,13 +156,13 @@ public: | |||
| 120 | 156 | ||
| 121 | // Get the object for the current handle. | 157 | // Get the object for the current handle. |
| 122 | KAutoObject* cur_object = this->GetObjectImpl(cur_handle); | 158 | KAutoObject* cur_object = this->GetObjectImpl(cur_handle); |
| 123 | if (cur_object == nullptr) { | 159 | if (cur_object == nullptr) [[unlikely]] { |
| 124 | break; | 160 | break; |
| 125 | } | 161 | } |
| 126 | 162 | ||
| 127 | // Cast the current object to the desired type. | 163 | // Cast the current object to the desired type. |
| 128 | T* cur_t = cur_object->DynamicCast<T*>(); | 164 | T* cur_t = cur_object->DynamicCast<T*>(); |
| 129 | if (cur_t == nullptr) { | 165 | if (cur_t == nullptr) [[unlikely]] { |
| 130 | break; | 166 | break; |
| 131 | } | 167 | } |
| 132 | 168 | ||
| @@ -137,7 +173,7 @@ public: | |||
| 137 | } | 173 | } |
| 138 | 174 | ||
| 139 | // If we converted every object, succeed. | 175 | // If we converted every object, succeed. |
| 140 | if (num_opened == num_handles) { | 176 | if (num_opened == num_handles) [[likely]] { |
| 141 | return true; | 177 | return true; |
| 142 | } | 178 | } |
| 143 | 179 | ||
| @@ -191,21 +227,21 @@ private: | |||
| 191 | ASSERT(reserved == 0); | 227 | ASSERT(reserved == 0); |
| 192 | 228 | ||
| 193 | // Validate our indexing information. | 229 | // Validate our indexing information. |
| 194 | if (raw_value == 0) { | 230 | if (raw_value == 0) [[unlikely]] { |
| 195 | return false; | 231 | return false; |
| 196 | } | 232 | } |
| 197 | if (linear_id == 0) { | 233 | if (linear_id == 0) [[unlikely]] { |
| 198 | return false; | 234 | return false; |
| 199 | } | 235 | } |
| 200 | if (index >= m_table_size) { | 236 | if (index >= m_table_size) [[unlikely]] { |
| 201 | return false; | 237 | return false; |
| 202 | } | 238 | } |
| 203 | 239 | ||
| 204 | // Check that there's an object, and our serial id is correct. | 240 | // Check that there's an object, and our serial id is correct. |
| 205 | if (m_objects[index] == nullptr) { | 241 | if (m_objects[index] == nullptr) [[unlikely]] { |
| 206 | return false; | 242 | return false; |
| 207 | } | 243 | } |
| 208 | if (m_entry_infos[index].GetLinearId() != linear_id) { | 244 | if (m_entry_infos[index].GetLinearId() != linear_id) [[unlikely]] { |
| 209 | return false; | 245 | return false; |
| 210 | } | 246 | } |
| 211 | 247 | ||
| @@ -215,11 +251,11 @@ private: | |||
| 215 | KAutoObject* GetObjectImpl(Handle handle) const { | 251 | KAutoObject* GetObjectImpl(Handle handle) const { |
| 216 | // Handles must not have reserved bits set. | 252 | // Handles must not have reserved bits set. |
| 217 | const auto handle_pack = HandlePack(handle); | 253 | const auto handle_pack = HandlePack(handle); |
| 218 | if (handle_pack.reserved != 0) { | 254 | if (handle_pack.reserved != 0) [[unlikely]] { |
| 219 | return nullptr; | 255 | return nullptr; |
| 220 | } | 256 | } |
| 221 | 257 | ||
| 222 | if (this->IsValidHandle(handle)) { | 258 | if (this->IsValidHandle(handle)) [[likely]] { |
| 223 | return m_objects[handle_pack.index]; | 259 | return m_objects[handle_pack.index]; |
| 224 | } else { | 260 | } else { |
| 225 | return nullptr; | 261 | return nullptr; |
| @@ -227,9 +263,8 @@ private: | |||
| 227 | } | 263 | } |
| 228 | 264 | ||
| 229 | KAutoObject* GetObjectByIndexImpl(Handle* out_handle, size_t index) const { | 265 | KAutoObject* GetObjectByIndexImpl(Handle* out_handle, size_t index) const { |
| 230 | |||
| 231 | // Index must be in bounds. | 266 | // Index must be in bounds. |
| 232 | if (index >= m_table_size) { | 267 | if (index >= m_table_size) [[unlikely]] { |
| 233 | return nullptr; | 268 | return nullptr; |
| 234 | } | 269 | } |
| 235 | 270 | ||
| @@ -244,18 +279,15 @@ private: | |||
| 244 | 279 | ||
| 245 | private: | 280 | private: |
| 246 | union HandlePack { | 281 | union HandlePack { |
| 247 | HandlePack() = default; | 282 | constexpr HandlePack() = default; |
| 248 | HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {} | 283 | constexpr HandlePack(Handle handle) : raw{static_cast<u32>(handle)} {} |
| 249 | 284 | ||
| 250 | u32 raw; | 285 | u32 raw{}; |
| 251 | BitField<0, 15, u32> index; | 286 | BitField<0, 15, u32> index; |
| 252 | BitField<15, 15, u32> linear_id; | 287 | BitField<15, 15, u32> linear_id; |
| 253 | BitField<30, 2, u32> reserved; | 288 | BitField<30, 2, u32> reserved; |
| 254 | }; | 289 | }; |
| 255 | 290 | ||
| 256 | static constexpr u16 MinLinearId = 1; | ||
| 257 | static constexpr u16 MaxLinearId = 0x7FFF; | ||
| 258 | |||
| 259 | static constexpr Handle EncodeHandle(u16 index, u16 linear_id) { | 291 | static constexpr Handle EncodeHandle(u16 index, u16 linear_id) { |
| 260 | HandlePack handle{}; | 292 | HandlePack handle{}; |
| 261 | handle.index.Assign(index); | 293 | handle.index.Assign(index); |
| @@ -264,6 +296,10 @@ private: | |||
| 264 | return handle.raw; | 296 | return handle.raw; |
| 265 | } | 297 | } |
| 266 | 298 | ||
| 299 | private: | ||
| 300 | static constexpr u16 MinLinearId = 1; | ||
| 301 | static constexpr u16 MaxLinearId = 0x7FFF; | ||
| 302 | |||
| 267 | union EntryInfo { | 303 | union EntryInfo { |
| 268 | u16 linear_id; | 304 | u16 linear_id; |
| 269 | s16 next_free_index; | 305 | s16 next_free_index; |
| @@ -271,21 +307,21 @@ private: | |||
| 271 | constexpr u16 GetLinearId() const { | 307 | constexpr u16 GetLinearId() const { |
| 272 | return linear_id; | 308 | return linear_id; |
| 273 | } | 309 | } |
| 274 | constexpr s16 GetNextFreeIndex() const { | 310 | constexpr s32 GetNextFreeIndex() const { |
| 275 | return next_free_index; | 311 | return next_free_index; |
| 276 | } | 312 | } |
| 277 | }; | 313 | }; |
| 278 | 314 | ||
| 279 | private: | 315 | private: |
| 316 | KernelCore& m_kernel; | ||
| 280 | std::array<EntryInfo, MaxTableSize> m_entry_infos{}; | 317 | std::array<EntryInfo, MaxTableSize> m_entry_infos{}; |
| 281 | std::array<KAutoObject*, MaxTableSize> m_objects{}; | 318 | std::array<KAutoObject*, MaxTableSize> m_objects{}; |
| 282 | s32 m_free_head_index{-1}; | 319 | mutable KSpinLock m_lock; |
| 320 | s32 m_free_head_index{}; | ||
| 283 | u16 m_table_size{}; | 321 | u16 m_table_size{}; |
| 284 | u16 m_max_count{}; | 322 | u16 m_max_count{}; |
| 285 | u16 m_next_linear_id{MinLinearId}; | 323 | u16 m_next_linear_id{}; |
| 286 | u16 m_count{}; | 324 | u16 m_count{}; |
| 287 | mutable KSpinLock m_lock; | ||
| 288 | KernelCore& kernel; | ||
| 289 | }; | 325 | }; |
| 290 | 326 | ||
| 291 | } // namespace Kernel | 327 | } // namespace Kernel |
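Note on the KHandleTable layout above: HandlePack packs a 15-bit table index (bits 0-14), a 15-bit linear id (bits 15-29), and 2 reserved bits (30-31) into one 32-bit handle, and EncodeHandle simply combines the first two. The equivalent arithmetic with plain shifts and masks, as a worked example:

    // Plain shifts and masks stand in for the BitField helpers used in the header.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    using Handle = std::uint32_t;

    constexpr Handle EncodeHandle(std::uint16_t index, std::uint16_t linear_id) {
        return static_cast<Handle>(index) | (static_cast<Handle>(linear_id) << 15);
    }

    constexpr std::uint16_t HandleIndex(Handle handle) {
        return static_cast<std::uint16_t>(handle & 0x7FFF);
    }

    constexpr std::uint16_t HandleLinearId(Handle handle) {
        return static_cast<std::uint16_t>((handle >> 15) & 0x7FFF);
    }

    constexpr std::uint32_t HandleReserved(Handle handle) {
        return handle >> 30;
    }

    int main() {
        // MinLinearId is 1, so a freshly reserved entry at index 12 might get:
        const Handle handle = EncodeHandle(12, 1);
        std::printf("handle = 0x%08x\n", static_cast<unsigned>(handle));  // 0x0000800c

        assert(HandleIndex(handle) == 12);
        assert(HandleLinearId(handle) == 1);
        assert(HandleReserved(handle) == 0);  // nonzero reserved bits would make the handle invalid
    }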
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index 9444f6bd2..6f845d675 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
| @@ -35,26 +35,32 @@ enum class KMemoryState : u32 { | |||
| 35 | FlagCanMapProcess = (1 << 23), | 35 | FlagCanMapProcess = (1 << 23), |
| 36 | FlagCanChangeAttribute = (1 << 24), | 36 | FlagCanChangeAttribute = (1 << 24), |
| 37 | FlagCanCodeMemory = (1 << 25), | 37 | FlagCanCodeMemory = (1 << 25), |
| 38 | FlagLinearMapped = (1 << 26), | ||
| 38 | 39 | ||
| 39 | FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | | 40 | FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | |
| 40 | FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical | | 41 | FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical | |
| 41 | FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer | | 42 | FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer | |
| 42 | FlagReferenceCounted | FlagCanChangeAttribute, | 43 | FlagReferenceCounted | FlagCanChangeAttribute | FlagLinearMapped, |
| 43 | 44 | ||
| 44 | FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | | 45 | FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | |
| 45 | FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap | | 46 | FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap | |
| 46 | FlagCanAlignedDeviceMap | FlagReferenceCounted, | 47 | FlagCanAlignedDeviceMap | FlagReferenceCounted | FlagLinearMapped, |
| 47 | 48 | ||
| 48 | FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap, | 49 | FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap | |
| 50 | FlagLinearMapped, | ||
| 49 | 51 | ||
| 50 | Free = static_cast<u32>(Svc::MemoryState::Free), | 52 | Free = static_cast<u32>(Svc::MemoryState::Free), |
| 51 | Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped, | 53 | Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap | |
| 54 | FlagCanAlignedDeviceMap, | ||
| 52 | Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical, | 55 | Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical, |
| 53 | Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess, | 56 | Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess, |
| 54 | CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess | | 57 | CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess | |
| 55 | FlagCanCodeMemory, | 58 | FlagCanCodeMemory, |
| 56 | Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted, | ||
| 57 | Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory, | 59 | Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory, |
| 60 | Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted | | ||
| 61 | FlagLinearMapped, | ||
| 62 | |||
| 63 | // Alias was removed after 1.0.0. | ||
| 58 | 64 | ||
| 59 | AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess | | 65 | AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess | |
| 60 | FlagCanCodeAlias, | 66 | FlagCanCodeAlias, |
| @@ -67,18 +73,18 @@ enum class KMemoryState : u32 { | |||
| 67 | Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap | | 73 | Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap | |
| 68 | FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, | 74 | FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, |
| 69 | 75 | ||
| 70 | ThreadLocal = | 76 | ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagLinearMapped, |
| 71 | static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted, | ||
| 72 | 77 | ||
| 73 | Transfered = static_cast<u32>(Svc::MemoryState::Transferred) | FlagsMisc | | 78 | Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc | |
| 74 | FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc | | 79 | FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc | |
| 75 | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, | 80 | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, |
| 76 | 81 | ||
| 77 | SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransferred) | FlagsMisc | | 82 | SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc | |
| 78 | FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, | 83 | FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, |
| 79 | 84 | ||
| 80 | SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped | | 85 | SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped | |
| 81 | FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, | 86 | FlagReferenceCounted | FlagLinearMapped | FlagCanUseNonSecureIpc | |
| 87 | FlagCanUseNonDeviceIpc, | ||
| 82 | 88 | ||
| 83 | Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible), | 89 | Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible), |
| 84 | 90 | ||
| @@ -91,69 +97,69 @@ enum class KMemoryState : u32 { | |||
| 91 | Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped, | 97 | Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped, |
| 92 | 98 | ||
| 93 | GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped | | 99 | GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped | |
| 94 | FlagReferenceCounted | FlagCanDebug, | 100 | FlagReferenceCounted | FlagCanDebug | FlagLinearMapped, |
| 95 | CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted, | 101 | CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted | |
| 102 | FlagLinearMapped, | ||
| 96 | 103 | ||
| 97 | Coverage = static_cast<u32>(Svc::MemoryState::Coverage) | FlagMapped, | 104 | Coverage = static_cast<u32>(Svc::MemoryState::Coverage) | FlagMapped, |
| 105 | |||
| 106 | Insecure = static_cast<u32>(Svc::MemoryState::Insecure) | FlagMapped | FlagReferenceCounted | | ||
| 107 | FlagLinearMapped | FlagCanChangeAttribute | FlagCanDeviceMap | | ||
| 108 | FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, | ||
| 98 | }; | 109 | }; |
| 99 | DECLARE_ENUM_FLAG_OPERATORS(KMemoryState); | 110 | DECLARE_ENUM_FLAG_OPERATORS(KMemoryState); |
| 100 | 111 | ||
| 101 | static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000); | 112 | static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000); |
| 102 | static_assert(static_cast<u32>(KMemoryState::Io) == 0x00002001); | 113 | static_assert(static_cast<u32>(KMemoryState::Io) == 0x00182001); |
| 103 | static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002); | 114 | static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002); |
| 104 | static_assert(static_cast<u32>(KMemoryState::Code) == 0x00DC7E03); | 115 | static_assert(static_cast<u32>(KMemoryState::Code) == 0x04DC7E03); |
| 105 | static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x03FEBD04); | 116 | static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x07FEBD04); |
| 106 | static_assert(static_cast<u32>(KMemoryState::Normal) == 0x037EBD05); | 117 | static_assert(static_cast<u32>(KMemoryState::Normal) == 0x077EBD05); |
| 107 | static_assert(static_cast<u32>(KMemoryState::Shared) == 0x00402006); | 118 | static_assert(static_cast<u32>(KMemoryState::Shared) == 0x04402006); |
| 108 | static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x00DD7E08); | 119 | |
| 109 | static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x03FFBD09); | 120 | static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x04DD7E08); |
| 110 | static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x005C3C0A); | 121 | static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x07FFBD09); |
| 111 | static_assert(static_cast<u32>(KMemoryState::Stack) == 0x005C3C0B); | 122 | static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x045C3C0A); |
| 112 | static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0040200C); | 123 | static_assert(static_cast<u32>(KMemoryState::Stack) == 0x045C3C0B); |
| 113 | static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x015C3C0D); | 124 | static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400200C); |
| 114 | static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x005C380E); | 125 | static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x055C3C0D); |
| 115 | static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0040380F); | 126 | static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x045C380E); |
| 127 | static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0440380F); | ||
| 116 | static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010); | 128 | static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010); |
| 117 | static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x005C3811); | 129 | static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x045C3811); |
| 118 | static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x004C2812); | 130 | static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x044C2812); |
| 119 | static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013); | 131 | static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013); |
| 120 | static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x00402214); | 132 | static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x04402214); |
| 121 | static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x00402015); | 133 | static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x04402015); |
| 122 | static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016); | 134 | static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016); |
| 135 | static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x05583817); | ||
| 123 | 136 | ||
| 124 | enum class KMemoryPermission : u8 { | 137 | enum class KMemoryPermission : u8 { |
| 125 | None = 0, | 138 | None = 0, |
| 126 | All = static_cast<u8>(~None), | 139 | All = static_cast<u8>(~None), |
| 127 | 140 | ||
| 128 | Read = 1 << 0, | ||
| 129 | Write = 1 << 1, | ||
| 130 | Execute = 1 << 2, | ||
| 131 | |||
| 132 | ReadAndWrite = Read | Write, | ||
| 133 | ReadAndExecute = Read | Execute, | ||
| 134 | |||
| 135 | UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write | | ||
| 136 | Svc::MemoryPermission::Execute), | ||
| 137 | |||
| 138 | KernelShift = 3, | 141 | KernelShift = 3, |
| 139 | 142 | ||
| 140 | KernelRead = Read << KernelShift, | 143 | KernelRead = static_cast<u8>(Svc::MemoryPermission::Read) << KernelShift, |
| 141 | KernelWrite = Write << KernelShift, | 144 | KernelWrite = static_cast<u8>(Svc::MemoryPermission::Write) << KernelShift, |
| 142 | KernelExecute = Execute << KernelShift, | 145 | KernelExecute = static_cast<u8>(Svc::MemoryPermission::Execute) << KernelShift, |
| 143 | 146 | ||
| 144 | NotMapped = (1 << (2 * KernelShift)), | 147 | NotMapped = (1 << (2 * KernelShift)), |
| 145 | 148 | ||
| 146 | KernelReadWrite = KernelRead | KernelWrite, | 149 | KernelReadWrite = KernelRead | KernelWrite, |
| 147 | KernelReadExecute = KernelRead | KernelExecute, | 150 | KernelReadExecute = KernelRead | KernelExecute, |
| 148 | 151 | ||
| 149 | UserRead = Read | KernelRead, | 152 | UserRead = static_cast<u8>(Svc::MemoryPermission::Read) | KernelRead, |
| 150 | UserWrite = Write | KernelWrite, | 153 | UserWrite = static_cast<u8>(Svc::MemoryPermission::Write) | KernelWrite, |
| 151 | UserExecute = Execute, | 154 | UserExecute = static_cast<u8>(Svc::MemoryPermission::Execute), |
| 152 | 155 | ||
| 153 | UserReadWrite = UserRead | UserWrite, | 156 | UserReadWrite = UserRead | UserWrite, |
| 154 | UserReadExecute = UserRead | UserExecute, | 157 | UserReadExecute = UserRead | UserExecute, |
| 155 | 158 | ||
| 156 | IpcLockChangeMask = NotMapped | UserReadWrite | 159 | UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write | |
| 160 | Svc::MemoryPermission::Execute), | ||
| 161 | |||
| 162 | IpcLockChangeMask = NotMapped | UserReadWrite, | ||
| 157 | }; | 163 | }; |
| 158 | DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission); | 164 | DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission); |
| 159 | 165 | ||
| @@ -468,6 +474,7 @@ public: | |||
| 468 | 474 | ||
| 469 | constexpr void UpdateDeviceDisableMergeStateForShareLeft( | 475 | constexpr void UpdateDeviceDisableMergeStateForShareLeft( |
| 470 | [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { | 476 | [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { |
| 477 | // New permission/right aren't used. | ||
| 471 | if (left) { | 478 | if (left) { |
| 472 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( | 479 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( |
| 473 | m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft); | 480 | m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft); |
| @@ -478,6 +485,7 @@ public: | |||
| 478 | 485 | ||
| 479 | constexpr void UpdateDeviceDisableMergeStateForShareRight( | 486 | constexpr void UpdateDeviceDisableMergeStateForShareRight( |
| 480 | [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { | 487 | [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { |
| 488 | // New permission/left aren't used. | ||
| 481 | if (right) { | 489 | if (right) { |
| 482 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( | 490 | m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( |
| 483 | m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight); | 491 | m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight); |
| @@ -494,6 +502,8 @@ public: | |||
| 494 | 502 | ||
| 495 | constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, | 503 | constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, |
| 496 | bool right) { | 504 | bool right) { |
| 505 | // New permission isn't used. | ||
| 506 | |||
| 497 | // We must either be shared or have a zero lock count. | 507 | // We must either be shared or have a zero lock count. |
| 498 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared || | 508 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared || |
| 499 | m_device_use_count == 0); | 509 | m_device_use_count == 0); |
| @@ -509,6 +519,7 @@ public: | |||
| 509 | 519 | ||
| 510 | constexpr void UpdateDeviceDisableMergeStateForUnshareLeft( | 520 | constexpr void UpdateDeviceDisableMergeStateForUnshareLeft( |
| 511 | [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { | 521 | [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { |
| 522 | // New permission/right aren't used. | ||
| 512 | 523 | ||
| 513 | if (left) { | 524 | if (left) { |
| 514 | if (!m_device_disable_merge_left_count) { | 525 | if (!m_device_disable_merge_left_count) { |
| @@ -528,6 +539,8 @@ public: | |||
| 528 | 539 | ||
| 529 | constexpr void UpdateDeviceDisableMergeStateForUnshareRight( | 540 | constexpr void UpdateDeviceDisableMergeStateForUnshareRight( |
| 530 | [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { | 541 | [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { |
| 542 | // New permission/left aren't used. | ||
| 543 | |||
| 531 | if (right) { | 544 | if (right) { |
| 532 | const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--; | 545 | const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--; |
| 533 | ASSERT(old_device_disable_merge_right_count > 0); | 546 | ASSERT(old_device_disable_merge_right_count > 0); |
| @@ -546,6 +559,8 @@ public: | |||
| 546 | 559 | ||
| 547 | constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, | 560 | constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, |
| 548 | bool right) { | 561 | bool right) { |
| 562 | // New permission isn't used. | ||
| 563 | |||
| 549 | // We must be shared. | 564 | // We must be shared. |
| 550 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); | 565 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); |
| 551 | 566 | ||
| @@ -563,6 +578,7 @@ public: | |||
| 563 | 578 | ||
| 564 | constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left, | 579 | constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left, |
| 565 | bool right) { | 580 | bool right) { |
| 581 | // New permission isn't used. | ||
| 566 | 582 | ||
| 567 | // We must be shared. | 583 | // We must be shared. |
| 568 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); | 584 | ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); |
| @@ -613,6 +629,8 @@ public: | |||
| 613 | 629 | ||
| 614 | constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left, | 630 | constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left, |
| 615 | [[maybe_unused]] bool right) { | 631 | [[maybe_unused]] bool right) { |
| 632 | // New permission isn't used. | ||
| 633 | |||
| 616 | // We must be locked. | 634 | // We must be locked. |
| 617 | ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked); | 635 | ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked); |
| 618 | 636 | ||
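These hunks only annotate which parameters are unused, but the surrounding share/unshare helpers all follow the same pattern: a per-side bit in the disable-merge attribute is set while a device share is active so the block cannot be merged with its left or right neighbour. A small standalone sketch of that pattern, with assumed bit values for the left/right attributes:

// Standalone sketch of the disable-merge bookkeeping pattern visible above.
// The bit values are illustrative assumptions, not taken from the header.
#include <cstdint>

namespace sketch {
enum class DisableMergeAttribute : std::uint8_t {
    None = 0,
    DeviceLeft = 1 << 0,  // assumed value
    DeviceRight = 1 << 1, // assumed value
};

constexpr DisableMergeAttribute SetDeviceLeft(DisableMergeAttribute attr) {
    return static_cast<DisableMergeAttribute>(
        static_cast<std::uint8_t>(attr) |
        static_cast<std::uint8_t>(DisableMergeAttribute::DeviceLeft));
}

static_assert(SetDeviceLeft(DisableMergeAttribute::None) == DisableMergeAttribute::DeviceLeft);
} // namespace sketch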
diff --git a/src/core/hle/kernel/k_memory_layout.cpp b/src/core/hle/kernel/k_memory_layout.cpp index 55dc296d0..72c3ee4b7 100644 --- a/src/core/hle/kernel/k_memory_layout.cpp +++ b/src/core/hle/kernel/k_memory_layout.cpp | |||
| @@ -153,13 +153,9 @@ void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_ | |||
| 153 | } | 153 | } |
| 154 | } | 154 | } |
| 155 | 155 | ||
| 156 | size_t KMemoryLayout::GetResourceRegionSizeForInit() { | 156 | size_t KMemoryLayout::GetResourceRegionSizeForInit(bool use_extra_resource) { |
| 157 | // Calculate resource region size based on whether we allow extra threads. | 157 | return KernelResourceSize + KSystemControl::SecureAppletMemorySize + |
| 158 | const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit(); | 158 | (use_extra_resource ? KernelSlabHeapAdditionalSize + KernelPageBufferAdditionalSize : 0); |
| 159 | size_t resource_region_size = | ||
| 160 | KernelResourceSize + (use_extra_resources ? KernelSlabHeapAdditionalSize : 0); | ||
| 161 | |||
| 162 | return resource_region_size; | ||
| 163 | } | 159 | } |
| 164 | 160 | ||
| 165 | } // namespace Kernel | 161 | } // namespace Kernel |
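A standalone sketch of the new size computation, using the additional-size constants from the k_memory_layout.h hunk that follows; SecureAppletMemorySize is treated as an illustrative 4 MiB placeholder rather than quoted from the header:

// Standalone sketch (not part of the patch) of GetResourceRegionSizeForInit.
#include <cstddef>

namespace sketch {
constexpr std::size_t SecureAppletMemorySize         = 4 * 1024 * 1024; // assumed value
constexpr std::size_t KernelSlabHeapAdditionalSize   = 0x148000;
constexpr std::size_t KernelPageBufferAdditionalSize = 0x33C000;

constexpr std::size_t GetResourceRegionSizeForInit(std::size_t kernel_resource_size,
                                                   bool use_extra_resource) {
    return kernel_resource_size + SecureAppletMemorySize +
           (use_extra_resource ? KernelSlabHeapAdditionalSize + KernelPageBufferAdditionalSize
                               : 0);
}
} // namespace sketch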
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h index 884fc623a..fd6e1d3e6 100644 --- a/src/core/hle/kernel/k_memory_layout.h +++ b/src/core/hle/kernel/k_memory_layout.h | |||
| @@ -60,10 +60,12 @@ constexpr std::size_t KernelSlabHeapGapsSizeMax = 2_MiB - 64_KiB; | |||
| 60 | constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax; | 60 | constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax; |
| 61 | 61 | ||
| 62 | // NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860. | 62 | // NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860. |
| 63 | constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000; | 63 | constexpr size_t KernelPageBufferHeapSize = 0x3E0000; |
| 64 | constexpr size_t KernelSlabHeapAdditionalSize = 0x148000; | ||
| 65 | constexpr size_t KernelPageBufferAdditionalSize = 0x33C000; | ||
| 64 | 66 | ||
| 65 | constexpr std::size_t KernelResourceSize = | 67 | constexpr std::size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize + |
| 66 | KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize; | 68 | KernelSlabHeapSize + KernelPageBufferHeapSize; |
| 67 | 69 | ||
| 68 | constexpr bool IsKernelAddressKey(VAddr key) { | 70 | constexpr bool IsKernelAddressKey(VAddr key) { |
| 69 | return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast; | 71 | return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast; |
| @@ -168,6 +170,11 @@ public: | |||
| 168 | KMemoryRegionType_VirtualDramKernelTraceBuffer)); | 170 | KMemoryRegionType_VirtualDramKernelTraceBuffer)); |
| 169 | } | 171 | } |
| 170 | 172 | ||
| 173 | const KMemoryRegion& GetSecureAppletMemoryRegion() { | ||
| 174 | return Dereference(GetVirtualMemoryRegionTree().FindByType( | ||
| 175 | KMemoryRegionType_VirtualDramKernelSecureAppletMemory)); | ||
| 176 | } | ||
| 177 | |||
| 171 | const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const { | 178 | const KMemoryRegion& GetVirtualLinearRegion(VAddr address) const { |
| 172 | return Dereference(FindVirtualLinear(address)); | 179 | return Dereference(FindVirtualLinear(address)); |
| 173 | } | 180 | } |
| @@ -229,7 +236,7 @@ public: | |||
| 229 | 236 | ||
| 230 | void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start, | 237 | void InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start, |
| 231 | VAddr linear_virtual_start); | 238 | VAddr linear_virtual_start); |
| 232 | static size_t GetResourceRegionSizeForInit(); | 239 | static size_t GetResourceRegionSizeForInit(bool use_extra_resource); |
| 233 | 240 | ||
| 234 | auto GetKernelRegionExtents() const { | 241 | auto GetKernelRegionExtents() const { |
| 235 | return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel); | 242 | return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel); |
| @@ -279,6 +286,10 @@ public: | |||
| 279 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | 286 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( |
| 280 | KMemoryRegionType_DramKernelSlab); | 287 | KMemoryRegionType_DramKernelSlab); |
| 281 | } | 288 | } |
| 289 | auto GetKernelSecureAppletMemoryRegionPhysicalExtents() { | ||
| 290 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | ||
| 291 | KMemoryRegionType_DramKernelSecureAppletMemory); | ||
| 292 | } | ||
| 282 | auto GetKernelPageTableHeapRegionPhysicalExtents() const { | 293 | auto GetKernelPageTableHeapRegionPhysicalExtents() const { |
| 283 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( | 294 | return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents( |
| 284 | KMemoryRegionType_DramKernelPtHeap); | 295 | KMemoryRegionType_DramKernelPtHeap); |
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp index 646711505..c4bf306e8 100644 --- a/src/core/hle/kernel/k_memory_manager.cpp +++ b/src/core/hle/kernel/k_memory_manager.cpp | |||
| @@ -29,43 +29,44 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) { | |||
| 29 | } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) { | 29 | } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) { |
| 30 | return KMemoryManager::Pool::SystemNonSecure; | 30 | return KMemoryManager::Pool::SystemNonSecure; |
| 31 | } else { | 31 | } else { |
| 32 | ASSERT_MSG(false, "InvalidMemoryRegionType for conversion to Pool"); | 32 | UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool"); |
| 33 | return {}; | ||
| 34 | } | 33 | } |
| 35 | } | 34 | } |
| 36 | 35 | ||
| 37 | } // namespace | 36 | } // namespace |
| 38 | 37 | ||
| 39 | KMemoryManager::KMemoryManager(Core::System& system_) | 38 | KMemoryManager::KMemoryManager(Core::System& system) |
| 40 | : system{system_}, pool_locks{ | 39 | : m_system{system}, m_memory_layout{system.Kernel().MemoryLayout()}, |
| 41 | KLightLock{system_.Kernel()}, | 40 | m_pool_locks{ |
| 42 | KLightLock{system_.Kernel()}, | 41 | KLightLock{system.Kernel()}, |
| 43 | KLightLock{system_.Kernel()}, | 42 | KLightLock{system.Kernel()}, |
| 44 | KLightLock{system_.Kernel()}, | 43 | KLightLock{system.Kernel()}, |
| 45 | } {} | 44 | KLightLock{system.Kernel()}, |
| 45 | } {} | ||
| 46 | 46 | ||
| 47 | void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) { | 47 | void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) { |
| 48 | 48 | ||
| 49 | // Clear the management region to zero. | 49 | // Clear the management region to zero. |
| 50 | const VAddr management_region_end = management_region + management_region_size; | 50 | const VAddr management_region_end = management_region + management_region_size; |
| 51 | // std::memset(GetVoidPointer(management_region), 0, management_region_size); | ||
| 51 | 52 | ||
| 52 | // Reset our manager count. | 53 | // Reset our manager count. |
| 53 | num_managers = 0; | 54 | m_num_managers = 0; |
| 54 | 55 | ||
| 55 | // Traverse the virtual memory layout tree, initializing each manager as appropriate. | 56 | // Traverse the virtual memory layout tree, initializing each manager as appropriate. |
| 56 | while (num_managers != MaxManagerCount) { | 57 | while (m_num_managers != MaxManagerCount) { |
| 57 | // Locate the region that should initialize the current manager. | 58 | // Locate the region that should initialize the current manager. |
| 58 | PAddr region_address = 0; | 59 | PAddr region_address = 0; |
| 59 | size_t region_size = 0; | 60 | size_t region_size = 0; |
| 60 | Pool region_pool = Pool::Count; | 61 | Pool region_pool = Pool::Count; |
| 61 | for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { | 62 | for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { |
| 62 | // We only care about regions that we need to create managers for. | 63 | // We only care about regions that we need to create managers for. |
| 63 | if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { | 64 | if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { |
| 64 | continue; | 65 | continue; |
| 65 | } | 66 | } |
| 66 | 67 | ||
| 67 | // We want to initialize the managers in order. | 68 | // We want to initialize the managers in order. |
| 68 | if (it.GetAttributes() != num_managers) { | 69 | if (it.GetAttributes() != m_num_managers) { |
| 69 | continue; | 70 | continue; |
| 70 | } | 71 | } |
| 71 | 72 | ||
| @@ -97,8 +98,8 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio | |||
| 97 | } | 98 | } |
| 98 | 99 | ||
| 99 | // Initialize a new manager for the region. | 100 | // Initialize a new manager for the region. |
| 100 | Impl* manager = std::addressof(managers[num_managers++]); | 101 | Impl* manager = std::addressof(m_managers[m_num_managers++]); |
| 101 | ASSERT(num_managers <= managers.size()); | 102 | ASSERT(m_num_managers <= m_managers.size()); |
| 102 | 103 | ||
| 103 | const size_t cur_size = manager->Initialize(region_address, region_size, management_region, | 104 | const size_t cur_size = manager->Initialize(region_address, region_size, management_region, |
| 104 | management_region_end, region_pool); | 105 | management_region_end, region_pool); |
| @@ -107,13 +108,13 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio | |||
| 107 | 108 | ||
| 108 | // Insert the manager into the pool list. | 109 | // Insert the manager into the pool list. |
| 109 | const auto region_pool_index = static_cast<u32>(region_pool); | 110 | const auto region_pool_index = static_cast<u32>(region_pool); |
| 110 | if (pool_managers_tail[region_pool_index] == nullptr) { | 111 | if (m_pool_managers_tail[region_pool_index] == nullptr) { |
| 111 | pool_managers_head[region_pool_index] = manager; | 112 | m_pool_managers_head[region_pool_index] = manager; |
| 112 | } else { | 113 | } else { |
| 113 | pool_managers_tail[region_pool_index]->SetNext(manager); | 114 | m_pool_managers_tail[region_pool_index]->SetNext(manager); |
| 114 | manager->SetPrev(pool_managers_tail[region_pool_index]); | 115 | manager->SetPrev(m_pool_managers_tail[region_pool_index]); |
| 115 | } | 116 | } |
| 116 | pool_managers_tail[region_pool_index] = manager; | 117 | m_pool_managers_tail[region_pool_index] = manager; |
| 117 | } | 118 | } |
| 118 | 119 | ||
| 119 | // Free each region to its corresponding heap. | 120 | // Free each region to its corresponding heap. |
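The manager-insertion code above appends each Impl to a per-pool intrusive list via head/tail pointer arrays. A standalone sketch of just that append step:

// Standalone sketch of the per-pool manager list insertion shown above.
#include <array>
#include <cstddef>

namespace sketch {
struct Manager {
    Manager* next{};
    Manager* prev{};
};

constexpr std::size_t NumPools = 4;
std::array<Manager*, NumPools> pool_head{};
std::array<Manager*, NumPools> pool_tail{};

void AppendToPool(Manager* manager, std::size_t pool_index) {
    if (pool_tail[pool_index] == nullptr) {
        pool_head[pool_index] = manager; // first manager for this pool
    } else {
        pool_tail[pool_index]->next = manager;
        manager->prev = pool_tail[pool_index];
    }
    pool_tail[pool_index] = manager;
}
} // namespace sketch

Keeping a tail pointer makes the append O(1), while GetFirstManager/GetNextManager can later walk the list from either end depending on the allocation direction.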
| @@ -121,11 +122,10 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio | |||
| 121 | const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress(); | 122 | const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress(); |
| 122 | const PAddr ini_end = ini_start + InitialProcessBinarySizeMax; | 123 | const PAddr ini_end = ini_start + InitialProcessBinarySizeMax; |
| 123 | const PAddr ini_last = ini_end - 1; | 124 | const PAddr ini_last = ini_end - 1; |
| 124 | for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { | 125 | for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) { |
| 125 | if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { | 126 | if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) { |
| 126 | // Get the manager for the region. | 127 | // Get the manager for the region. |
| 127 | auto index = it.GetAttributes(); | 128 | auto& manager = m_managers[it.GetAttributes()]; |
| 128 | auto& manager = managers[index]; | ||
| 129 | 129 | ||
| 130 | const PAddr cur_start = it.GetAddress(); | 130 | const PAddr cur_start = it.GetAddress(); |
| 131 | const PAddr cur_last = it.GetLastAddress(); | 131 | const PAddr cur_last = it.GetLastAddress(); |
| @@ -162,11 +162,19 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_regio | |||
| 162 | } | 162 | } |
| 163 | 163 | ||
| 164 | // Update the used size for all managers. | 164 | // Update the used size for all managers. |
| 165 | for (size_t i = 0; i < num_managers; ++i) { | 165 | for (size_t i = 0; i < m_num_managers; ++i) { |
| 166 | managers[i].SetInitialUsedHeapSize(reserved_sizes[i]); | 166 | m_managers[i].SetInitialUsedHeapSize(reserved_sizes[i]); |
| 167 | } | 167 | } |
| 168 | } | 168 | } |
| 169 | 169 | ||
| 170 | Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) { | ||
| 171 | UNREACHABLE(); | ||
| 172 | } | ||
| 173 | |||
| 174 | void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) { | ||
| 175 | UNREACHABLE(); | ||
| 176 | } | ||
| 177 | |||
| 170 | PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) { | 178 | PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) { |
| 171 | // Early return if we're allocating no pages. | 179 | // Early return if we're allocating no pages. |
| 172 | if (num_pages == 0) { | 180 | if (num_pages == 0) { |
| @@ -175,7 +183,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p | |||
| 175 | 183 | ||
| 176 | // Lock the pool that we're allocating from. | 184 | // Lock the pool that we're allocating from. |
| 177 | const auto [pool, dir] = DecodeOption(option); | 185 | const auto [pool, dir] = DecodeOption(option); |
| 178 | KScopedLightLock lk(pool_locks[static_cast<std::size_t>(pool)]); | 186 | KScopedLightLock lk(m_pool_locks[static_cast<std::size_t>(pool)]); |
| 179 | 187 | ||
| 180 | // Choose a heap based on our page size request. | 188 | // Choose a heap based on our page size request. |
| 181 | const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages); | 189 | const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages); |
| @@ -185,7 +193,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p | |||
| 185 | PAddr allocated_block = 0; | 193 | PAddr allocated_block = 0; |
| 186 | for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; | 194 | for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; |
| 187 | chosen_manager = this->GetNextManager(chosen_manager, dir)) { | 195 | chosen_manager = this->GetNextManager(chosen_manager, dir)) { |
| 188 | allocated_block = chosen_manager->AllocateBlock(heap_index, true); | 196 | allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages); |
| 189 | if (allocated_block != 0) { | 197 | if (allocated_block != 0) { |
| 190 | break; | 198 | break; |
| 191 | } | 199 | } |
| @@ -196,10 +204,9 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p | |||
| 196 | return 0; | 204 | return 0; |
| 197 | } | 205 | } |
| 198 | 206 | ||
| 199 | // If we allocated more than we need, free some. | 207 | // Maintain the optimized memory bitmap, if we should. |
| 200 | const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index); | 208 | if (m_has_optimized_process[static_cast<size_t>(pool)]) { |
| 201 | if (allocated_pages > num_pages) { | 209 | UNIMPLEMENTED(); |
| 202 | chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages); | ||
| 203 | } | 210 | } |
| 204 | 211 | ||
| 205 | // Open the first reference to the pages. | 212 | // Open the first reference to the pages. |
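Previously the allocator grabbed the smallest aligned heap block that fits and then returned the surplus tail pages; AllocateAligned now performs the aligned allocation directly. A standalone sketch of the trimming arithmetic the removed lines used to perform:

// Standalone sketch of the trimming step that the old code performed and that
// AllocateAligned now subsumes: when the chosen heap block is larger than the
// request, the tail pages are returned to the heap.
#include <cstddef>
#include <cstdint>

namespace sketch {
constexpr std::size_t PageSize = 0x1000; // 4 KiB pages, as elsewhere in the kernel

struct TrimResult {
    std::uint64_t free_address;
    std::size_t free_pages;
};

constexpr TrimResult TrimExcess(std::uint64_t block, std::size_t allocated_pages,
                                std::size_t requested_pages) {
    return {block + requested_pages * PageSize, allocated_pages - requested_pages};
}

static_assert(TrimExcess(0x100000, 8, 5).free_address == 0x105000);
static_assert(TrimExcess(0x100000, 8, 5).free_pages == 3);
} // namespace sketch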
| @@ -209,20 +216,21 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_p | |||
| 209 | } | 216 | } |
| 210 | 217 | ||
| 211 | Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, | 218 | Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, |
| 212 | Direction dir, bool random) { | 219 | Direction dir, bool unoptimized, bool random) { |
| 213 | // Choose a heap based on our page size request. | 220 | // Choose a heap based on our page size request. |
| 214 | const s32 heap_index = KPageHeap::GetBlockIndex(num_pages); | 221 | const s32 heap_index = KPageHeap::GetBlockIndex(num_pages); |
| 215 | R_UNLESS(0 <= heap_index, ResultOutOfMemory); | 222 | R_UNLESS(0 <= heap_index, ResultOutOfMemory); |
| 216 | 223 | ||
| 217 | // Ensure that we don't leave anything un-freed. | 224 | // Ensure that we don't leave anything un-freed. |
| 218 | auto group_guard = SCOPE_GUARD({ | 225 | ON_RESULT_FAILURE { |
| 219 | for (const auto& it : out->Nodes()) { | 226 | for (const auto& it : out->Nodes()) { |
| 220 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), it.GetAddress()); | 227 | auto& manager = this->GetManager(it.GetAddress()); |
| 221 | const size_t num_pages_to_free = | 228 | const size_t node_num_pages = |
| 222 | std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize); | 229 | std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize); |
| 223 | manager.Free(it.GetAddress(), num_pages_to_free); | 230 | manager.Free(it.GetAddress(), node_num_pages); |
| 224 | } | 231 | } |
| 225 | }); | 232 | out->Finalize(); |
| 233 | }; | ||
| 226 | 234 | ||
| 227 | // Keep allocating until we've allocated all our pages. | 235 | // Keep allocating until we've allocated all our pages. |
| 228 | for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) { | 236 | for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) { |
| @@ -236,12 +244,17 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, | |||
| 236 | break; | 244 | break; |
| 237 | } | 245 | } |
| 238 | 246 | ||
| 239 | // Safely add it to our group. | 247 | // Ensure we don't leak the block if we fail. |
| 240 | { | 248 | ON_RESULT_FAILURE_2 { |
| 241 | auto block_guard = | 249 | cur_manager->Free(allocated_block, pages_per_alloc); |
| 242 | SCOPE_GUARD({ cur_manager->Free(allocated_block, pages_per_alloc); }); | 250 | }; |
| 243 | R_TRY(out->AddBlock(allocated_block, pages_per_alloc)); | 251 | |
| 244 | block_guard.Cancel(); | 252 | // Add the block to our group. |
| 253 | R_TRY(out->AddBlock(allocated_block, pages_per_alloc)); | ||
| 254 | |||
| 255 | // Maintain the optimized memory bitmap, if we should. | ||
| 256 | if (unoptimized) { | ||
| 257 | UNIMPLEMENTED(); | ||
| 245 | } | 258 | } |
| 246 | 259 | ||
| 247 | num_pages -= pages_per_alloc; | 260 | num_pages -= pages_per_alloc; |
| @@ -253,8 +266,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, | |||
| 253 | R_UNLESS(num_pages == 0, ResultOutOfMemory); | 266 | R_UNLESS(num_pages == 0, ResultOutOfMemory); |
| 254 | 267 | ||
| 255 | // We succeeded! | 268 | // We succeeded! |
| 256 | group_guard.Cancel(); | 269 | R_SUCCEED(); |
| 257 | return ResultSuccess; | ||
| 258 | } | 270 | } |
| 259 | 271 | ||
| 260 | Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) { | 272 | Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) { |
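The refactor replaces the cancellable SCOPE_GUARD objects with ON_RESULT_FAILURE / ON_RESULT_FAILURE_2 blocks that only run their cleanup when the enclosing function exits with a failing Result. A generic, standalone illustration of that idiom (this is not the actual macro from the codebase, just the underlying RAII pattern):

// Standalone illustration of "cleanup runs only on the failure path".
#include <utility>

namespace sketch {
template <typename F>
class FailureGuard {
public:
    explicit FailureGuard(F f) : m_f(std::move(f)) {}
    ~FailureGuard() {
        if (m_armed) {
            m_f();
        }
    }
    void Disarm() { // call on the success path
        m_armed = false;
    }

private:
    F m_f;
    bool m_armed{true};
};

bool AllocateSomething(bool& allocated) {
    allocated = true;
    FailureGuard guard{[&] { allocated = false; }}; // undo on failure

    const bool success = /* ...allocation steps... */ true;
    if (!success) {
        return false; // guard destructor runs, allocation is rolled back
    }

    guard.Disarm(); // success: keep the allocation
    return true;
}
} // namespace sketch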
| @@ -266,10 +278,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op | |||
| 266 | 278 | ||
| 267 | // Lock the pool that we're allocating from. | 279 | // Lock the pool that we're allocating from. |
| 268 | const auto [pool, dir] = DecodeOption(option); | 280 | const auto [pool, dir] = DecodeOption(option); |
| 269 | KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]); | 281 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]); |
| 270 | 282 | ||
| 271 | // Allocate the page group. | 283 | // Allocate the page group. |
| 272 | R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false)); | 284 | R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, |
| 285 | m_has_optimized_process[static_cast<size_t>(pool)], true)); | ||
| 273 | 286 | ||
| 274 | // Open the first reference to the pages. | 287 | // Open the first reference to the pages. |
| 275 | for (const auto& block : out->Nodes()) { | 288 | for (const auto& block : out->Nodes()) { |
| @@ -277,7 +290,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op | |||
| 277 | size_t remaining_pages = block.GetNumPages(); | 290 | size_t remaining_pages = block.GetNumPages(); |
| 278 | while (remaining_pages > 0) { | 291 | while (remaining_pages > 0) { |
| 279 | // Get the manager for the current address. | 292 | // Get the manager for the current address. |
| 280 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address); | 293 | auto& manager = this->GetManager(cur_address); |
| 281 | 294 | ||
| 282 | // Process part or all of the block. | 295 | // Process part or all of the block. |
| 283 | const size_t cur_pages = | 296 | const size_t cur_pages = |
| @@ -290,11 +303,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 op | |||
| 290 | } | 303 | } |
| 291 | } | 304 | } |
| 292 | 305 | ||
| 293 | return ResultSuccess; | 306 | R_SUCCEED(); |
| 294 | } | 307 | } |
| 295 | 308 | ||
| 296 | Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, | 309 | Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, |
| 297 | u64 process_id, u8 fill_pattern) { | 310 | u64 process_id, u8 fill_pattern) { |
| 298 | ASSERT(out != nullptr); | 311 | ASSERT(out != nullptr); |
| 299 | ASSERT(out->GetNumPages() == 0); | 312 | ASSERT(out->GetNumPages() == 0); |
| 300 | 313 | ||
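Both the open loop above and the allocation paths below walk a physical range one manager at a time: each iteration handles at most GetPageOffsetToEnd(address) pages, then advances. A standalone sketch of that walk, assuming the range is fully covered by the managers:

// Standalone sketch of walking a physical range across manager boundaries.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

namespace sketch {
constexpr std::size_t PageSize = 0x1000;

struct Manager {
    std::uint64_t start;
    std::uint64_t end;
    std::size_t PageOffsetToEnd(std::uint64_t address) const {
        return (end - address) / PageSize;
    }
};

// Returns how many per-manager chunks the range [address, address + num_pages) touches.
// Assumes every page of the range is owned by some manager.
std::size_t ForEachChunk(const std::vector<Manager>& managers, std::uint64_t address,
                         std::size_t num_pages) {
    std::size_t chunks = 0;
    while (num_pages > 0) {
        // Find the manager containing the current address (linear scan for the sketch).
        const auto it = std::find_if(managers.begin(), managers.end(), [&](const Manager& m) {
            return m.start <= address && address < m.end;
        });
        const std::size_t cur_pages = std::min(num_pages, it->PageOffsetToEnd(address));

        // A real implementation would Open()/OpenFirst()/Close() cur_pages here.
        ++chunks;
        address += cur_pages * PageSize;
        num_pages -= cur_pages;
    }
    return chunks;
}
} // namespace sketch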
| @@ -302,83 +315,89 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag | |||
| 302 | const auto [pool, dir] = DecodeOption(option); | 315 | const auto [pool, dir] = DecodeOption(option); |
| 303 | 316 | ||
| 304 | // Allocate the memory. | 317 | // Allocate the memory. |
| 318 | bool optimized; | ||
| 305 | { | 319 | { |
| 306 | // Lock the pool that we're allocating from. | 320 | // Lock the pool that we're allocating from. |
| 307 | KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]); | 321 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]); |
| 322 | |||
| 323 | // Check if we have an optimized process. | ||
| 324 | const bool has_optimized = m_has_optimized_process[static_cast<size_t>(pool)]; | ||
| 325 | const bool is_optimized = m_optimized_process_ids[static_cast<size_t>(pool)] == process_id; | ||
| 308 | 326 | ||
| 309 | // Allocate the page group. | 327 | // Allocate the page group. |
| 310 | R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false)); | 328 | R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized, |
| 329 | false)); | ||
| 311 | 330 | ||
| 312 | // Open the first reference to the pages. | 331 | // Set whether we should optimize. |
| 313 | for (const auto& block : out->Nodes()) { | 332 | optimized = has_optimized && is_optimized; |
| 314 | PAddr cur_address = block.GetAddress(); | ||
| 315 | size_t remaining_pages = block.GetNumPages(); | ||
| 316 | while (remaining_pages > 0) { | ||
| 317 | // Get the manager for the current address. | ||
| 318 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address); | ||
| 319 | |||
| 320 | // Process part or all of the block. | ||
| 321 | const size_t cur_pages = | ||
| 322 | std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); | ||
| 323 | manager.OpenFirst(cur_address, cur_pages); | ||
| 324 | |||
| 325 | // Advance. | ||
| 326 | cur_address += cur_pages * PageSize; | ||
| 327 | remaining_pages -= cur_pages; | ||
| 328 | } | ||
| 329 | } | ||
| 330 | } | 333 | } |
| 331 | 334 | ||
| 332 | // Set all the allocated memory. | 335 | // Perform optimized memory tracking, if we should. |
| 333 | for (const auto& block : out->Nodes()) { | 336 | if (optimized) { |
| 334 | std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern, | 337 | // Iterate over the allocated blocks. |
| 335 | block.GetSize()); | 338 | for (const auto& block : out->Nodes()) { |
| 336 | } | 339 | // Get the block extents. |
| 340 | const PAddr block_address = block.GetAddress(); | ||
| 341 | const size_t block_pages = block.GetNumPages(); | ||
| 337 | 342 | ||
| 338 | return ResultSuccess; | 343 | // If it has no pages, we don't need to do anything. |
| 339 | } | 344 | if (block_pages == 0) { |
| 345 | continue; | ||
| 346 | } | ||
| 340 | 347 | ||
| 341 | void KMemoryManager::Open(PAddr address, size_t num_pages) { | 348 | // Fill all the pages that we need to fill. |
| 342 | // Repeatedly open references until we've done so for all pages. | 349 | bool any_new = false; |
| 343 | while (num_pages) { | 350 | { |
| 344 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address); | 351 | PAddr cur_address = block_address; |
| 345 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); | 352 | size_t remaining_pages = block_pages; |
| 353 | while (remaining_pages > 0) { | ||
| 354 | // Get the manager for the current address. | ||
| 355 | auto& manager = this->GetManager(cur_address); | ||
| 356 | |||
| 357 | // Process part or all of the block. | ||
| 358 | const size_t cur_pages = | ||
| 359 | std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); | ||
| 360 | any_new = | ||
| 361 | manager.ProcessOptimizedAllocation(cur_address, cur_pages, fill_pattern); | ||
| 362 | |||
| 363 | // Advance. | ||
| 364 | cur_address += cur_pages * PageSize; | ||
| 365 | remaining_pages -= cur_pages; | ||
| 366 | } | ||
| 367 | } | ||
| 346 | 368 | ||
| 347 | { | 369 | // If there are new pages, update tracking for the allocation. |
| 348 | KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]); | 370 | if (any_new) { |
| 349 | manager.Open(address, cur_pages); | 371 | // Update tracking for the allocation. |
| 372 | PAddr cur_address = block_address; | ||
| 373 | size_t remaining_pages = block_pages; | ||
| 374 | while (remaining_pages > 0) { | ||
| 375 | // Get the manager for the current address. | ||
| 376 | auto& manager = this->GetManager(cur_address); | ||
| 377 | |||
| 378 | // Lock the pool for the manager. | ||
| 379 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]); | ||
| 380 | |||
| 381 | // Track some or all of the current pages. | ||
| 382 | const size_t cur_pages = | ||
| 383 | std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address)); | ||
| 384 | manager.TrackOptimizedAllocation(cur_address, cur_pages); | ||
| 385 | |||
| 386 | // Advance. | ||
| 387 | cur_address += cur_pages * PageSize; | ||
| 388 | remaining_pages -= cur_pages; | ||
| 389 | } | ||
| 390 | } | ||
| 350 | } | 391 | } |
| 351 | 392 | } else { | |
| 352 | num_pages -= cur_pages; | 393 | // Set all the allocated memory. |
| 353 | address += cur_pages * PageSize; | 394 | for (const auto& block : out->Nodes()) { |
| 354 | } | 395 | std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern, |
| 355 | } | 396 | block.GetSize()); |
| 356 | |||
| 357 | void KMemoryManager::Close(PAddr address, size_t num_pages) { | ||
| 358 | // Repeatedly close references until we've done so for all pages. | ||
| 359 | while (num_pages) { | ||
| 360 | auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address); | ||
| 361 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); | ||
| 362 | |||
| 363 | { | ||
| 364 | KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]); | ||
| 365 | manager.Close(address, cur_pages); | ||
| 366 | } | 397 | } |
| 367 | |||
| 368 | num_pages -= cur_pages; | ||
| 369 | address += cur_pages * PageSize; | ||
| 370 | } | 398 | } |
| 371 | } | ||
| 372 | 399 | ||
| 373 | void KMemoryManager::Close(const KPageGroup& pg) { | 400 | R_SUCCEED(); |
| 374 | for (const auto& node : pg.Nodes()) { | ||
| 375 | Close(node.GetAddress(), node.GetNumPages()); | ||
| 376 | } | ||
| 377 | } | ||
| 378 | void KMemoryManager::Open(const KPageGroup& pg) { | ||
| 379 | for (const auto& node : pg.Nodes()) { | ||
| 380 | Open(node.GetAddress(), node.GetNumPages()); | ||
| 381 | } | ||
| 382 | } | 401 | } |
| 383 | 402 | ||
| 384 | size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management, | 403 | size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management, |
| @@ -394,18 +413,31 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr manage | |||
| 394 | ASSERT(Common::IsAligned(total_management_size, PageSize)); | 413 | ASSERT(Common::IsAligned(total_management_size, PageSize)); |
| 395 | 414 | ||
| 396 | // Setup region. | 415 | // Setup region. |
| 397 | pool = p; | 416 | m_pool = p; |
| 398 | management_region = management; | 417 | m_management_region = management; |
| 399 | page_reference_counts.resize( | 418 | m_page_reference_counts.resize( |
| 400 | Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize); | 419 | Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize); |
| 401 | ASSERT(Common::IsAligned(management_region, PageSize)); | 420 | ASSERT(Common::IsAligned(m_management_region, PageSize)); |
| 402 | 421 | ||
| 403 | // Initialize the manager's KPageHeap. | 422 | // Initialize the manager's KPageHeap. |
| 404 | heap.Initialize(address, size, management + manager_size, page_heap_size); | 423 | m_heap.Initialize(address, size, management + manager_size, page_heap_size); |
| 405 | 424 | ||
| 406 | return total_management_size; | 425 | return total_management_size; |
| 407 | } | 426 | } |
| 408 | 427 | ||
| 428 | void KMemoryManager::Impl::TrackUnoptimizedAllocation(PAddr block, size_t num_pages) { | ||
| 429 | UNREACHABLE(); | ||
| 430 | } | ||
| 431 | |||
| 432 | void KMemoryManager::Impl::TrackOptimizedAllocation(PAddr block, size_t num_pages) { | ||
| 433 | UNREACHABLE(); | ||
| 434 | } | ||
| 435 | |||
| 436 | bool KMemoryManager::Impl::ProcessOptimizedAllocation(PAddr block, size_t num_pages, | ||
| 437 | u8 fill_pattern) { | ||
| 438 | UNREACHABLE(); | ||
| 439 | } | ||
| 440 | |||
| 409 | size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) { | 441 | size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) { |
| 410 | const size_t ref_count_size = (region_size / PageSize) * sizeof(u16); | 442 | const size_t ref_count_size = (region_size / PageSize) * sizeof(u16); |
| 411 | const size_t optimize_map_size = | 443 | const size_t optimize_map_size = |
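The optimize_map_size computation above pairs with CalculateOptimizedProcessOverheadSize in the k_memory_manager.h hunk further down: one bit per page in the region, rounded up to whole 64-bit words. A standalone sketch with a worked value:

// Standalone sketch of the optimized-process bitmap overhead calculation.
#include <cstddef>
#include <cstdint>

namespace sketch {
constexpr std::size_t PageSize = 0x1000;

constexpr std::size_t AlignUp(std::size_t value, std::size_t align) {
    return (value + align - 1) / align * align;
}

constexpr std::size_t OptimizedProcessOverheadSize(std::size_t region_size) {
    constexpr std::size_t BitsPerWord = 64;
    return (AlignUp(region_size / PageSize, BitsPerWord) / BitsPerWord) * sizeof(std::uint64_t);
}

// 256 MiB of pool -> 65536 pages -> 1024 u64 words -> 8 KiB of bitmap.
static_assert(OptimizedProcessOverheadSize(256 * 1024 * 1024) == 8 * 1024);
} // namespace sketch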
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h index dcb9b6348..401d4e644 100644 --- a/src/core/hle/kernel/k_memory_manager.h +++ b/src/core/hle/kernel/k_memory_manager.h | |||
| @@ -21,11 +21,8 @@ namespace Kernel { | |||
| 21 | 21 | ||
| 22 | class KPageGroup; | 22 | class KPageGroup; |
| 23 | 23 | ||
| 24 | class KMemoryManager final { | 24 | class KMemoryManager { |
| 25 | public: | 25 | public: |
| 26 | YUZU_NON_COPYABLE(KMemoryManager); | ||
| 27 | YUZU_NON_MOVEABLE(KMemoryManager); | ||
| 28 | |||
| 29 | enum class Pool : u32 { | 26 | enum class Pool : u32 { |
| 30 | Application = 0, | 27 | Application = 0, |
| 31 | Applet = 1, | 28 | Applet = 1, |
| @@ -45,16 +42,85 @@ public: | |||
| 45 | enum class Direction : u32 { | 42 | enum class Direction : u32 { |
| 46 | FromFront = 0, | 43 | FromFront = 0, |
| 47 | FromBack = 1, | 44 | FromBack = 1, |
| 48 | |||
| 49 | Shift = 0, | 45 | Shift = 0, |
| 50 | Mask = (0xF << Shift), | 46 | Mask = (0xF << Shift), |
| 51 | }; | 47 | }; |
| 52 | 48 | ||
| 53 | explicit KMemoryManager(Core::System& system_); | 49 | static constexpr size_t MaxManagerCount = 10; |
| 50 | |||
| 51 | explicit KMemoryManager(Core::System& system); | ||
| 54 | 52 | ||
| 55 | void Initialize(VAddr management_region, size_t management_region_size); | 53 | void Initialize(VAddr management_region, size_t management_region_size); |
| 56 | 54 | ||
| 57 | constexpr size_t GetSize(Pool pool) const { | 55 | Result InitializeOptimizedMemory(u64 process_id, Pool pool); |
| 56 | void FinalizeOptimizedMemory(u64 process_id, Pool pool); | ||
| 57 | |||
| 58 | PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option); | ||
| 59 | Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option); | ||
| 60 | Result AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id, | ||
| 61 | u8 fill_pattern); | ||
| 62 | |||
| 63 | Pool GetPool(PAddr address) const { | ||
| 64 | return this->GetManager(address).GetPool(); | ||
| 65 | } | ||
| 66 | |||
| 67 | void Open(PAddr address, size_t num_pages) { | ||
| 68 | // Repeatedly open references until we've done so for all pages. | ||
| 69 | while (num_pages) { | ||
| 70 | auto& manager = this->GetManager(address); | ||
| 71 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); | ||
| 72 | |||
| 73 | { | ||
| 74 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]); | ||
| 75 | manager.Open(address, cur_pages); | ||
| 76 | } | ||
| 77 | |||
| 78 | num_pages -= cur_pages; | ||
| 79 | address += cur_pages * PageSize; | ||
| 80 | } | ||
| 81 | } | ||
| 82 | |||
| 83 | void OpenFirst(PAddr address, size_t num_pages) { | ||
| 84 | // Repeatedly open references until we've done so for all pages. | ||
| 85 | while (num_pages) { | ||
| 86 | auto& manager = this->GetManager(address); | ||
| 87 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); | ||
| 88 | |||
| 89 | { | ||
| 90 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]); | ||
| 91 | manager.OpenFirst(address, cur_pages); | ||
| 92 | } | ||
| 93 | |||
| 94 | num_pages -= cur_pages; | ||
| 95 | address += cur_pages * PageSize; | ||
| 96 | } | ||
| 97 | } | ||
| 98 | |||
| 99 | void Close(PAddr address, size_t num_pages) { | ||
| 100 | // Repeatedly close references until we've done so for all pages. | ||
| 101 | while (num_pages) { | ||
| 102 | auto& manager = this->GetManager(address); | ||
| 103 | const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address)); | ||
| 104 | |||
| 105 | { | ||
| 106 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]); | ||
| 107 | manager.Close(address, cur_pages); | ||
| 108 | } | ||
| 109 | |||
| 110 | num_pages -= cur_pages; | ||
| 111 | address += cur_pages * PageSize; | ||
| 112 | } | ||
| 113 | } | ||
| 114 | |||
| 115 | size_t GetSize() { | ||
| 116 | size_t total = 0; | ||
| 117 | for (size_t i = 0; i < m_num_managers; i++) { | ||
| 118 | total += m_managers[i].GetSize(); | ||
| 119 | } | ||
| 120 | return total; | ||
| 121 | } | ||
| 122 | |||
| 123 | size_t GetSize(Pool pool) { | ||
| 58 | constexpr Direction GetSizeDirection = Direction::FromFront; | 124 | constexpr Direction GetSizeDirection = Direction::FromFront; |
| 59 | size_t total = 0; | 125 | size_t total = 0; |
| 60 | for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr; | 126 | for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr; |
| @@ -64,18 +130,36 @@ public: | |||
| 64 | return total; | 130 | return total; |
| 65 | } | 131 | } |
| 66 | 132 | ||
| 67 | PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option); | 133 | size_t GetFreeSize() { |
| 68 | Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option); | 134 | size_t total = 0; |
| 69 | Result AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id, | 135 | for (size_t i = 0; i < m_num_managers; i++) { |
| 70 | u8 fill_pattern); | 136 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(m_managers[i].GetPool())]); |
| 137 | total += m_managers[i].GetFreeSize(); | ||
| 138 | } | ||
| 139 | return total; | ||
| 140 | } | ||
| 71 | 141 | ||
| 72 | static constexpr size_t MaxManagerCount = 10; | 142 | size_t GetFreeSize(Pool pool) { |
| 143 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]); | ||
| 144 | |||
| 145 | constexpr Direction GetSizeDirection = Direction::FromFront; | ||
| 146 | size_t total = 0; | ||
| 147 | for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr; | ||
| 148 | manager = this->GetNextManager(manager, GetSizeDirection)) { | ||
| 149 | total += manager->GetFreeSize(); | ||
| 150 | } | ||
| 151 | return total; | ||
| 152 | } | ||
| 73 | 153 | ||
| 74 | void Close(PAddr address, size_t num_pages); | 154 | void DumpFreeList(Pool pool) { |
| 75 | void Close(const KPageGroup& pg); | 155 | KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]); |
| 76 | 156 | ||
| 77 | void Open(PAddr address, size_t num_pages); | 157 | constexpr Direction DumpDirection = Direction::FromFront; |
| 78 | void Open(const KPageGroup& pg); | 158 | for (auto* manager = this->GetFirstManager(pool, DumpDirection); manager != nullptr; |
| 159 | manager = this->GetNextManager(manager, DumpDirection)) { | ||
| 160 | manager->DumpFreeList(); | ||
| 161 | } | ||
| 162 | } | ||
| 79 | 163 | ||
| 80 | public: | 164 | public: |
| 81 | static size_t CalculateManagementOverheadSize(size_t region_size) { | 165 | static size_t CalculateManagementOverheadSize(size_t region_size) { |
| @@ -88,14 +172,13 @@ public: | |||
| 88 | } | 172 | } |
| 89 | 173 | ||
| 90 | static constexpr Pool GetPool(u32 option) { | 174 | static constexpr Pool GetPool(u32 option) { |
| 91 | return static_cast<Pool>((static_cast<u32>(option) & static_cast<u32>(Pool::Mask)) >> | 175 | return static_cast<Pool>((option & static_cast<u32>(Pool::Mask)) >> |
| 92 | static_cast<u32>(Pool::Shift)); | 176 | static_cast<u32>(Pool::Shift)); |
| 93 | } | 177 | } |
| 94 | 178 | ||
| 95 | static constexpr Direction GetDirection(u32 option) { | 179 | static constexpr Direction GetDirection(u32 option) { |
| 96 | return static_cast<Direction>( | 180 | return static_cast<Direction>((option & static_cast<u32>(Direction::Mask)) >> |
| 97 | (static_cast<u32>(option) & static_cast<u32>(Direction::Mask)) >> | 181 | static_cast<u32>(Direction::Shift)); |
| 98 | static_cast<u32>(Direction::Shift)); | ||
| 99 | } | 182 | } |
| 100 | 183 | ||
| 101 | static constexpr std::tuple<Pool, Direction> DecodeOption(u32 option) { | 184 | static constexpr std::tuple<Pool, Direction> DecodeOption(u32 option) { |
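DecodeOption splits the u32 option into a pool and a direction using the per-enum Shift/Mask pairs. A standalone sketch of the decoding; Direction's Shift = 0 and Mask = 0xF are taken from the enum earlier in this diff, while the pool field is assumed here to occupy the next nibble (that Shift is not shown in these hunks):

// Standalone sketch (not part of the patch) of the option decoding.
#include <cstdint>
#include <tuple>

namespace sketch {
enum class Pool : std::uint32_t { Application = 0, Applet = 1, System = 2, SystemNonSecure = 3 };
enum class Direction : std::uint32_t { FromFront = 0, FromBack = 1 };

constexpr std::uint32_t DirectionShift = 0;
constexpr std::uint32_t DirectionMask = 0xF << DirectionShift;
constexpr std::uint32_t PoolShift = 4;               // assumed
constexpr std::uint32_t PoolMask = 0xF << PoolShift; // assumed

constexpr std::tuple<Pool, Direction> DecodeOption(std::uint32_t option) {
    return {static_cast<Pool>((option & PoolMask) >> PoolShift),
            static_cast<Direction>((option & DirectionMask) >> DirectionShift)};
}

static_assert(std::get<0>(DecodeOption(0x21)) == Pool::System);
static_assert(std::get<1>(DecodeOption(0x21)) == Direction::FromBack);
} // namespace sketch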
| @@ -103,74 +186,88 @@ public: | |||
| 103 | } | 186 | } |
| 104 | 187 | ||
| 105 | private: | 188 | private: |
| 106 | class Impl final { | 189 | class Impl { |
| 107 | public: | 190 | public: |
| 108 | YUZU_NON_COPYABLE(Impl); | 191 | static size_t CalculateManagementOverheadSize(size_t region_size); |
| 109 | YUZU_NON_MOVEABLE(Impl); | 192 | |
| 193 | static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) { | ||
| 194 | return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / | ||
| 195 | Common::BitSize<u64>()) * | ||
| 196 | sizeof(u64); | ||
| 197 | } | ||
| 110 | 198 | ||
| 199 | public: | ||
| 111 | Impl() = default; | 200 | Impl() = default; |
| 112 | ~Impl() = default; | ||
| 113 | 201 | ||
| 114 | size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end, | 202 | size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end, |
| 115 | Pool p); | 203 | Pool p); |
| 116 | 204 | ||
| 117 | VAddr AllocateBlock(s32 index, bool random) { | 205 | PAddr AllocateBlock(s32 index, bool random) { |
| 118 | return heap.AllocateBlock(index, random); | 206 | return m_heap.AllocateBlock(index, random); |
| 119 | } | 207 | } |
| 120 | 208 | PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) { | |
| 121 | void Free(VAddr addr, size_t num_pages) { | 209 | return m_heap.AllocateAligned(index, num_pages, align_pages); |
| 122 | heap.Free(addr, num_pages); | 210 | } |
| 211 | void Free(PAddr addr, size_t num_pages) { | ||
| 212 | m_heap.Free(addr, num_pages); | ||
| 123 | } | 213 | } |
| 124 | 214 | ||
| 125 | void SetInitialUsedHeapSize(size_t reserved_size) { | 215 | void SetInitialUsedHeapSize(size_t reserved_size) { |
| 126 | heap.SetInitialUsedSize(reserved_size); | 216 | m_heap.SetInitialUsedSize(reserved_size); |
| 127 | } | 217 | } |
| 128 | 218 | ||
| 129 | constexpr Pool GetPool() const { | 219 | void InitializeOptimizedMemory() { |
| 130 | return pool; | 220 | UNIMPLEMENTED(); |
| 131 | } | 221 | } |
| 132 | 222 | ||
| 223 | void TrackUnoptimizedAllocation(PAddr block, size_t num_pages); | ||
| 224 | void TrackOptimizedAllocation(PAddr block, size_t num_pages); | ||
| 225 | |||
| 226 | bool ProcessOptimizedAllocation(PAddr block, size_t num_pages, u8 fill_pattern); | ||
| 227 | |||
| 228 | constexpr Pool GetPool() const { | ||
| 229 | return m_pool; | ||
| 230 | } | ||
| 133 | constexpr size_t GetSize() const { | 231 | constexpr size_t GetSize() const { |
| 134 | return heap.GetSize(); | 232 | return m_heap.GetSize(); |
| 233 | } | ||
| 234 | constexpr PAddr GetEndAddress() const { | ||
| 235 | return m_heap.GetEndAddress(); | ||
| 135 | } | 236 | } |
| 136 | 237 | ||
| 137 | constexpr VAddr GetAddress() const { | 238 | size_t GetFreeSize() const { |
| 138 | return heap.GetAddress(); | 239 | return m_heap.GetFreeSize(); |
| 139 | } | 240 | } |
| 140 | 241 | ||
| 141 | constexpr VAddr GetEndAddress() const { | 242 | void DumpFreeList() const { |
| 142 | return heap.GetEndAddress(); | 243 | UNIMPLEMENTED(); |
| 143 | } | 244 | } |
| 144 | 245 | ||
| 145 | constexpr size_t GetPageOffset(PAddr address) const { | 246 | constexpr size_t GetPageOffset(PAddr address) const { |
| 146 | return heap.GetPageOffset(address); | 247 | return m_heap.GetPageOffset(address); |
| 147 | } | 248 | } |
| 148 | |||
| 149 | constexpr size_t GetPageOffsetToEnd(PAddr address) const { | 249 | constexpr size_t GetPageOffsetToEnd(PAddr address) const { |
| 150 | return heap.GetPageOffsetToEnd(address); | 250 | return m_heap.GetPageOffsetToEnd(address); |
| 151 | } | 251 | } |
| 152 | 252 | ||
| 153 | constexpr void SetNext(Impl* n) { | 253 | constexpr void SetNext(Impl* n) { |
| 154 | next = n; | 254 | m_next = n; |
| 155 | } | 255 | } |
| 156 | |||
| 157 | constexpr void SetPrev(Impl* n) { | 256 | constexpr void SetPrev(Impl* n) { |
| 158 | prev = n; | 257 | m_prev = n; |
| 159 | } | 258 | } |
| 160 | |||
| 161 | constexpr Impl* GetNext() const { | 259 | constexpr Impl* GetNext() const { |
| 162 | return next; | 260 | return m_next; |
| 163 | } | 261 | } |
| 164 | |||
| 165 | constexpr Impl* GetPrev() const { | 262 | constexpr Impl* GetPrev() const { |
| 166 | return prev; | 263 | return m_prev; |
| 167 | } | 264 | } |
| 168 | 265 | ||
| 169 | void OpenFirst(PAddr address, size_t num_pages) { | 266 | void OpenFirst(PAddr address, size_t num_pages) { |
| 170 | size_t index = this->GetPageOffset(address); | 267 | size_t index = this->GetPageOffset(address); |
| 171 | const size_t end = index + num_pages; | 268 | const size_t end = index + num_pages; |
| 172 | while (index < end) { | 269 | while (index < end) { |
| 173 | const RefCount ref_count = (++page_reference_counts[index]); | 270 | const RefCount ref_count = (++m_page_reference_counts[index]); |
| 174 | ASSERT(ref_count == 1); | 271 | ASSERT(ref_count == 1); |
| 175 | 272 | ||
| 176 | index++; | 273 | index++; |
| @@ -181,7 +278,7 @@ private: | |||
| 181 | size_t index = this->GetPageOffset(address); | 278 | size_t index = this->GetPageOffset(address); |
| 182 | const size_t end = index + num_pages; | 279 | const size_t end = index + num_pages; |
| 183 | while (index < end) { | 280 | while (index < end) { |
| 184 | const RefCount ref_count = (++page_reference_counts[index]); | 281 | const RefCount ref_count = (++m_page_reference_counts[index]); |
| 185 | ASSERT(ref_count > 1); | 282 | ASSERT(ref_count > 1); |
| 186 | 283 | ||
| 187 | index++; | 284 | index++; |
| @@ -195,8 +292,8 @@ private: | |||
| 195 | size_t free_start = 0; | 292 | size_t free_start = 0; |
| 196 | size_t free_count = 0; | 293 | size_t free_count = 0; |
| 197 | while (index < end) { | 294 | while (index < end) { |
| 198 | ASSERT(page_reference_counts[index] > 0); | 295 | ASSERT(m_page_reference_counts[index] > 0); |
| 199 | const RefCount ref_count = (--page_reference_counts[index]); | 296 | const RefCount ref_count = (--m_page_reference_counts[index]); |
| 200 | 297 | ||
| 201 | // Keep track of how many zero refcounts we see in a row, to minimize calls to free. | 298 | // Keep track of how many zero refcounts we see in a row, to minimize calls to free. |
| 202 | if (ref_count == 0) { | 299 | if (ref_count == 0) { |
| @@ -208,7 +305,7 @@ private: | |||
| 208 | } | 305 | } |
| 209 | } else { | 306 | } else { |
| 210 | if (free_count > 0) { | 307 | if (free_count > 0) { |
| 211 | this->Free(heap.GetAddress() + free_start * PageSize, free_count); | 308 | this->Free(m_heap.GetAddress() + free_start * PageSize, free_count); |
| 212 | free_count = 0; | 309 | free_count = 0; |
| 213 | } | 310 | } |
| 214 | } | 311 | } |
| @@ -217,44 +314,36 @@ private: | |||
| 217 | } | 314 | } |
| 218 | 315 | ||
| 219 | if (free_count > 0) { | 316 | if (free_count > 0) { |
| 220 | this->Free(heap.GetAddress() + free_start * PageSize, free_count); | 317 | this->Free(m_heap.GetAddress() + free_start * PageSize, free_count); |
| 221 | } | 318 | } |
| 222 | } | 319 | } |
| 223 | 320 | ||
| 224 | static size_t CalculateManagementOverheadSize(size_t region_size); | ||
| 225 | |||
| 226 | static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) { | ||
| 227 | return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) / | ||
| 228 | Common::BitSize<u64>()) * | ||
| 229 | sizeof(u64); | ||
| 230 | } | ||
| 231 | |||
| 232 | private: | 321 | private: |
| 233 | using RefCount = u16; | 322 | using RefCount = u16; |
| 234 | 323 | ||
| 235 | KPageHeap heap; | 324 | KPageHeap m_heap; |
| 236 | std::vector<RefCount> page_reference_counts; | 325 | std::vector<RefCount> m_page_reference_counts; |
| 237 | VAddr management_region{}; | 326 | VAddr m_management_region{}; |
| 238 | Pool pool{}; | 327 | Pool m_pool{}; |
| 239 | Impl* next{}; | 328 | Impl* m_next{}; |
| 240 | Impl* prev{}; | 329 | Impl* m_prev{}; |
| 241 | }; | 330 | }; |
| 242 | 331 | ||
| 243 | private: | 332 | private: |
| 244 | Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) { | 333 | Impl& GetManager(PAddr address) { |
| 245 | return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; | 334 | return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; |
| 246 | } | 335 | } |
| 247 | 336 | ||
| 248 | const Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) const { | 337 | const Impl& GetManager(PAddr address) const { |
| 249 | return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; | 338 | return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()]; |
| 250 | } | 339 | } |
| 251 | 340 | ||
| 252 | constexpr Impl* GetFirstManager(Pool pool, Direction dir) const { | 341 | constexpr Impl* GetFirstManager(Pool pool, Direction dir) { |
| 253 | return dir == Direction::FromBack ? pool_managers_tail[static_cast<size_t>(pool)] | 342 | return dir == Direction::FromBack ? m_pool_managers_tail[static_cast<size_t>(pool)] |
| 254 | : pool_managers_head[static_cast<size_t>(pool)]; | 343 | : m_pool_managers_head[static_cast<size_t>(pool)]; |
| 255 | } | 344 | } |
| 256 | 345 | ||
| 257 | constexpr Impl* GetNextManager(Impl* cur, Direction dir) const { | 346 | constexpr Impl* GetNextManager(Impl* cur, Direction dir) { |
| 258 | if (dir == Direction::FromBack) { | 347 | if (dir == Direction::FromBack) { |
| 259 | return cur->GetPrev(); | 348 | return cur->GetPrev(); |
| 260 | } else { | 349 | } else { |
| @@ -263,15 +352,21 @@ private: | |||
| 263 | } | 352 | } |
| 264 | 353 | ||
| 265 | Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir, | 354 | Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir, |
| 266 | bool random); | 355 | bool unoptimized, bool random); |
| 267 | 356 | ||
| 268 | private: | 357 | private: |
| 269 | Core::System& system; | 358 | template <typename T> |
| 270 | std::array<KLightLock, static_cast<size_t>(Pool::Count)> pool_locks; | 359 | using PoolArray = std::array<T, static_cast<size_t>(Pool::Count)>; |
| 271 | std::array<Impl*, MaxManagerCount> pool_managers_head{}; | 360 | |
| 272 | std::array<Impl*, MaxManagerCount> pool_managers_tail{}; | 361 | Core::System& m_system; |
| 273 | std::array<Impl, MaxManagerCount> managers; | 362 | const KMemoryLayout& m_memory_layout; |
| 274 | size_t num_managers{}; | 363 | PoolArray<KLightLock> m_pool_locks; |
| 364 | std::array<Impl*, MaxManagerCount> m_pool_managers_head{}; | ||
| 365 | std::array<Impl*, MaxManagerCount> m_pool_managers_tail{}; | ||
| 366 | std::array<Impl, MaxManagerCount> m_managers; | ||
| 367 | size_t m_num_managers{}; | ||
| 368 | PoolArray<u64> m_optimized_process_ids{}; | ||
| 369 | PoolArray<bool> m_has_optimized_process{}; | ||
| 275 | }; | 370 | }; |
| 276 | 371 | ||
| 277 | } // namespace Kernel | 372 | } // namespace Kernel |
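The renamed Impl members above keep the same bookkeeping idea: each manager holds a per-page reference count and, when closing a range, batches contiguous pages whose count drops to zero so the underlying heap is freed in runs rather than page by page. Below is a minimal sketch of that batching pattern; PageSize, the refcount vector, and the free_pages callback are illustrative stand-ins, not the yuzu types.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

constexpr std::size_t PageSize = 0x1000;

// Decrement refcounts for [first_page, first_page + num_pages) and free zero-refcount
// pages in contiguous runs, so free_pages is called once per run instead of per page.
void CloseRange(std::vector<std::uint16_t>& ref_counts, std::uintptr_t heap_address,
                std::size_t first_page, std::size_t num_pages,
                const std::function<void(std::uintptr_t, std::size_t)>& free_pages) {
    std::size_t index = first_page;
    const std::size_t end = index + num_pages;
    std::size_t free_start = 0;
    std::size_t free_count = 0;
    while (index < end) {
        const std::uint16_t ref_count = --ref_counts[index];
        if (ref_count == 0) {
            // Extend the current run of freeable pages, or start a new one.
            if (free_count > 0) {
                ++free_count;
            } else {
                free_start = index;
                free_count = 1;
            }
        } else if (free_count > 0) {
            // The run ended; flush it with a single call.
            free_pages(heap_address + free_start * PageSize, free_count);
            free_count = 0;
        }
        ++index;
    }
    if (free_count > 0) {
        free_pages(heap_address + free_start * PageSize, free_count);
    }
}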
diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h index 7e2fcccdc..e5630c1ac 100644 --- a/src/core/hle/kernel/k_memory_region_type.h +++ b/src/core/hle/kernel/k_memory_region_type.h | |||
| @@ -142,32 +142,38 @@ private: | |||
| 142 | 142 | ||
| 143 | } // namespace impl | 143 | } // namespace impl |
| 144 | 144 | ||
| 145 | constexpr auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue(); | 145 | constexpr inline auto KMemoryRegionType_None = impl::KMemoryRegionTypeValue(); |
| 146 | constexpr auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2); | 146 | |
| 147 | constexpr auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2); | 147 | constexpr inline auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2); |
| 148 | constexpr inline auto KMemoryRegionType_Dram = KMemoryRegionType_None.DeriveInitial(1, 2); | ||
| 148 | static_assert(KMemoryRegionType_Kernel.GetValue() == 0x1); | 149 | static_assert(KMemoryRegionType_Kernel.GetValue() == 0x1); |
| 149 | static_assert(KMemoryRegionType_Dram.GetValue() == 0x2); | 150 | static_assert(KMemoryRegionType_Dram.GetValue() == 0x2); |
| 150 | 151 | ||
| 151 | constexpr auto KMemoryRegionType_DramKernelBase = | 152 | // constexpr inline auto KMemoryRegionType_CoreLocalRegion = |
| 153 | // KMemoryRegionType_None.DeriveInitial(2).Finalize(); | ||
| 154 | // static_assert(KMemoryRegionType_CoreLocalRegion.GetValue() == 0x4); | ||
| 155 | |||
| 156 | constexpr inline auto KMemoryRegionType_DramKernelBase = | ||
| 152 | KMemoryRegionType_Dram.DeriveSparse(0, 3, 0) | 157 | KMemoryRegionType_Dram.DeriveSparse(0, 3, 0) |
| 153 | .SetAttribute(KMemoryRegionAttr_NoUserMap) | 158 | .SetAttribute(KMemoryRegionAttr_NoUserMap) |
| 154 | .SetAttribute(KMemoryRegionAttr_CarveoutProtected); | 159 | .SetAttribute(KMemoryRegionAttr_CarveoutProtected); |
| 155 | constexpr auto KMemoryRegionType_DramReservedBase = KMemoryRegionType_Dram.DeriveSparse(0, 3, 1); | 160 | constexpr inline auto KMemoryRegionType_DramReservedBase = |
| 156 | constexpr auto KMemoryRegionType_DramHeapBase = | 161 | KMemoryRegionType_Dram.DeriveSparse(0, 3, 1); |
| 162 | constexpr inline auto KMemoryRegionType_DramHeapBase = | ||
| 157 | KMemoryRegionType_Dram.DeriveSparse(0, 3, 2).SetAttribute(KMemoryRegionAttr_LinearMapped); | 163 | KMemoryRegionType_Dram.DeriveSparse(0, 3, 2).SetAttribute(KMemoryRegionAttr_LinearMapped); |
| 158 | static_assert(KMemoryRegionType_DramKernelBase.GetValue() == | 164 | static_assert(KMemoryRegionType_DramKernelBase.GetValue() == |
| 159 | (0xE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap)); | 165 | (0xE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap)); |
| 160 | static_assert(KMemoryRegionType_DramReservedBase.GetValue() == (0x16)); | 166 | static_assert(KMemoryRegionType_DramReservedBase.GetValue() == (0x16)); |
| 161 | static_assert(KMemoryRegionType_DramHeapBase.GetValue() == (0x26 | KMemoryRegionAttr_LinearMapped)); | 167 | static_assert(KMemoryRegionType_DramHeapBase.GetValue() == (0x26 | KMemoryRegionAttr_LinearMapped)); |
| 162 | 168 | ||
| 163 | constexpr auto KMemoryRegionType_DramKernelCode = | 169 | constexpr inline auto KMemoryRegionType_DramKernelCode = |
| 164 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 0); | 170 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 0); |
| 165 | constexpr auto KMemoryRegionType_DramKernelSlab = | 171 | constexpr inline auto KMemoryRegionType_DramKernelSlab = |
| 166 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 1); | 172 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 1); |
| 167 | constexpr auto KMemoryRegionType_DramKernelPtHeap = | 173 | constexpr inline auto KMemoryRegionType_DramKernelPtHeap = |
| 168 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 2).SetAttribute( | 174 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 2).SetAttribute( |
| 169 | KMemoryRegionAttr_LinearMapped); | 175 | KMemoryRegionAttr_LinearMapped); |
| 170 | constexpr auto KMemoryRegionType_DramKernelInitPt = | 176 | constexpr inline auto KMemoryRegionType_DramKernelInitPt = |
| 171 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 3).SetAttribute( | 177 | KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 3).SetAttribute( |
| 172 | KMemoryRegionAttr_LinearMapped); | 178 | KMemoryRegionAttr_LinearMapped); |
| 173 | static_assert(KMemoryRegionType_DramKernelCode.GetValue() == | 179 | static_assert(KMemoryRegionType_DramKernelCode.GetValue() == |
| @@ -181,32 +187,40 @@ static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() == | |||
| 181 | (0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | | 187 | (0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | |
| 182 | KMemoryRegionAttr_LinearMapped)); | 188 | KMemoryRegionAttr_LinearMapped)); |
| 183 | 189 | ||
| 184 | constexpr auto KMemoryRegionType_DramReservedEarly = | 190 | constexpr inline auto KMemoryRegionType_DramKernelSecureAppletMemory = |
| 191 | KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute( | ||
| 192 | KMemoryRegionAttr_LinearMapped); | ||
| 193 | static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() == | ||
| 194 | (0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | | ||
| 195 | KMemoryRegionAttr_LinearMapped)); | ||
| 196 | |||
| 197 | constexpr inline auto KMemoryRegionType_DramReservedEarly = | ||
| 185 | KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap); | 198 | KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap); |
| 186 | static_assert(KMemoryRegionType_DramReservedEarly.GetValue() == | 199 | static_assert(KMemoryRegionType_DramReservedEarly.GetValue() == |
| 187 | (0x16 | KMemoryRegionAttr_NoUserMap)); | 200 | (0x16 | KMemoryRegionAttr_NoUserMap)); |
| 188 | 201 | ||
| 189 | constexpr auto KMemoryRegionType_KernelTraceBuffer = | 202 | constexpr inline auto KMemoryRegionType_KernelTraceBuffer = |
| 190 | KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 0) | 203 | KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 0) |
| 191 | .SetAttribute(KMemoryRegionAttr_LinearMapped) | 204 | .SetAttribute(KMemoryRegionAttr_LinearMapped) |
| 192 | .SetAttribute(KMemoryRegionAttr_UserReadOnly); | 205 | .SetAttribute(KMemoryRegionAttr_UserReadOnly); |
| 193 | constexpr auto KMemoryRegionType_OnMemoryBootImage = | 206 | constexpr inline auto KMemoryRegionType_OnMemoryBootImage = |
| 194 | KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 1); | 207 | KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 1); |
| 195 | constexpr auto KMemoryRegionType_DTB = KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2); | 208 | constexpr inline auto KMemoryRegionType_DTB = |
| 209 | KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2); | ||
| 196 | static_assert(KMemoryRegionType_KernelTraceBuffer.GetValue() == | 210 | static_assert(KMemoryRegionType_KernelTraceBuffer.GetValue() == |
| 197 | (0xD6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_UserReadOnly)); | 211 | (0xD6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_UserReadOnly)); |
| 198 | static_assert(KMemoryRegionType_OnMemoryBootImage.GetValue() == 0x156); | 212 | static_assert(KMemoryRegionType_OnMemoryBootImage.GetValue() == 0x156); |
| 199 | static_assert(KMemoryRegionType_DTB.GetValue() == 0x256); | 213 | static_assert(KMemoryRegionType_DTB.GetValue() == 0x256); |
| 200 | 214 | ||
| 201 | constexpr auto KMemoryRegionType_DramPoolPartition = | 215 | constexpr inline auto KMemoryRegionType_DramPoolPartition = |
| 202 | KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap); | 216 | KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap); |
| 203 | static_assert(KMemoryRegionType_DramPoolPartition.GetValue() == | 217 | static_assert(KMemoryRegionType_DramPoolPartition.GetValue() == |
| 204 | (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); | 218 | (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); |
| 205 | 219 | ||
| 206 | constexpr auto KMemoryRegionType_DramPoolManagement = | 220 | constexpr inline auto KMemoryRegionType_DramPoolManagement = |
| 207 | KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute( | 221 | KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute( |
| 208 | KMemoryRegionAttr_CarveoutProtected); | 222 | KMemoryRegionAttr_CarveoutProtected); |
| 209 | constexpr auto KMemoryRegionType_DramUserPool = | 223 | constexpr inline auto KMemoryRegionType_DramUserPool = |
| 210 | KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition(); | 224 | KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition(); |
| 211 | static_assert(KMemoryRegionType_DramPoolManagement.GetValue() == | 225 | static_assert(KMemoryRegionType_DramPoolManagement.GetValue() == |
| 212 | (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | | 226 | (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | |
| @@ -214,11 +228,13 @@ static_assert(KMemoryRegionType_DramPoolManagement.GetValue() == | |||
| 214 | static_assert(KMemoryRegionType_DramUserPool.GetValue() == | 228 | static_assert(KMemoryRegionType_DramUserPool.GetValue() == |
| 215 | (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); | 229 | (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); |
| 216 | 230 | ||
| 217 | constexpr auto KMemoryRegionType_DramApplicationPool = KMemoryRegionType_DramUserPool.Derive(4, 0); | 231 | constexpr inline auto KMemoryRegionType_DramApplicationPool = |
| 218 | constexpr auto KMemoryRegionType_DramAppletPool = KMemoryRegionType_DramUserPool.Derive(4, 1); | 232 | KMemoryRegionType_DramUserPool.Derive(4, 0); |
| 219 | constexpr auto KMemoryRegionType_DramSystemNonSecurePool = | 233 | constexpr inline auto KMemoryRegionType_DramAppletPool = |
| 234 | KMemoryRegionType_DramUserPool.Derive(4, 1); | ||
| 235 | constexpr inline auto KMemoryRegionType_DramSystemNonSecurePool = | ||
| 220 | KMemoryRegionType_DramUserPool.Derive(4, 2); | 236 | KMemoryRegionType_DramUserPool.Derive(4, 2); |
| 221 | constexpr auto KMemoryRegionType_DramSystemPool = | 237 | constexpr inline auto KMemoryRegionType_DramSystemPool = |
| 222 | KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected); | 238 | KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected); |
| 223 | static_assert(KMemoryRegionType_DramApplicationPool.GetValue() == | 239 | static_assert(KMemoryRegionType_DramApplicationPool.GetValue() == |
| 224 | (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); | 240 | (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap)); |
| @@ -230,50 +246,55 @@ static_assert(KMemoryRegionType_DramSystemPool.GetValue() == | |||
| 230 | (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | | 246 | (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | |
| 231 | KMemoryRegionAttr_CarveoutProtected)); | 247 | KMemoryRegionAttr_CarveoutProtected)); |
| 232 | 248 | ||
| 233 | constexpr auto KMemoryRegionType_VirtualDramHeapBase = KMemoryRegionType_Dram.DeriveSparse(1, 3, 0); | 249 | constexpr inline auto KMemoryRegionType_VirtualDramHeapBase = |
| 234 | constexpr auto KMemoryRegionType_VirtualDramKernelPtHeap = | 250 | KMemoryRegionType_Dram.DeriveSparse(1, 3, 0); |
| 251 | constexpr inline auto KMemoryRegionType_VirtualDramKernelPtHeap = | ||
| 235 | KMemoryRegionType_Dram.DeriveSparse(1, 3, 1); | 252 | KMemoryRegionType_Dram.DeriveSparse(1, 3, 1); |
| 236 | constexpr auto KMemoryRegionType_VirtualDramKernelTraceBuffer = | 253 | constexpr inline auto KMemoryRegionType_VirtualDramKernelTraceBuffer = |
| 237 | KMemoryRegionType_Dram.DeriveSparse(1, 3, 2); | 254 | KMemoryRegionType_Dram.DeriveSparse(1, 3, 2); |
| 238 | static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A); | 255 | static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A); |
| 239 | static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A); | 256 | static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A); |
| 240 | static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A); | 257 | static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A); |
| 241 | 258 | ||
| 242 | // UNUSED: .DeriveSparse(2, 2, 0); | 259 | // UNUSED: .DeriveSparse(2, 2, 0); |
| 243 | constexpr auto KMemoryRegionType_VirtualDramUnknownDebug = | 260 | constexpr inline auto KMemoryRegionType_VirtualDramUnknownDebug = |
| 244 | KMemoryRegionType_Dram.DeriveSparse(2, 2, 1); | 261 | KMemoryRegionType_Dram.DeriveSparse(2, 2, 1); |
| 245 | static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52)); | 262 | static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52)); |
| 246 | 263 | ||
| 247 | constexpr auto KMemoryRegionType_VirtualDramKernelInitPt = | 264 | constexpr inline auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory = |
| 265 | KMemoryRegionType_Dram.DeriveSparse(3, 1, 0); | ||
| 266 | static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62)); | ||
| 267 | |||
| 268 | constexpr inline auto KMemoryRegionType_VirtualDramKernelInitPt = | ||
| 248 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0); | 269 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0); |
| 249 | constexpr auto KMemoryRegionType_VirtualDramPoolManagement = | 270 | constexpr inline auto KMemoryRegionType_VirtualDramPoolManagement = |
| 250 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1); | 271 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1); |
| 251 | constexpr auto KMemoryRegionType_VirtualDramUserPool = | 272 | constexpr inline auto KMemoryRegionType_VirtualDramUserPool = |
| 252 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2); | 273 | KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2); |
| 253 | static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A); | 274 | static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A); |
| 254 | static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A); | 275 | static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A); |
| 255 | static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A); | 276 | static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A); |
| 256 | 277 | ||
| 257 | // NOTE: For an unknown reason, the pools are derived out-of-order here. It's worth eventually trying | 278 | // NOTE: For an unknown reason, the pools are derived out-of-order here. |
| 258 | // to understand why Nintendo made this choice. | 279 | // It's worth eventually trying to understand why Nintendo made this choice. |
| 259 | // UNUSED: .Derive(6, 0); | 280 | // UNUSED: .Derive(6, 0); |
| 260 | // UNUSED: .Derive(6, 1); | 281 | // UNUSED: .Derive(6, 1); |
| 261 | constexpr auto KMemoryRegionType_VirtualDramAppletPool = | 282 | constexpr inline auto KMemoryRegionType_VirtualDramAppletPool = |
| 262 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 2); | 283 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 2); |
| 263 | constexpr auto KMemoryRegionType_VirtualDramApplicationPool = | 284 | constexpr inline auto KMemoryRegionType_VirtualDramApplicationPool = |
| 264 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 3); | 285 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 3); |
| 265 | constexpr auto KMemoryRegionType_VirtualDramSystemNonSecurePool = | 286 | constexpr inline auto KMemoryRegionType_VirtualDramSystemNonSecurePool = |
| 266 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 4); | 287 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 4); |
| 267 | constexpr auto KMemoryRegionType_VirtualDramSystemPool = | 288 | constexpr inline auto KMemoryRegionType_VirtualDramSystemPool = |
| 268 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 5); | 289 | KMemoryRegionType_VirtualDramUserPool.Derive(6, 5); |
| 269 | static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A); | 290 | static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A); |
| 270 | static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A); | 291 | static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A); |
| 271 | static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A); | 292 | static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A); |
| 272 | static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A); | 293 | static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A); |
| 273 | 294 | ||
| 274 | constexpr auto KMemoryRegionType_ArchDeviceBase = | 295 | constexpr inline auto KMemoryRegionType_ArchDeviceBase = |
| 275 | KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly(); | 296 | KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly(); |
| 276 | constexpr auto KMemoryRegionType_BoardDeviceBase = | 297 | constexpr inline auto KMemoryRegionType_BoardDeviceBase = |
| 277 | KMemoryRegionType_Kernel.DeriveTransition(0, 2).SetDenseOnly(); | 298 | KMemoryRegionType_Kernel.DeriveTransition(0, 2).SetDenseOnly(); |
| 278 | static_assert(KMemoryRegionType_ArchDeviceBase.GetValue() == 0x5); | 299 | static_assert(KMemoryRegionType_ArchDeviceBase.GetValue() == 0x5); |
| 279 | static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5); | 300 | static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5); |
| @@ -284,7 +305,7 @@ static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5); | |||
| 284 | #error "Unimplemented" | 305 | #error "Unimplemented" |
| 285 | #else | 306 | #else |
| 286 | // Default to no architecture devices. | 307 | // Default to no architecture devices. |
| 287 | constexpr auto NumArchitectureDeviceRegions = 0; | 308 | constexpr inline auto NumArchitectureDeviceRegions = 0; |
| 288 | #endif | 309 | #endif |
| 289 | static_assert(NumArchitectureDeviceRegions >= 0); | 310 | static_assert(NumArchitectureDeviceRegions >= 0); |
| 290 | 311 | ||
| @@ -292,34 +313,35 @@ static_assert(NumArchitectureDeviceRegions >= 0); | |||
| 292 | #include "core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc" | 313 | #include "core/hle/kernel/board/nintendo/nx/k_memory_region_device_types.inc" |
| 293 | #else | 314 | #else |
| 294 | // Default to no board devices. | 315 | // Default to no board devices. |
| 295 | constexpr auto NumBoardDeviceRegions = 0; | 316 | constexpr inline auto NumBoardDeviceRegions = 0; |
| 296 | #endif | 317 | #endif |
| 297 | static_assert(NumBoardDeviceRegions >= 0); | 318 | static_assert(NumBoardDeviceRegions >= 0); |
| 298 | 319 | ||
| 299 | constexpr auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0); | 320 | constexpr inline auto KMemoryRegionType_KernelCode = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0); |
| 300 | constexpr auto KMemoryRegionType_KernelStack = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1); | 321 | constexpr inline auto KMemoryRegionType_KernelStack = |
| 301 | constexpr auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2); | 322 | KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1); |
| 302 | constexpr auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3); | 323 | constexpr inline auto KMemoryRegionType_KernelMisc = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2); |
| 324 | constexpr inline auto KMemoryRegionType_KernelSlab = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3); | ||
| 303 | static_assert(KMemoryRegionType_KernelCode.GetValue() == 0x19); | 325 | static_assert(KMemoryRegionType_KernelCode.GetValue() == 0x19); |
| 304 | static_assert(KMemoryRegionType_KernelStack.GetValue() == 0x29); | 326 | static_assert(KMemoryRegionType_KernelStack.GetValue() == 0x29); |
| 305 | static_assert(KMemoryRegionType_KernelMisc.GetValue() == 0x49); | 327 | static_assert(KMemoryRegionType_KernelMisc.GetValue() == 0x49); |
| 306 | static_assert(KMemoryRegionType_KernelSlab.GetValue() == 0x89); | 328 | static_assert(KMemoryRegionType_KernelSlab.GetValue() == 0x89); |
| 307 | 329 | ||
| 308 | constexpr auto KMemoryRegionType_KernelMiscDerivedBase = | 330 | constexpr inline auto KMemoryRegionType_KernelMiscDerivedBase = |
| 309 | KMemoryRegionType_KernelMisc.DeriveTransition(); | 331 | KMemoryRegionType_KernelMisc.DeriveTransition(); |
| 310 | static_assert(KMemoryRegionType_KernelMiscDerivedBase.GetValue() == 0x149); | 332 | static_assert(KMemoryRegionType_KernelMiscDerivedBase.GetValue() == 0x149); |
| 311 | 333 | ||
| 312 | // UNUSED: .Derive(7, 0); | 334 | // UNUSED: .Derive(7, 0); |
| 313 | constexpr auto KMemoryRegionType_KernelMiscMainStack = | 335 | constexpr inline auto KMemoryRegionType_KernelMiscMainStack = |
| 314 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 1); | 336 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 1); |
| 315 | constexpr auto KMemoryRegionType_KernelMiscMappedDevice = | 337 | constexpr inline auto KMemoryRegionType_KernelMiscMappedDevice = |
| 316 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 2); | 338 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 2); |
| 317 | constexpr auto KMemoryRegionType_KernelMiscExceptionStack = | 339 | constexpr inline auto KMemoryRegionType_KernelMiscExceptionStack = |
| 318 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 3); | 340 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 3); |
| 319 | constexpr auto KMemoryRegionType_KernelMiscUnknownDebug = | 341 | constexpr inline auto KMemoryRegionType_KernelMiscUnknownDebug = |
| 320 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 4); | 342 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 4); |
| 321 | // UNUSED: .Derive(7, 5); | 343 | // UNUSED: .Derive(7, 5); |
| 322 | constexpr auto KMemoryRegionType_KernelMiscIdleStack = | 344 | constexpr inline auto KMemoryRegionType_KernelMiscIdleStack = |
| 323 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 6); | 345 | KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 6); |
| 324 | static_assert(KMemoryRegionType_KernelMiscMainStack.GetValue() == 0xB49); | 346 | static_assert(KMemoryRegionType_KernelMiscMainStack.GetValue() == 0xB49); |
| 325 | static_assert(KMemoryRegionType_KernelMiscMappedDevice.GetValue() == 0xD49); | 347 | static_assert(KMemoryRegionType_KernelMiscMappedDevice.GetValue() == 0xD49); |
| @@ -327,7 +349,8 @@ static_assert(KMemoryRegionType_KernelMiscExceptionStack.GetValue() == 0x1349); | |||
| 327 | static_assert(KMemoryRegionType_KernelMiscUnknownDebug.GetValue() == 0x1549); | 349 | static_assert(KMemoryRegionType_KernelMiscUnknownDebug.GetValue() == 0x1549); |
| 328 | static_assert(KMemoryRegionType_KernelMiscIdleStack.GetValue() == 0x2349); | 350 | static_assert(KMemoryRegionType_KernelMiscIdleStack.GetValue() == 0x2349); |
| 329 | 351 | ||
| 330 | constexpr auto KMemoryRegionType_KernelTemp = KMemoryRegionType_Kernel.Advance(2).Derive(2, 0); | 352 | constexpr inline auto KMemoryRegionType_KernelTemp = |
| 353 | KMemoryRegionType_Kernel.Advance(2).Derive(2, 0); | ||
| 331 | static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31); | 354 | static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31); |
| 332 | 355 | ||
| 333 | constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) { | 356 | constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) { |
| @@ -335,6 +358,8 @@ constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) { | |||
| 335 | return KMemoryRegionType_VirtualDramKernelTraceBuffer; | 358 | return KMemoryRegionType_VirtualDramKernelTraceBuffer; |
| 336 | } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) { | 359 | } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) { |
| 337 | return KMemoryRegionType_VirtualDramKernelPtHeap; | 360 | return KMemoryRegionType_VirtualDramKernelPtHeap; |
| 361 | } else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) { | ||
| 362 | return KMemoryRegionType_VirtualDramKernelSecureAppletMemory; | ||
| 338 | } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) { | 363 | } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) { |
| 339 | return KMemoryRegionType_VirtualDramUnknownDebug; | 364 | return KMemoryRegionType_VirtualDramUnknownDebug; |
| 340 | } else { | 365 | } else { |
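The constexpr inline region-type constants above are validated with static_asserts on their derived bit patterns, and GetTypeForVirtualLinearMapping dispatches on ancestry and attribute bits. The sketch below illustrates only the general "ancestor bits are a subset of the descendant's bits" idea with made-up values and names; the real KMemoryRegionTypeValue derivation via DeriveSparse/SetAttribute is more involved.

#include <cstdint>

// "Ancestor" here means: every bit set in the ancestor's value is also set in the
// descendant's value. The concrete values below are made up for illustration.
constexpr bool IsAncestorOf(std::uint32_t ancestor_value, std::uint32_t descendant_value) {
    return (descendant_value & ancestor_value) == ancestor_value;
}

// Attribute-presence test, in the same spirit as the
// (type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id check above.
constexpr bool HasAttribute(std::uint32_t type_id, std::uint32_t attribute) {
    return (type_id | attribute) == type_id;
}

constexpr std::uint32_t Type_Base = 0x2;         // hypothetical root type
constexpr std::uint32_t Type_Child = 0x26;       // derived: adds bits on top of the root
constexpr std::uint32_t Type_Grandchild = 0x1A6; // derived further, plus attribute bits

static_assert(IsAncestorOf(Type_Base, Type_Child));
static_assert(IsAncestorOf(Type_Child, Type_Grandchild));
static_assert(!IsAncestorOf(Type_Child, Type_Base));
static_assert(HasAttribute(Type_Grandchild, 0x180));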
diff --git a/src/core/hle/kernel/k_page_bitmap.h b/src/core/hle/kernel/k_page_bitmap.h index c97b3dc0b..0ff987732 100644 --- a/src/core/hle/kernel/k_page_bitmap.h +++ b/src/core/hle/kernel/k_page_bitmap.h | |||
| @@ -16,107 +16,126 @@ | |||
| 16 | namespace Kernel { | 16 | namespace Kernel { |
| 17 | 17 | ||
| 18 | class KPageBitmap { | 18 | class KPageBitmap { |
| 19 | private: | 19 | public: |
| 20 | class RandomBitGenerator { | 20 | class RandomBitGenerator { |
| 21 | private: | 21 | public: |
| 22 | Common::TinyMT rng{}; | 22 | RandomBitGenerator() { |
| 23 | u32 entropy{}; | 23 | m_rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64())); |
| 24 | u32 bits_available{}; | 24 | } |
| 25 | |||
| 26 | u64 SelectRandomBit(u64 bitmap) { | ||
| 27 | u64 selected = 0; | ||
| 28 | |||
| 29 | for (size_t cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2; cur_num_bits != 0; | ||
| 30 | cur_num_bits /= 2) { | ||
| 31 | const u64 high = (bitmap >> cur_num_bits); | ||
| 32 | const u64 low = (bitmap & (~(UINT64_C(0xFFFFFFFFFFFFFFFF) << cur_num_bits))); | ||
| 33 | |||
| 34 | // Choose high if we have high and (don't have low or select high randomly). | ||
| 35 | if (high && (low == 0 || this->GenerateRandomBit())) { | ||
| 36 | bitmap = high; | ||
| 37 | selected += cur_num_bits; | ||
| 38 | } else { | ||
| 39 | bitmap = low; | ||
| 40 | selected += 0; | ||
| 41 | } | ||
| 42 | } | ||
| 43 | |||
| 44 | return selected; | ||
| 45 | } | ||
| 46 | |||
| 47 | u64 GenerateRandom(u64 max) { | ||
| 48 | // Determine the number of bits we need. | ||
| 49 | const u64 bits_needed = 1 + (Common::BitSize<decltype(max)>() - std::countl_zero(max)); | ||
| 50 | |||
| 51 | // Generate a random value of the desired bitwidth. | ||
| 52 | const u64 rnd = this->GenerateRandomBits(static_cast<u32>(bits_needed)); | ||
| 53 | |||
| 54 | // Adjust the value to be in range. | ||
| 55 | return rnd - ((rnd / max) * max); | ||
| 56 | } | ||
| 25 | 57 | ||
| 26 | private: | 58 | private: |
| 27 | void RefreshEntropy() { | 59 | void RefreshEntropy() { |
| 28 | entropy = rng.GenerateRandomU32(); | 60 | m_entropy = m_rng.GenerateRandomU32(); |
| 29 | bits_available = static_cast<u32>(Common::BitSize<decltype(entropy)>()); | 61 | m_bits_available = static_cast<u32>(Common::BitSize<decltype(m_entropy)>()); |
| 30 | } | 62 | } |
| 31 | 63 | ||
| 32 | bool GenerateRandomBit() { | 64 | bool GenerateRandomBit() { |
| 33 | if (bits_available == 0) { | 65 | if (m_bits_available == 0) { |
| 34 | this->RefreshEntropy(); | 66 | this->RefreshEntropy(); |
| 35 | } | 67 | } |
| 36 | 68 | ||
| 37 | const bool rnd_bit = (entropy & 1) != 0; | 69 | const bool rnd_bit = (m_entropy & 1) != 0; |
| 38 | entropy >>= 1; | 70 | m_entropy >>= 1; |
| 39 | --bits_available; | 71 | --m_bits_available; |
| 40 | return rnd_bit; | 72 | return rnd_bit; |
| 41 | } | 73 | } |
| 42 | 74 | ||
| 43 | public: | 75 | u64 GenerateRandomBits(u32 num_bits) { |
| 44 | RandomBitGenerator() { | 76 | u64 result = 0; |
| 45 | rng.Initialize(static_cast<u32>(KSystemControl::GenerateRandomU64())); | ||
| 46 | } | ||
| 47 | 77 | ||
| 48 | std::size_t SelectRandomBit(u64 bitmap) { | 78 | // Iteratively add random bits to our result. |
| 49 | u64 selected = 0; | 79 | while (num_bits > 0) { |
| 80 | // Ensure we have random bits to take from. | ||
| 81 | if (m_bits_available == 0) { | ||
| 82 | this->RefreshEntropy(); | ||
| 83 | } | ||
| 50 | 84 | ||
| 51 | u64 cur_num_bits = Common::BitSize<decltype(bitmap)>() / 2; | 85 | // Determine how many bits to take this round. |
| 52 | u64 cur_mask = (1ULL << cur_num_bits) - 1; | 86 | const auto cur_bits = std::min(num_bits, m_bits_available); |
| 53 | 87 | ||
| 54 | while (cur_num_bits) { | 88 | // Generate mask for our current bits. |
| 55 | const u64 low = (bitmap >> 0) & cur_mask; | 89 | const u64 mask = (static_cast<u64>(1) << cur_bits) - 1; |
| 56 | const u64 high = (bitmap >> cur_num_bits) & cur_mask; | ||
| 57 | 90 | ||
| 58 | bool choose_low; | 91 | // Add bits to output from our entropy. |
| 59 | if (high == 0) { | 92 | result <<= cur_bits; |
| 60 | // If only low val is set, choose low. | 93 | result |= (m_entropy & mask); |
| 61 | choose_low = true; | ||
| 62 | } else if (low == 0) { | ||
| 63 | // If only high val is set, choose high. | ||
| 64 | choose_low = false; | ||
| 65 | } else { | ||
| 66 | // If both are set, choose random. | ||
| 67 | choose_low = this->GenerateRandomBit(); | ||
| 68 | } | ||
| 69 | 94 | ||
| 70 | // If we chose low, proceed with low. | 95 | // Remove bits from our entropy. |
| 71 | if (choose_low) { | 96 | m_entropy >>= cur_bits; |
| 72 | bitmap = low; | 97 | m_bits_available -= cur_bits; |
| 73 | selected += 0; | ||
| 74 | } else { | ||
| 75 | bitmap = high; | ||
| 76 | selected += cur_num_bits; | ||
| 77 | } | ||
| 78 | 98 | ||
| 79 | // Proceed. | 99 | // Advance. |
| 80 | cur_num_bits /= 2; | 100 | num_bits -= cur_bits; |
| 81 | cur_mask >>= cur_num_bits; | ||
| 82 | } | 101 | } |
| 83 | 102 | ||
| 84 | return selected; | 103 | return result; |
| 85 | } | 104 | } |
| 105 | |||
| 106 | private: | ||
| 107 | Common::TinyMT m_rng; | ||
| 108 | u32 m_entropy{}; | ||
| 109 | u32 m_bits_available{}; | ||
| 86 | }; | 110 | }; |
| 87 | 111 | ||
| 88 | public: | 112 | public: |
| 89 | static constexpr std::size_t MaxDepth = 4; | 113 | static constexpr size_t MaxDepth = 4; |
| 90 | |||
| 91 | private: | ||
| 92 | std::array<u64*, MaxDepth> bit_storages{}; | ||
| 93 | RandomBitGenerator rng{}; | ||
| 94 | std::size_t num_bits{}; | ||
| 95 | std::size_t used_depths{}; | ||
| 96 | 114 | ||
| 97 | public: | 115 | public: |
| 98 | KPageBitmap() = default; | 116 | KPageBitmap() = default; |
| 99 | 117 | ||
| 100 | constexpr std::size_t GetNumBits() const { | 118 | constexpr size_t GetNumBits() const { |
| 101 | return num_bits; | 119 | return m_num_bits; |
| 102 | } | 120 | } |
| 103 | constexpr s32 GetHighestDepthIndex() const { | 121 | constexpr s32 GetHighestDepthIndex() const { |
| 104 | return static_cast<s32>(used_depths) - 1; | 122 | return static_cast<s32>(m_used_depths) - 1; |
| 105 | } | 123 | } |
| 106 | 124 | ||
| 107 | u64* Initialize(u64* storage, std::size_t size) { | 125 | u64* Initialize(u64* storage, size_t size) { |
| 108 | // Initially, everything is un-set. | 126 | // Initially, everything is un-set. |
| 109 | num_bits = 0; | 127 | m_num_bits = 0; |
| 110 | 128 | ||
| 111 | // Calculate the needed bitmap depth. | 129 | // Calculate the needed bitmap depth. |
| 112 | used_depths = static_cast<std::size_t>(GetRequiredDepth(size)); | 130 | m_used_depths = static_cast<size_t>(GetRequiredDepth(size)); |
| 113 | ASSERT(used_depths <= MaxDepth); | 131 | ASSERT(m_used_depths <= MaxDepth); |
| 114 | 132 | ||
| 115 | // Set the bitmap pointers. | 133 | // Set the bitmap pointers. |
| 116 | for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) { | 134 | for (s32 depth = this->GetHighestDepthIndex(); depth >= 0; depth--) { |
| 117 | bit_storages[depth] = storage; | 135 | m_bit_storages[depth] = storage; |
| 118 | size = Common::AlignUp(size, Common::BitSize<u64>()) / Common::BitSize<u64>(); | 136 | size = Common::AlignUp(size, Common::BitSize<u64>()) / Common::BitSize<u64>(); |
| 119 | storage += size; | 137 | storage += size; |
| 138 | m_end_storages[depth] = storage; | ||
| 120 | } | 139 | } |
| 121 | 140 | ||
| 122 | return storage; | 141 | return storage; |
| @@ -128,19 +147,19 @@ public: | |||
| 128 | 147 | ||
| 129 | if (random) { | 148 | if (random) { |
| 130 | do { | 149 | do { |
| 131 | const u64 v = bit_storages[depth][offset]; | 150 | const u64 v = m_bit_storages[depth][offset]; |
| 132 | if (v == 0) { | 151 | if (v == 0) { |
| 133 | // If depth is bigger than zero, then a previous level indicated a block was | 152 | // If depth is bigger than zero, then a previous level indicated a block was |
| 134 | // free. | 153 | // free. |
| 135 | ASSERT(depth == 0); | 154 | ASSERT(depth == 0); |
| 136 | return -1; | 155 | return -1; |
| 137 | } | 156 | } |
| 138 | offset = offset * Common::BitSize<u64>() + rng.SelectRandomBit(v); | 157 | offset = offset * Common::BitSize<u64>() + m_rng.SelectRandomBit(v); |
| 139 | ++depth; | 158 | ++depth; |
| 140 | } while (depth < static_cast<s32>(used_depths)); | 159 | } while (depth < static_cast<s32>(m_used_depths)); |
| 141 | } else { | 160 | } else { |
| 142 | do { | 161 | do { |
| 143 | const u64 v = bit_storages[depth][offset]; | 162 | const u64 v = m_bit_storages[depth][offset]; |
| 144 | if (v == 0) { | 163 | if (v == 0) { |
| 145 | // If depth is bigger than zero, then a previous level indicated a block was | 164 | // If depth is bigger than zero, then a previous level indicated a block was |
| 146 | // free. | 165 | // free. |
| @@ -149,28 +168,69 @@ public: | |||
| 149 | } | 168 | } |
| 150 | offset = offset * Common::BitSize<u64>() + std::countr_zero(v); | 169 | offset = offset * Common::BitSize<u64>() + std::countr_zero(v); |
| 151 | ++depth; | 170 | ++depth; |
| 152 | } while (depth < static_cast<s32>(used_depths)); | 171 | } while (depth < static_cast<s32>(m_used_depths)); |
| 153 | } | 172 | } |
| 154 | 173 | ||
| 155 | return static_cast<s64>(offset); | 174 | return static_cast<s64>(offset); |
| 156 | } | 175 | } |
| 157 | 176 | ||
| 158 | void SetBit(std::size_t offset) { | 177 | s64 FindFreeRange(size_t count) { |
| 178 | // Check that it is possible to find a range. | ||
| 179 | const u64* const storage_start = m_bit_storages[m_used_depths - 1]; | ||
| 180 | const u64* const storage_end = m_end_storages[m_used_depths - 1]; | ||
| 181 | |||
| 182 | // If we don't have a storage to iterate (or want more blocks than fit in a single storage), | ||
| 183 | // we can't find a free range. | ||
| 184 | if (!(storage_start < storage_end && count <= Common::BitSize<u64>())) { | ||
| 185 | return -1; | ||
| 186 | } | ||
| 187 | |||
| 188 | // Walk the storages to select a random free range. | ||
| 189 | const size_t options_per_storage = std::max<size_t>(Common::BitSize<u64>() / count, 1); | ||
| 190 | const size_t num_entries = std::max<size_t>(storage_end - storage_start, 1); | ||
| 191 | |||
| 192 | const u64 free_mask = (static_cast<u64>(1) << count) - 1; | ||
| 193 | |||
| 194 | size_t num_valid_options = 0; | ||
| 195 | s64 chosen_offset = -1; | ||
| 196 | for (size_t storage_index = 0; storage_index < num_entries; ++storage_index) { | ||
| 197 | u64 storage = storage_start[storage_index]; | ||
| 198 | for (size_t option = 0; option < options_per_storage; ++option) { | ||
| 199 | if ((storage & free_mask) == free_mask) { | ||
| 200 | // We've found a new valid option. | ||
| 201 | ++num_valid_options; | ||
| 202 | |||
| 203 | // Select the Kth valid option with probability 1/K. This leads to an overall | ||
| 204 | // uniform distribution. | ||
| 205 | if (num_valid_options == 1 || m_rng.GenerateRandom(num_valid_options) == 0) { | ||
| 206 | // This is our first option, so select it. | ||
| 207 | chosen_offset = storage_index * Common::BitSize<u64>() + option * count; | ||
| 208 | } | ||
| 209 | } | ||
| 210 | storage >>= count; | ||
| 211 | } | ||
| 212 | } | ||
| 213 | |||
| 214 | // Return the random offset we chose. | ||
| 215 | return chosen_offset; | ||
| 216 | } | ||
| 217 | |||
| 218 | void SetBit(size_t offset) { | ||
| 159 | this->SetBit(this->GetHighestDepthIndex(), offset); | 219 | this->SetBit(this->GetHighestDepthIndex(), offset); |
| 160 | num_bits++; | 220 | m_num_bits++; |
| 161 | } | 221 | } |
| 162 | 222 | ||
| 163 | void ClearBit(std::size_t offset) { | 223 | void ClearBit(size_t offset) { |
| 164 | this->ClearBit(this->GetHighestDepthIndex(), offset); | 224 | this->ClearBit(this->GetHighestDepthIndex(), offset); |
| 165 | num_bits--; | 225 | m_num_bits--; |
| 166 | } | 226 | } |
| 167 | 227 | ||
| 168 | bool ClearRange(std::size_t offset, std::size_t count) { | 228 | bool ClearRange(size_t offset, size_t count) { |
| 169 | s32 depth = this->GetHighestDepthIndex(); | 229 | s32 depth = this->GetHighestDepthIndex(); |
| 170 | u64* bits = bit_storages[depth]; | 230 | u64* bits = m_bit_storages[depth]; |
| 171 | std::size_t bit_ind = offset / Common::BitSize<u64>(); | 231 | size_t bit_ind = offset / Common::BitSize<u64>(); |
| 172 | if (count < Common::BitSize<u64>()) { | 232 | if (count < Common::BitSize<u64>()) [[likely]] { |
| 173 | const std::size_t shift = offset % Common::BitSize<u64>(); | 233 | const size_t shift = offset % Common::BitSize<u64>(); |
| 174 | ASSERT(shift + count <= Common::BitSize<u64>()); | 234 | ASSERT(shift + count <= Common::BitSize<u64>()); |
| 175 | // Check that all the bits are set. | 235 | // Check that all the bits are set. |
| 176 | const u64 mask = ((u64(1) << count) - 1) << shift; | 236 | const u64 mask = ((u64(1) << count) - 1) << shift; |
| @@ -189,8 +249,8 @@ public: | |||
| 189 | ASSERT(offset % Common::BitSize<u64>() == 0); | 249 | ASSERT(offset % Common::BitSize<u64>() == 0); |
| 190 | ASSERT(count % Common::BitSize<u64>() == 0); | 250 | ASSERT(count % Common::BitSize<u64>() == 0); |
| 191 | // Check that all the bits are set. | 251 | // Check that all the bits are set. |
| 192 | std::size_t remaining = count; | 252 | size_t remaining = count; |
| 193 | std::size_t i = 0; | 253 | size_t i = 0; |
| 194 | do { | 254 | do { |
| 195 | if (bits[bit_ind + i++] != ~u64(0)) { | 255 | if (bits[bit_ind + i++] != ~u64(0)) { |
| 196 | return false; | 256 | return false; |
| @@ -209,18 +269,18 @@ public: | |||
| 209 | } while (remaining > 0); | 269 | } while (remaining > 0); |
| 210 | } | 270 | } |
| 211 | 271 | ||
| 212 | num_bits -= count; | 272 | m_num_bits -= count; |
| 213 | return true; | 273 | return true; |
| 214 | } | 274 | } |
| 215 | 275 | ||
| 216 | private: | 276 | private: |
| 217 | void SetBit(s32 depth, std::size_t offset) { | 277 | void SetBit(s32 depth, size_t offset) { |
| 218 | while (depth >= 0) { | 278 | while (depth >= 0) { |
| 219 | std::size_t ind = offset / Common::BitSize<u64>(); | 279 | size_t ind = offset / Common::BitSize<u64>(); |
| 220 | std::size_t which = offset % Common::BitSize<u64>(); | 280 | size_t which = offset % Common::BitSize<u64>(); |
| 221 | const u64 mask = u64(1) << which; | 281 | const u64 mask = u64(1) << which; |
| 222 | 282 | ||
| 223 | u64* bit = std::addressof(bit_storages[depth][ind]); | 283 | u64* bit = std::addressof(m_bit_storages[depth][ind]); |
| 224 | u64 v = *bit; | 284 | u64 v = *bit; |
| 225 | ASSERT((v & mask) == 0); | 285 | ASSERT((v & mask) == 0); |
| 226 | *bit = v | mask; | 286 | *bit = v | mask; |
| @@ -232,13 +292,13 @@ private: | |||
| 232 | } | 292 | } |
| 233 | } | 293 | } |
| 234 | 294 | ||
| 235 | void ClearBit(s32 depth, std::size_t offset) { | 295 | void ClearBit(s32 depth, size_t offset) { |
| 236 | while (depth >= 0) { | 296 | while (depth >= 0) { |
| 237 | std::size_t ind = offset / Common::BitSize<u64>(); | 297 | size_t ind = offset / Common::BitSize<u64>(); |
| 238 | std::size_t which = offset % Common::BitSize<u64>(); | 298 | size_t which = offset % Common::BitSize<u64>(); |
| 239 | const u64 mask = u64(1) << which; | 299 | const u64 mask = u64(1) << which; |
| 240 | 300 | ||
| 241 | u64* bit = std::addressof(bit_storages[depth][ind]); | 301 | u64* bit = std::addressof(m_bit_storages[depth][ind]); |
| 242 | u64 v = *bit; | 302 | u64 v = *bit; |
| 243 | ASSERT((v & mask) != 0); | 303 | ASSERT((v & mask) != 0); |
| 244 | v &= ~mask; | 304 | v &= ~mask; |
| @@ -252,7 +312,7 @@ private: | |||
| 252 | } | 312 | } |
| 253 | 313 | ||
| 254 | private: | 314 | private: |
| 255 | static constexpr s32 GetRequiredDepth(std::size_t region_size) { | 315 | static constexpr s32 GetRequiredDepth(size_t region_size) { |
| 256 | s32 depth = 0; | 316 | s32 depth = 0; |
| 257 | while (true) { | 317 | while (true) { |
| 258 | region_size /= Common::BitSize<u64>(); | 318 | region_size /= Common::BitSize<u64>(); |
| @@ -264,8 +324,8 @@ private: | |||
| 264 | } | 324 | } |
| 265 | 325 | ||
| 266 | public: | 326 | public: |
| 267 | static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size) { | 327 | static constexpr size_t CalculateManagementOverheadSize(size_t region_size) { |
| 268 | std::size_t overhead_bits = 0; | 328 | size_t overhead_bits = 0; |
| 269 | for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) { | 329 | for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) { |
| 270 | region_size = | 330 | region_size = |
| 271 | Common::AlignUp(region_size, Common::BitSize<u64>()) / Common::BitSize<u64>(); | 331 | Common::AlignUp(region_size, Common::BitSize<u64>()) / Common::BitSize<u64>(); |
| @@ -273,6 +333,13 @@ public: | |||
| 273 | } | 333 | } |
| 274 | return overhead_bits * sizeof(u64); | 334 | return overhead_bits * sizeof(u64); |
| 275 | } | 335 | } |
| 336 | |||
| 337 | private: | ||
| 338 | std::array<u64*, MaxDepth> m_bit_storages{}; | ||
| 339 | std::array<u64*, MaxDepth> m_end_storages{}; | ||
| 340 | RandomBitGenerator m_rng; | ||
| 341 | size_t m_num_bits{}; | ||
| 342 | size_t m_used_depths{}; | ||
| 276 | }; | 343 | }; |
| 277 | 344 | ||
| 278 | } // namespace Kernel | 345 | } // namespace Kernel |
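FindFreeRange above keeps the Kth valid candidate with probability 1/K, which is single-pass reservoir sampling and yields a uniform choice over all free ranges it scans. Below is a minimal standalone sketch of that selection scheme, using std::mt19937_64 as a stand-in for the kernel's TinyMT-based RandomBitGenerator.

#include <cstddef>
#include <optional>
#include <random>
#include <vector>

// Return a uniformly random index of a true element, visiting each candidate once.
std::optional<std::size_t> PickUniform(const std::vector<bool>& is_valid, std::mt19937_64& rng) {
    std::size_t num_valid = 0;
    std::optional<std::size_t> chosen;
    for (std::size_t i = 0; i < is_valid.size(); ++i) {
        if (!is_valid[i]) {
            continue;
        }
        ++num_valid;
        // Keep the new candidate with probability 1/num_valid; the survivor is uniform.
        if (std::uniform_int_distribution<std::size_t>(0, num_valid - 1)(rng) == 0) {
            chosen = i;
        }
    }
    return chosen; // empty if there was no valid candidate
}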
diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h index aef06e213..cfedaae61 100644 --- a/src/core/hle/kernel/k_page_buffer.h +++ b/src/core/hle/kernel/k_page_buffer.h | |||
| @@ -11,6 +11,16 @@ | |||
| 11 | 11 | ||
| 12 | namespace Kernel { | 12 | namespace Kernel { |
| 13 | 13 | ||
| 14 | class KernelCore; | ||
| 15 | |||
| 16 | class KPageBufferSlabHeap : protected impl::KSlabHeapImpl { | ||
| 17 | public: | ||
| 18 | static constexpr size_t BufferSize = PageSize; | ||
| 19 | |||
| 20 | public: | ||
| 21 | void Initialize(Core::System& system); | ||
| 22 | }; | ||
| 23 | |||
| 14 | class KPageBuffer final : public KSlabAllocated<KPageBuffer> { | 24 | class KPageBuffer final : public KSlabAllocated<KPageBuffer> { |
| 15 | public: | 25 | public: |
| 16 | explicit KPageBuffer(KernelCore&) {} | 26 | explicit KPageBuffer(KernelCore&) {} |
| @@ -21,8 +31,6 @@ public: | |||
| 21 | private: | 31 | private: |
| 22 | [[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{}; | 32 | [[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{}; |
| 23 | }; | 33 | }; |
| 24 | 34 | static_assert(sizeof(KPageBuffer) == KPageBufferSlabHeap::BufferSize); | |
| 25 | static_assert(sizeof(KPageBuffer) == PageSize); | ||
| 26 | static_assert(alignof(KPageBuffer) == PageSize); | ||
| 27 | 35 | ||
| 28 | } // namespace Kernel | 36 | } // namespace Kernel |
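The new static_assert ties sizeof(KPageBuffer) to KPageBufferSlabHeap::BufferSize (one page) rather than asserting against PageSize directly. Below is a minimal sketch of the page-size/page-alignment invariant such a buffer relies on; PageSize and the struct are illustrative stand-ins, not the yuzu types.

#include <array>
#include <cstddef>
#include <cstdint>

constexpr std::size_t PageSize = 0x1000;

// A page buffer handed out by a page-granular slab heap must occupy exactly one
// page and start on a page boundary.
struct alignas(PageSize) PageBuffer {
    std::array<std::uint8_t, PageSize> data{};
};

static_assert(sizeof(PageBuffer) == PageSize);
static_assert(alignof(PageBuffer) == PageSize);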
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h index 968753992..316f172f2 100644 --- a/src/core/hle/kernel/k_page_group.h +++ b/src/core/hle/kernel/k_page_group.h | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | 5 | ||
| 6 | #include <list> | 6 | #include <list> |
| 7 | 7 | ||
| 8 | #include "common/alignment.h" | ||
| 8 | #include "common/assert.h" | 9 | #include "common/assert.h" |
| 9 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 10 | #include "core/hle/kernel/memory_types.h" | 11 | #include "core/hle/kernel/memory_types.h" |
| @@ -12,6 +13,89 @@ | |||
| 12 | 13 | ||
| 13 | namespace Kernel { | 14 | namespace Kernel { |
| 14 | 15 | ||
| 16 | class KPageGroup; | ||
| 17 | |||
| 18 | class KBlockInfo { | ||
| 19 | private: | ||
| 20 | friend class KPageGroup; | ||
| 21 | |||
| 22 | public: | ||
| 23 | constexpr KBlockInfo() = default; | ||
| 24 | |||
| 25 | constexpr void Initialize(PAddr addr, size_t np) { | ||
| 26 | ASSERT(Common::IsAligned(addr, PageSize)); | ||
| 27 | ASSERT(static_cast<u32>(np) == np); | ||
| 28 | |||
| 29 | m_page_index = static_cast<u32>(addr) / PageSize; | ||
| 30 | m_num_pages = static_cast<u32>(np); | ||
| 31 | } | ||
| 32 | |||
| 33 | constexpr PAddr GetAddress() const { | ||
| 34 | return m_page_index * PageSize; | ||
| 35 | } | ||
| 36 | constexpr size_t GetNumPages() const { | ||
| 37 | return m_num_pages; | ||
| 38 | } | ||
| 39 | constexpr size_t GetSize() const { | ||
| 40 | return this->GetNumPages() * PageSize; | ||
| 41 | } | ||
| 42 | constexpr PAddr GetEndAddress() const { | ||
| 43 | return (m_page_index + m_num_pages) * PageSize; | ||
| 44 | } | ||
| 45 | constexpr PAddr GetLastAddress() const { | ||
| 46 | return this->GetEndAddress() - 1; | ||
| 47 | } | ||
| 48 | |||
| 49 | constexpr KBlockInfo* GetNext() const { | ||
| 50 | return m_next; | ||
| 51 | } | ||
| 52 | |||
| 53 | constexpr bool IsEquivalentTo(const KBlockInfo& rhs) const { | ||
| 54 | return m_page_index == rhs.m_page_index && m_num_pages == rhs.m_num_pages; | ||
| 55 | } | ||
| 56 | |||
| 57 | constexpr bool operator==(const KBlockInfo& rhs) const { | ||
| 58 | return this->IsEquivalentTo(rhs); | ||
| 59 | } | ||
| 60 | |||
| 61 | constexpr bool operator!=(const KBlockInfo& rhs) const { | ||
| 62 | return !(*this == rhs); | ||
| 63 | } | ||
| 64 | |||
| 65 | constexpr bool IsStrictlyBefore(PAddr addr) const { | ||
| 66 | const PAddr end = this->GetEndAddress(); | ||
| 67 | |||
| 68 | if (m_page_index != 0 && end == 0) { | ||
| 69 | return false; | ||
| 70 | } | ||
| 71 | |||
| 72 | return end < addr; | ||
| 73 | } | ||
| 74 | |||
| 75 | constexpr bool operator<(PAddr addr) const { | ||
| 76 | return this->IsStrictlyBefore(addr); | ||
| 77 | } | ||
| 78 | |||
| 79 | constexpr bool TryConcatenate(PAddr addr, size_t np) { | ||
| 80 | if (addr != 0 && addr == this->GetEndAddress()) { | ||
| 81 | m_num_pages += static_cast<u32>(np); | ||
| 82 | return true; | ||
| 83 | } | ||
| 84 | return false; | ||
| 85 | } | ||
| 86 | |||
| 87 | private: | ||
| 88 | constexpr void SetNext(KBlockInfo* next) { | ||
| 89 | m_next = next; | ||
| 90 | } | ||
| 91 | |||
| 92 | private: | ||
| 93 | KBlockInfo* m_next{}; | ||
| 94 | u32 m_page_index{}; | ||
| 95 | u32 m_num_pages{}; | ||
| 96 | }; | ||
| 97 | static_assert(sizeof(KBlockInfo) <= 0x10); | ||
| 98 | |||
| 15 | class KPageGroup final { | 99 | class KPageGroup final { |
| 16 | public: | 100 | public: |
| 17 | class Node final { | 101 | class Node final { |
| @@ -92,6 +176,8 @@ public: | |||
| 92 | return nodes.empty(); | 176 | return nodes.empty(); |
| 93 | } | 177 | } |
| 94 | 178 | ||
| 179 | void Finalize() {} | ||
| 180 | |||
| 95 | private: | 181 | private: |
| 96 | std::list<Node> nodes; | 182 | std::list<Node> nodes; |
| 97 | }; | 183 | }; |
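KBlockInfo above packs a run of physical pages into two u32 fields (page index and page count), which is what keeps sizeof(KBlockInfo) within 0x10 even with the intrusive next pointer, and TryConcatenate grows a run in place when a new block starts exactly at its end address. Below is a minimal sketch of that compact representation, with plain integer types standing in for PAddr and the next pointer omitted.

#include <cstddef>
#include <cstdint>

constexpr std::uint64_t PageSize = 0x1000;

struct BlockInfo {
    std::uint32_t page_index{}; // physical address / PageSize
    std::uint32_t num_pages{};

    constexpr std::uint64_t GetAddress() const {
        return static_cast<std::uint64_t>(page_index) * PageSize;
    }
    constexpr std::uint64_t GetEndAddress() const {
        return (static_cast<std::uint64_t>(page_index) + num_pages) * PageSize;
    }
    // Absorb a new run if it begins exactly where this one ends.
    constexpr bool TryConcatenate(std::uint64_t addr, std::size_t np) {
        if (addr != 0 && addr == this->GetEndAddress()) {
            num_pages += static_cast<std::uint32_t>(np);
            return true;
        }
        return false;
    }
};

static_assert(sizeof(BlockInfo) == 8); // two u32 fields; the real KBlockInfo also carries a next pointer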
diff --git a/src/core/hle/kernel/k_page_heap.cpp b/src/core/hle/kernel/k_page_heap.cpp index 5ede60168..7b02c7d8b 100644 --- a/src/core/hle/kernel/k_page_heap.cpp +++ b/src/core/hle/kernel/k_page_heap.cpp | |||
| @@ -44,11 +44,11 @@ size_t KPageHeap::GetNumFreePages() const { | |||
| 44 | return num_free; | 44 | return num_free; |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | PAddr KPageHeap::AllocateBlock(s32 index, bool random) { | 47 | PAddr KPageHeap::AllocateByLinearSearch(s32 index) { |
| 48 | const size_t needed_size = m_blocks[index].GetSize(); | 48 | const size_t needed_size = m_blocks[index].GetSize(); |
| 49 | 49 | ||
| 50 | for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) { | 50 | for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) { |
| 51 | if (const PAddr addr = m_blocks[i].PopBlock(random); addr != 0) { | 51 | if (const PAddr addr = m_blocks[i].PopBlock(false); addr != 0) { |
| 52 | if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) { | 52 | if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) { |
| 53 | this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize); | 53 | this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize); |
| 54 | } | 54 | } |
| @@ -59,6 +59,88 @@ PAddr KPageHeap::AllocateBlock(s32 index, bool random) { | |||
| 59 | return 0; | 59 | return 0; |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) { | ||
| 63 | // Get the size and required alignment. | ||
| 64 | const size_t needed_size = num_pages * PageSize; | ||
| 65 | const size_t align_size = align_pages * PageSize; | ||
| 66 | |||
| 67 | // Determine meta-alignment of our desired alignment size. | ||
| 68 | const size_t align_shift = std::countr_zero(align_size); | ||
| 69 | |||
| 70 | // Decide on a block to allocate from. | ||
| 71 | constexpr size_t MinimumPossibleAlignmentsForRandomAllocation = 4; | ||
| 72 | { | ||
| 73 | // By default, we'll want to look at all blocks larger than our current one. | ||
| 74 | s32 max_blocks = static_cast<s32>(m_num_blocks); | ||
| 75 | |||
| 76 | // Determine the maximum block we should try to allocate from. | ||
| 77 | size_t possible_alignments = 0; | ||
| 78 | for (s32 i = index; i < max_blocks; ++i) { | ||
| 79 | // Add the possible alignments from blocks at the current size. | ||
| 80 | possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) * | ||
| 81 | m_blocks[i].GetNumFreeBlocks(); | ||
| 82 | |||
| 83 | // If there are enough possible alignments, we don't need to look at larger blocks. | ||
| 84 | if (possible_alignments >= MinimumPossibleAlignmentsForRandomAllocation) { | ||
| 85 | max_blocks = i + 1; | ||
| 86 | break; | ||
| 87 | } | ||
| 88 | } | ||
| 89 | |||
| 90 | // If we have any possible alignments which require a larger block, we need to pick one. | ||
| 91 | if (possible_alignments > 0 && index + 1 < max_blocks) { | ||
| 92 | // Select a random alignment from the possibilities. | ||
| 93 | const size_t rnd = m_rng.GenerateRandom(possible_alignments); | ||
| 94 | |||
| 95 | // Determine which block corresponds to the random alignment we chose. | ||
| 96 | possible_alignments = 0; | ||
| 97 | for (s32 i = index; i < max_blocks; ++i) { | ||
| 98 | // Add the possible alignments from blocks at the current size. | ||
| 99 | possible_alignments += | ||
| 100 | (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) * | ||
| 101 | m_blocks[i].GetNumFreeBlocks(); | ||
| 102 | |||
| 103 | // If the current block gets us to our random choice, use the current block. | ||
| 104 | if (rnd < possible_alignments) { | ||
| 105 | index = i; | ||
| 106 | break; | ||
| 107 | } | ||
| 108 | } | ||
| 109 | } | ||
| 110 | } | ||
| 111 | |||
| 112 | // Pop a block from the index we selected. | ||
| 113 | if (PAddr addr = m_blocks[index].PopBlock(true); addr != 0) { | ||
| 114 | // Determine how much size we have left over. | ||
| 115 | if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size; | ||
| 116 | leftover_size > 0) { | ||
| 117 | // Determine how many valid alignments we can have. | ||
| 118 | const size_t possible_alignments = 1 + (leftover_size >> align_shift); | ||
| 119 | |||
| 120 | // Select a random valid alignment. | ||
| 121 | const size_t random_offset = m_rng.GenerateRandom(possible_alignments) << align_shift; | ||
| 122 | |||
| 123 | // Free memory before the random offset. | ||
| 124 | if (random_offset != 0) { | ||
| 125 | this->Free(addr, random_offset / PageSize); | ||
| 126 | } | ||
| 127 | |||
| 128 | // Advance our block by the random offset. | ||
| 129 | addr += random_offset; | ||
| 130 | |||
| 131 | // Free memory after our allocated block. | ||
| 132 | if (random_offset != leftover_size) { | ||
| 133 | this->Free(addr + needed_size, (leftover_size - random_offset) / PageSize); | ||
| 134 | } | ||
| 135 | } | ||
| 136 | |||
| 137 | // Return the block we allocated. | ||
| 138 | return addr; | ||
| 139 | } | ||
| 140 | |||
| 141 | return 0; | ||
| 142 | } | ||
| 143 | |||
| 62 | void KPageHeap::FreeBlock(PAddr block, s32 index) { | 144 | void KPageHeap::FreeBlock(PAddr block, s32 index) { |
| 63 | do { | 145 | do { |
| 64 | block = m_blocks[index++].PushBlock(block); | 146 | block = m_blocks[index++].PushBlock(block); |
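For reference, a minimal standalone sketch (C++20, hypothetical helper name) of the placement count that AllocateByRandom sums per block above: a free block of block_size bytes can host 1 + ((block_size - needed_size) >> align_shift) aligned placements, and these per-block counts weight the random choice.

#include <bit>
#include <cstddef>

// Illustrative only: counts the aligned placements of needed_size bytes that
// fit inside one free block of block_size bytes, assuming align_size is a
// power of two and block_size >= needed_size.
constexpr std::size_t CountAlignedPlacements(std::size_t block_size, std::size_t needed_size,
                                             std::size_t align_size) {
    const std::size_t align_shift = std::countr_zero(align_size);
    return 1 + ((block_size - needed_size) >> align_shift);
}

// A 256 KiB free block can host a 64 KiB, 64 KiB-aligned allocation at 4 offsets.
static_assert(CountAlignedPlacements(256 * 1024, 64 * 1024, 64 * 1024) == 4);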
diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h index 0917a8bed..9021edcf7 100644 --- a/src/core/hle/kernel/k_page_heap.h +++ b/src/core/hle/kernel/k_page_heap.h | |||
| @@ -14,13 +14,9 @@ | |||
| 14 | 14 | ||
| 15 | namespace Kernel { | 15 | namespace Kernel { |
| 16 | 16 | ||
| 17 | class KPageHeap final { | 17 | class KPageHeap { |
| 18 | public: | 18 | public: |
| 19 | YUZU_NON_COPYABLE(KPageHeap); | ||
| 20 | YUZU_NON_MOVEABLE(KPageHeap); | ||
| 21 | |||
| 22 | KPageHeap() = default; | 19 | KPageHeap() = default; |
| 23 | ~KPageHeap() = default; | ||
| 24 | 20 | ||
| 25 | constexpr PAddr GetAddress() const { | 21 | constexpr PAddr GetAddress() const { |
| 26 | return m_heap_address; | 22 | return m_heap_address; |
| @@ -57,7 +53,20 @@ public: | |||
| 57 | m_initial_used_size = m_heap_size - free_size - reserved_size; | 53 | m_initial_used_size = m_heap_size - free_size - reserved_size; |
| 58 | } | 54 | } |
| 59 | 55 | ||
| 60 | PAddr AllocateBlock(s32 index, bool random); | 56 | PAddr AllocateBlock(s32 index, bool random) { |
| 57 | if (random) { | ||
| 58 | const size_t block_pages = m_blocks[index].GetNumPages(); | ||
| 59 | return this->AllocateByRandom(index, block_pages, block_pages); | ||
| 60 | } else { | ||
| 61 | return this->AllocateByLinearSearch(index); | ||
| 62 | } | ||
| 63 | } | ||
| 64 | |||
| 65 | PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) { | ||
| 66 | // TODO: linear search support? | ||
| 67 | return this->AllocateByRandom(index, num_pages, align_pages); | ||
| 68 | } | ||
| 69 | |||
| 61 | void Free(PAddr addr, size_t num_pages); | 70 | void Free(PAddr addr, size_t num_pages); |
| 62 | 71 | ||
| 63 | static size_t CalculateManagementOverheadSize(size_t region_size) { | 72 | static size_t CalculateManagementOverheadSize(size_t region_size) { |
| @@ -68,7 +77,7 @@ public: | |||
| 68 | static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) { | 77 | static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) { |
| 69 | const size_t target_pages = std::max(num_pages, align_pages); | 78 | const size_t target_pages = std::max(num_pages, align_pages); |
| 70 | for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) { | 79 | for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) { |
| 71 | if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) { | 80 | if (target_pages <= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) { |
| 72 | return static_cast<s32>(i); | 81 | return static_cast<s32>(i); |
| 73 | } | 82 | } |
| 74 | } | 83 | } |
| @@ -77,7 +86,7 @@ public: | |||
| 77 | 86 | ||
| 78 | static constexpr s32 GetBlockIndex(size_t num_pages) { | 87 | static constexpr s32 GetBlockIndex(size_t num_pages) { |
| 79 | for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) { | 88 | for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) { |
| 80 | if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) { | 89 | if (num_pages >= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) { |
| 81 | return i; | 90 | return i; |
| 82 | } | 91 | } |
| 83 | } | 92 | } |
| @@ -85,7 +94,7 @@ public: | |||
| 85 | } | 94 | } |
| 86 | 95 | ||
| 87 | static constexpr size_t GetBlockSize(size_t index) { | 96 | static constexpr size_t GetBlockSize(size_t index) { |
| 88 | return size_t(1) << MemoryBlockPageShifts[index]; | 97 | return static_cast<size_t>(1) << MemoryBlockPageShifts[index]; |
| 89 | } | 98 | } |
| 90 | 99 | ||
| 91 | static constexpr size_t GetBlockNumPages(size_t index) { | 100 | static constexpr size_t GetBlockNumPages(size_t index) { |
| @@ -93,13 +102,9 @@ public: | |||
| 93 | } | 102 | } |
| 94 | 103 | ||
| 95 | private: | 104 | private: |
| 96 | class Block final { | 105 | class Block { |
| 97 | public: | 106 | public: |
| 98 | YUZU_NON_COPYABLE(Block); | ||
| 99 | YUZU_NON_MOVEABLE(Block); | ||
| 100 | |||
| 101 | Block() = default; | 107 | Block() = default; |
| 102 | ~Block() = default; | ||
| 103 | 108 | ||
| 104 | constexpr size_t GetShift() const { | 109 | constexpr size_t GetShift() const { |
| 105 | return m_block_shift; | 110 | return m_block_shift; |
| @@ -201,6 +206,9 @@ private: | |||
| 201 | }; | 206 | }; |
| 202 | 207 | ||
| 203 | private: | 208 | private: |
| 209 | PAddr AllocateByLinearSearch(s32 index); | ||
| 210 | PAddr AllocateByRandom(s32 index, size_t num_pages, size_t align_pages); | ||
| 211 | |||
| 204 | static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts, | 212 | static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts, |
| 205 | size_t num_block_shifts); | 213 | size_t num_block_shifts); |
| 206 | 214 | ||
| @@ -209,7 +217,8 @@ private: | |||
| 209 | size_t m_heap_size{}; | 217 | size_t m_heap_size{}; |
| 210 | size_t m_initial_used_size{}; | 218 | size_t m_initial_used_size{}; |
| 211 | size_t m_num_blocks{}; | 219 | size_t m_num_blocks{}; |
| 212 | std::array<Block, NumMemoryBlockPageShifts> m_blocks{}; | 220 | std::array<Block, NumMemoryBlockPageShifts> m_blocks; |
| 221 | KPageBitmap::RandomBitGenerator m_rng; | ||
| 213 | std::vector<u64> m_management_data; | 222 | std::vector<u64> m_management_data; |
| 214 | }; | 223 | }; |
| 215 | 224 | ||
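As a side note on GetAlignedBlockIndex and GetBlockSize above: the chosen index is simply the smallest block size able to hold max(num_pages, align_pages) pages. A self-contained sketch with a made-up shift table (the real MemoryBlockPageShifts values are defined elsewhere in the kernel sources):

#include <algorithm>
#include <array>
#include <cstddef>

constexpr std::size_t PageSize = 0x1000;
constexpr std::array<std::size_t, 3> BlockShifts{12, 16, 21}; // 4 KiB, 64 KiB, 2 MiB

// Returns the first index whose block size can hold the requested pages at
// the requested alignment, or -1 if none can.
constexpr int PickAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
    const std::size_t target_pages = std::max(num_pages, align_pages);
    for (std::size_t i = 0; i < BlockShifts.size(); ++i) {
        if (target_pages <= (static_cast<std::size_t>(1) << BlockShifts[i]) / PageSize) {
            return static_cast<int>(i);
        }
    }
    return -1;
}

static_assert(PickAlignedBlockIndex(1, 1) == 0); // fits in a 4 KiB block
static_assert(PickAlignedBlockIndex(5, 1) == 1); // needs a 64 KiB block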
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp index 307e491cb..fab55a057 100644 --- a/src/core/hle/kernel/k_page_table.cpp +++ b/src/core/hle/kernel/k_page_table.cpp | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include "core/hle/kernel/k_resource_limit.h" | 15 | #include "core/hle/kernel/k_resource_limit.h" |
| 16 | #include "core/hle/kernel/k_scoped_resource_reservation.h" | 16 | #include "core/hle/kernel/k_scoped_resource_reservation.h" |
| 17 | #include "core/hle/kernel/k_system_control.h" | 17 | #include "core/hle/kernel/k_system_control.h" |
| 18 | #include "core/hle/kernel/k_system_resource.h" | ||
| 18 | #include "core/hle/kernel/kernel.h" | 19 | #include "core/hle/kernel/kernel.h" |
| 19 | #include "core/hle/kernel/svc_results.h" | 20 | #include "core/hle/kernel/svc_results.h" |
| 20 | #include "core/memory.h" | 21 | #include "core/memory.h" |
| @@ -23,6 +24,61 @@ namespace Kernel { | |||
| 23 | 24 | ||
| 24 | namespace { | 25 | namespace { |
| 25 | 26 | ||
| 27 | class KScopedLightLockPair { | ||
| 28 | YUZU_NON_COPYABLE(KScopedLightLockPair); | ||
| 29 | YUZU_NON_MOVEABLE(KScopedLightLockPair); | ||
| 30 | |||
| 31 | private: | ||
| 32 | KLightLock* m_lower; | ||
| 33 | KLightLock* m_upper; | ||
| 34 | |||
| 35 | public: | ||
| 36 | KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) { | ||
| 37 | // Ensure our locks are in a consistent order. | ||
| 38 | if (std::addressof(lhs) <= std::addressof(rhs)) { | ||
| 39 | m_lower = std::addressof(lhs); | ||
| 40 | m_upper = std::addressof(rhs); | ||
| 41 | } else { | ||
| 42 | m_lower = std::addressof(rhs); | ||
| 43 | m_upper = std::addressof(lhs); | ||
| 44 | } | ||
| 45 | |||
| 46 | // Acquire both locks. | ||
| 47 | m_lower->Lock(); | ||
| 48 | if (m_lower != m_upper) { | ||
| 49 | m_upper->Lock(); | ||
| 50 | } | ||
| 51 | } | ||
| 52 | |||
| 53 | ~KScopedLightLockPair() { | ||
| 54 | // Unlock the upper lock. | ||
| 55 | if (m_upper != nullptr && m_upper != m_lower) { | ||
| 56 | m_upper->Unlock(); | ||
| 57 | } | ||
| 58 | |||
| 59 | // Unlock the lower lock. | ||
| 60 | if (m_lower != nullptr) { | ||
| 61 | m_lower->Unlock(); | ||
| 62 | } | ||
| 63 | } | ||
| 64 | |||
| 65 | public: | ||
| 66 | // Utility. | ||
| 67 | void TryUnlockHalf(KLightLock& lock) { | ||
| 68 | // Only allow unlocking if the lock is half the pair. | ||
| 69 | if (m_lower != m_upper) { | ||
| 70 | // We want to be sure the lock is one we own. | ||
| 71 | if (m_lower == std::addressof(lock)) { | ||
| 72 | lock.Unlock(); | ||
| 73 | m_lower = nullptr; | ||
| 74 | } else if (m_upper == std::addressof(lock)) { | ||
| 75 | lock.Unlock(); | ||
| 76 | m_upper = nullptr; | ||
| 77 | } | ||
| 78 | } | ||
| 79 | } | ||
| 80 | }; | ||
| 81 | |||
| 26 | using namespace Common::Literals; | 82 | using namespace Common::Literals; |
| 27 | 83 | ||
| 28 | constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { | 84 | constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { |
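The KScopedLightLockPair helper added above is the classic address-ordering discipline for taking two locks; a minimal standalone illustration with std::mutex standing in for KLightLock (names are hypothetical):

#include <memory>
#include <mutex>

// Locking the lower-addressed mutex first means two threads that take the
// same pair in opposite argument orders cannot deadlock.
class ScopedMutexPair {
public:
    ScopedMutexPair(std::mutex& lhs, std::mutex& rhs) {
        const bool lhs_is_lower = std::addressof(lhs) <= std::addressof(rhs);
        m_lower = lhs_is_lower ? std::addressof(lhs) : std::addressof(rhs);
        m_upper = lhs_is_lower ? std::addressof(rhs) : std::addressof(lhs);
        m_lower->lock();
        if (m_lower != m_upper) {
            m_upper->lock();
        }
    }
    ~ScopedMutexPair() {
        if (m_upper != m_lower) {
            m_upper->unlock();
        }
        m_lower->unlock();
    }

private:
    std::mutex* m_lower{};
    std::mutex* m_upper{};
};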
| @@ -49,9 +105,10 @@ KPageTable::KPageTable(Core::System& system_) | |||
| 49 | KPageTable::~KPageTable() = default; | 105 | KPageTable::~KPageTable() = default; |
| 50 | 106 | ||
| 51 | Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, | 107 | Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, |
| 52 | VAddr code_addr, size_t code_size, | 108 | bool enable_das_merge, bool from_back, |
| 53 | KMemoryBlockSlabManager* mem_block_slab_manager, | 109 | KMemoryManager::Pool pool, VAddr code_addr, |
| 54 | KMemoryManager::Pool pool) { | 110 | size_t code_size, KSystemResource* system_resource, |
| 111 | KResourceLimit* resource_limit) { | ||
| 55 | 112 | ||
| 56 | const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) { | 113 | const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) { |
| 57 | return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type); | 114 | return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type); |
| @@ -112,11 +169,13 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type | |||
| 112 | 169 | ||
| 113 | // Set other basic fields | 170 | // Set other basic fields |
| 114 | m_enable_aslr = enable_aslr; | 171 | m_enable_aslr = enable_aslr; |
| 115 | m_enable_device_address_space_merge = false; | 172 | m_enable_device_address_space_merge = enable_das_merge; |
| 116 | m_address_space_start = start; | 173 | m_address_space_start = start; |
| 117 | m_address_space_end = end; | 174 | m_address_space_end = end; |
| 118 | m_is_kernel = false; | 175 | m_is_kernel = false; |
| 119 | m_memory_block_slab_manager = mem_block_slab_manager; | 176 | m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer(); |
| 177 | m_block_info_manager = system_resource->GetBlockInfoManagerPointer(); | ||
| 178 | m_resource_limit = resource_limit; | ||
| 120 | 179 | ||
| 121 | // Determine the region we can place our undetermineds in | 180 | // Determine the region we can place our undetermineds in |
| 122 | VAddr alloc_start{}; | 181 | VAddr alloc_start{}; |
| @@ -215,10 +274,22 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type | |||
| 215 | } | 274 | } |
| 216 | } | 275 | } |
| 217 | 276 | ||
| 218 | // Set heap members | 277 | // Set heap and fill members. |
| 219 | m_current_heap_end = m_heap_region_start; | 278 | m_current_heap_end = m_heap_region_start; |
| 220 | m_max_heap_size = 0; | 279 | m_max_heap_size = 0; |
| 221 | m_max_physical_memory_size = 0; | 280 | m_mapped_physical_memory_size = 0; |
| 281 | m_mapped_unsafe_physical_memory = 0; | ||
| 282 | m_mapped_insecure_memory = 0; | ||
| 283 | m_mapped_ipc_server_memory = 0; | ||
| 284 | |||
| 285 | m_heap_fill_value = 0; | ||
| 286 | m_ipc_fill_value = 0; | ||
| 287 | m_stack_fill_value = 0; | ||
| 288 | |||
| 289 | // Set allocation option. | ||
| 290 | m_allocate_option = | ||
| 291 | KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack | ||
| 292 | : KMemoryManager::Direction::FromFront); | ||
| 222 | 293 | ||
| 223 | // Ensure that our regions are inside our address space | 294 | // Ensure that our regions are inside our address space |
| 224 | auto IsInAddressSpace = [&](VAddr addr) { | 295 | auto IsInAddressSpace = [&](VAddr addr) { |
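The allocate option assigned above packs a memory pool and an allocation direction into a single value; a purely hypothetical illustration of that packing follows (the real bit layout belongs to KMemoryManager::EncodeOption and is not reproduced here):

#include <cstdint>

// Hypothetical packing only; not the kernel's actual encoding.
enum class Pool : std::uint32_t { Application, Applet, System, SystemNonSecure };
enum class Direction : std::uint32_t { FromFront, FromBack };

constexpr std::uint32_t EncodeOptionSketch(Pool pool, Direction dir) {
    return (static_cast<std::uint32_t>(pool) << 4) | static_cast<std::uint32_t>(dir);
}

static_assert(EncodeOptionSketch(Pool::Application, Direction::FromBack) == 0x01);
static_assert(EncodeOptionSketch(Pool::System, Direction::FromFront) == 0x20);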
| @@ -267,6 +338,16 @@ void KPageTable::Finalize() { | |||
| 267 | m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size); | 338 | m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size); |
| 268 | }); | 339 | }); |
| 269 | 340 | ||
| 341 | // Release any insecure mapped memory. | ||
| 342 | if (m_mapped_insecure_memory) { | ||
| 343 | UNIMPLEMENTED(); | ||
| 344 | } | ||
| 345 | |||
| 346 | // Release any ipc server memory. | ||
| 347 | if (m_mapped_ipc_server_memory) { | ||
| 348 | UNIMPLEMENTED(); | ||
| 349 | } | ||
| 350 | |||
| 270 | // Close the backing page table, as the destructor is not called for guest objects. | 351 | // Close the backing page table, as the destructor is not called for guest objects. |
| 271 | m_page_table_impl.reset(); | 352 | m_page_table_impl.reset(); |
| 272 | } | 353 | } |
| @@ -650,7 +731,8 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu | |||
| 650 | 731 | ||
| 651 | Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table, | 732 | Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table, |
| 652 | VAddr src_addr) { | 733 | VAddr src_addr) { |
| 653 | KScopedLightLock lk(m_general_lock); | 734 | // Acquire the table locks. |
| 735 | KScopedLightLockPair lk(src_page_table.m_general_lock, m_general_lock); | ||
| 654 | 736 | ||
| 655 | const size_t num_pages{size / PageSize}; | 737 | const size_t num_pages{size / PageSize}; |
| 656 | 738 | ||
| @@ -686,9 +768,753 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& s | |||
| 686 | R_SUCCEED(); | 768 | R_SUCCEED(); |
| 687 | } | 769 | } |
| 688 | 770 | ||
| 771 | Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, | ||
| 772 | VAddr address, size_t size, KMemoryPermission test_perm, | ||
| 773 | KMemoryState dst_state) { | ||
| 774 | // Validate pre-conditions. | ||
| 775 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 776 | ASSERT(test_perm == KMemoryPermission::UserReadWrite || | ||
| 777 | test_perm == KMemoryPermission::UserRead); | ||
| 778 | |||
| 779 | // Check that the address is in range. | ||
| 780 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 781 | |||
| 782 | // Get the source permission. | ||
| 783 | const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite) | ||
| 784 | ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped | ||
| 785 | : KMemoryPermission::UserRead; | ||
| 786 | |||
| 787 | // Get aligned extents. | ||
| 788 | const VAddr aligned_src_start = Common::AlignDown((address), PageSize); | ||
| 789 | const VAddr aligned_src_end = Common::AlignUp((address) + size, PageSize); | ||
| 790 | const VAddr mapping_src_start = Common::AlignUp((address), PageSize); | ||
| 791 | const VAddr mapping_src_end = Common::AlignDown((address) + size, PageSize); | ||
| 792 | |||
| 793 | const auto aligned_src_last = (aligned_src_end)-1; | ||
| 794 | const auto mapping_src_last = (mapping_src_end)-1; | ||
| 795 | |||
| 796 | // Get the test state and attribute mask. | ||
| 797 | KMemoryState test_state; | ||
| 798 | KMemoryAttribute test_attr_mask; | ||
| 799 | switch (dst_state) { | ||
| 800 | case KMemoryState::Ipc: | ||
| 801 | test_state = KMemoryState::FlagCanUseIpc; | ||
| 802 | test_attr_mask = | ||
| 803 | KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked; | ||
| 804 | break; | ||
| 805 | case KMemoryState::NonSecureIpc: | ||
| 806 | test_state = KMemoryState::FlagCanUseNonSecureIpc; | ||
| 807 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 808 | break; | ||
| 809 | case KMemoryState::NonDeviceIpc: | ||
| 810 | test_state = KMemoryState::FlagCanUseNonDeviceIpc; | ||
| 811 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 812 | break; | ||
| 813 | default: | ||
| 814 | R_THROW(ResultInvalidCombination); | ||
| 815 | } | ||
| 816 | |||
| 817 | // Ensure that on failure, we roll back appropriately. | ||
| 818 | size_t mapped_size = 0; | ||
| 819 | ON_RESULT_FAILURE { | ||
| 820 | if (mapped_size > 0) { | ||
| 821 | this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size, | ||
| 822 | src_perm); | ||
| 823 | } | ||
| 824 | }; | ||
| 825 | |||
| 826 | size_t blocks_needed = 0; | ||
| 827 | |||
| 828 | // Iterate, mapping as needed. | ||
| 829 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start); | ||
| 830 | while (true) { | ||
| 831 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 832 | |||
| 833 | // Validate the current block. | ||
| 834 | R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm, | ||
| 835 | test_attr_mask, KMemoryAttribute::None)); | ||
| 836 | |||
| 837 | if (mapping_src_start < mapping_src_end && (mapping_src_start) < info.GetEndAddress() && | ||
| 838 | info.GetAddress() < (mapping_src_end)) { | ||
| 839 | const auto cur_start = | ||
| 840 | info.GetAddress() >= (mapping_src_start) ? info.GetAddress() : (mapping_src_start); | ||
| 841 | const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress() | ||
| 842 | : (mapping_src_end); | ||
| 843 | const size_t cur_size = cur_end - cur_start; | ||
| 844 | |||
| 845 | if (info.GetAddress() < (mapping_src_start)) { | ||
| 846 | ++blocks_needed; | ||
| 847 | } | ||
| 848 | if (mapping_src_last < info.GetLastAddress()) { | ||
| 849 | ++blocks_needed; | ||
| 850 | } | ||
| 851 | |||
| 852 | // Set the permissions on the block, if we need to. | ||
| 853 | if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) { | ||
| 854 | R_TRY(Operate(cur_start, cur_size / PageSize, src_perm, | ||
| 855 | OperationType::ChangePermissions)); | ||
| 856 | } | ||
| 857 | |||
| 858 | // Note that we mapped this part. | ||
| 859 | mapped_size += cur_size; | ||
| 860 | } | ||
| 861 | |||
| 862 | // If the block is at the end, we're done. | ||
| 863 | if (aligned_src_last <= info.GetLastAddress()) { | ||
| 864 | break; | ||
| 865 | } | ||
| 866 | |||
| 867 | // Advance. | ||
| 868 | ++it; | ||
| 869 | ASSERT(it != m_memory_block_manager.end()); | ||
| 870 | } | ||
| 871 | |||
| 872 | if (out_blocks_needed != nullptr) { | ||
| 873 | ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | ||
| 874 | *out_blocks_needed = blocks_needed; | ||
| 875 | } | ||
| 876 | |||
| 877 | R_SUCCEED(); | ||
| 878 | } | ||
| 879 | |||
| 880 | Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr, | ||
| 881 | KMemoryPermission test_perm, KMemoryState dst_state, | ||
| 882 | KPageTable& src_page_table, bool send) { | ||
| 883 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 884 | ASSERT(src_page_table.IsLockedByCurrentThread()); | ||
| 885 | |||
| 886 | // Check that we can theoretically map. | ||
| 887 | const VAddr region_start = m_alias_region_start; | ||
| 888 | const size_t region_size = m_alias_region_end - m_alias_region_start; | ||
| 889 | R_UNLESS(size < region_size, ResultOutOfAddressSpace); | ||
| 890 | |||
| 891 | // Get aligned source extents. | ||
| 892 | const VAddr src_start = src_addr; | ||
| 893 | const VAddr src_end = src_addr + size; | ||
| 894 | const VAddr aligned_src_start = Common::AlignDown((src_start), PageSize); | ||
| 895 | const VAddr aligned_src_end = Common::AlignUp((src_start) + size, PageSize); | ||
| 896 | const VAddr mapping_src_start = Common::AlignUp((src_start), PageSize); | ||
| 897 | const VAddr mapping_src_end = Common::AlignDown((src_start) + size, PageSize); | ||
| 898 | const size_t aligned_src_size = aligned_src_end - aligned_src_start; | ||
| 899 | const size_t mapping_src_size = | ||
| 900 | (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0; | ||
| 901 | |||
| 902 | // Select a random address to map at. | ||
| 903 | VAddr dst_addr = | ||
| 904 | this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize, | ||
| 905 | PageSize, 0, this->GetNumGuardPages()); | ||
| 906 | |||
| 907 | R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace); | ||
| 908 | |||
| 909 | // Check that we can perform the operation we're about to perform. | ||
| 910 | ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state)); | ||
| 911 | |||
| 912 | // Create an update allocator. | ||
| 913 | Result allocator_result; | ||
| 914 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 915 | m_memory_block_slab_manager); | ||
| 916 | R_TRY(allocator_result); | ||
| 917 | |||
| 918 | // We're going to perform an update, so create a helper. | ||
| 919 | KScopedPageTableUpdater updater(this); | ||
| 920 | |||
| 921 | // Reserve space for any partial pages we allocate. | ||
| 922 | const size_t unmapped_size = aligned_src_size - mapping_src_size; | ||
| 923 | KScopedResourceReservation memory_reservation(m_resource_limit, | ||
| 924 | LimitableResource::PhysicalMemory, unmapped_size); | ||
| 925 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | ||
| 926 | |||
| 927 | // Ensure that we manage page references correctly. | ||
| 928 | PAddr start_partial_page = 0; | ||
| 929 | PAddr end_partial_page = 0; | ||
| 930 | VAddr cur_mapped_addr = dst_addr; | ||
| 931 | |||
| 932 | // If the partial pages are mapped, an extra reference will have been opened. Otherwise, | ||
| 933 | // they will be freed on scope exit. | ||
| 934 | SCOPE_EXIT({ | ||
| 935 | if (start_partial_page != 0) { | ||
| 936 | m_system.Kernel().MemoryManager().Close(start_partial_page, 1); | ||
| 937 | } | ||
| 938 | if (end_partial_page != 0) { | ||
| 939 | m_system.Kernel().MemoryManager().Close(end_partial_page, 1); | ||
| 940 | } | ||
| 941 | }); | ||
| 942 | |||
| 943 | ON_RESULT_FAILURE { | ||
| 944 | if (cur_mapped_addr != dst_addr) { | ||
| 945 | // HACK: Manually close the pages. | ||
| 946 | HACK_ClosePages(dst_addr, (cur_mapped_addr - dst_addr) / PageSize); | ||
| 947 | |||
| 948 | ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize, | ||
| 949 | KMemoryPermission::None, OperationType::Unmap) | ||
| 950 | .IsSuccess()); | ||
| 951 | } | ||
| 952 | }; | ||
| 953 | |||
| 954 | // Allocate the start page as needed. | ||
| 955 | if (aligned_src_start < mapping_src_start) { | ||
| 956 | start_partial_page = | ||
| 957 | m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option); | ||
| 958 | R_UNLESS(start_partial_page != 0, ResultOutOfMemory); | ||
| 959 | } | ||
| 960 | |||
| 961 | // Allocate the end page as needed. | ||
| 962 | if (mapping_src_end < aligned_src_end && | ||
| 963 | (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) { | ||
| 964 | end_partial_page = | ||
| 965 | m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option); | ||
| 966 | R_UNLESS(end_partial_page != 0, ResultOutOfMemory); | ||
| 967 | } | ||
| 968 | |||
| 969 | // Get the implementation. | ||
| 970 | auto& src_impl = src_page_table.PageTableImpl(); | ||
| 971 | |||
| 972 | // Get the fill value for partial pages. | ||
| 973 | const auto fill_val = m_ipc_fill_value; | ||
| 974 | |||
| 975 | // Begin traversal. | ||
| 976 | Common::PageTable::TraversalContext context; | ||
| 977 | Common::PageTable::TraversalEntry next_entry; | ||
| 978 | bool traverse_valid = src_impl.BeginTraversal(next_entry, context, aligned_src_start); | ||
| 979 | ASSERT(traverse_valid); | ||
| 980 | |||
| 981 | // Prepare tracking variables. | ||
| 982 | PAddr cur_block_addr = next_entry.phys_addr; | ||
| 983 | size_t cur_block_size = | ||
| 984 | next_entry.block_size - ((cur_block_addr) & (next_entry.block_size - 1)); | ||
| 985 | size_t tot_block_size = cur_block_size; | ||
| 986 | |||
| 987 | // Map the start page, if we have one. | ||
| 988 | if (start_partial_page != 0) { | ||
| 989 | // Ensure the page holds correct data. | ||
| 990 | const VAddr start_partial_virt = | ||
| 991 | GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), start_partial_page); | ||
| 992 | if (send) { | ||
| 993 | const size_t partial_offset = src_start - aligned_src_start; | ||
| 994 | size_t copy_size, clear_size; | ||
| 995 | if (src_end < mapping_src_start) { | ||
| 996 | copy_size = size; | ||
| 997 | clear_size = mapping_src_start - src_end; | ||
| 998 | } else { | ||
| 999 | copy_size = mapping_src_start - src_start; | ||
| 1000 | clear_size = 0; | ||
| 1001 | } | ||
| 1002 | |||
| 1003 | std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val, | ||
| 1004 | partial_offset); | ||
| 1005 | std::memcpy( | ||
| 1006 | m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset), | ||
| 1007 | m_system.Memory().GetPointer<void>( | ||
| 1008 | GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), cur_block_addr) + | ||
| 1009 | partial_offset), | ||
| 1010 | copy_size); | ||
| 1011 | if (clear_size > 0) { | ||
| 1012 | std::memset(m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset + | ||
| 1013 | copy_size), | ||
| 1014 | fill_val, clear_size); | ||
| 1015 | } | ||
| 1016 | } else { | ||
| 1017 | std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val, PageSize); | ||
| 1018 | } | ||
| 1019 | |||
| 1020 | // Map the page. | ||
| 1021 | R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page)); | ||
| 1022 | |||
| 1023 | // HACK: Manually open the pages. | ||
| 1024 | HACK_OpenPages(start_partial_page, 1); | ||
| 1025 | |||
| 1026 | // Update tracking extents. | ||
| 1027 | cur_mapped_addr += PageSize; | ||
| 1028 | cur_block_addr += PageSize; | ||
| 1029 | cur_block_size -= PageSize; | ||
| 1030 | |||
| 1031 | // If the block's size was one page, we may need to continue traversal. | ||
| 1032 | if (cur_block_size == 0 && aligned_src_size > PageSize) { | ||
| 1033 | traverse_valid = src_impl.ContinueTraversal(next_entry, context); | ||
| 1034 | ASSERT(traverse_valid); | ||
| 1035 | |||
| 1036 | cur_block_addr = next_entry.phys_addr; | ||
| 1037 | cur_block_size = next_entry.block_size; | ||
| 1038 | tot_block_size += next_entry.block_size; | ||
| 1039 | } | ||
| 1040 | } | ||
| 1041 | |||
| 1042 | // Map the remaining pages. | ||
| 1043 | while (aligned_src_start + tot_block_size < mapping_src_end) { | ||
| 1044 | // Continue the traversal. | ||
| 1045 | traverse_valid = src_impl.ContinueTraversal(next_entry, context); | ||
| 1046 | ASSERT(traverse_valid); | ||
| 1047 | |||
| 1048 | // Process the block. | ||
| 1049 | if (next_entry.phys_addr != cur_block_addr + cur_block_size) { | ||
| 1050 | // Map the block we've been processing so far. | ||
| 1051 | R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map, | ||
| 1052 | cur_block_addr)); | ||
| 1053 | |||
| 1054 | // HACK: Manually open the pages. | ||
| 1055 | HACK_OpenPages(cur_block_addr, cur_block_size / PageSize); | ||
| 1056 | |||
| 1057 | // Update tracking extents. | ||
| 1058 | cur_mapped_addr += cur_block_size; | ||
| 1059 | cur_block_addr = next_entry.phys_addr; | ||
| 1060 | cur_block_size = next_entry.block_size; | ||
| 1061 | } else { | ||
| 1062 | cur_block_size += next_entry.block_size; | ||
| 1063 | } | ||
| 1064 | tot_block_size += next_entry.block_size; | ||
| 1065 | } | ||
| 1066 | |||
| 1067 | // Handle the last direct-mapped page. | ||
| 1068 | if (const VAddr mapped_block_end = aligned_src_start + tot_block_size - cur_block_size; | ||
| 1069 | mapped_block_end < mapping_src_end) { | ||
| 1070 | const size_t last_block_size = mapping_src_end - mapped_block_end; | ||
| 1071 | |||
| 1072 | // Map the last block. | ||
| 1073 | R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map, | ||
| 1074 | cur_block_addr)); | ||
| 1075 | |||
| 1076 | // HACK: Manually open the pages. | ||
| 1077 | HACK_OpenPages(cur_block_addr, last_block_size / PageSize); | ||
| 1078 | |||
| 1079 | // Update tracking extents. | ||
| 1080 | cur_mapped_addr += last_block_size; | ||
| 1081 | cur_block_addr += last_block_size; | ||
| 1082 | if (mapped_block_end + cur_block_size < aligned_src_end && | ||
| 1083 | cur_block_size == last_block_size) { | ||
| 1084 | traverse_valid = src_impl.ContinueTraversal(next_entry, context); | ||
| 1085 | ASSERT(traverse_valid); | ||
| 1086 | |||
| 1087 | cur_block_addr = next_entry.phys_addr; | ||
| 1088 | } | ||
| 1089 | } | ||
| 1090 | |||
| 1091 | // Map the end page, if we have one. | ||
| 1092 | if (end_partial_page != 0) { | ||
| 1093 | // Ensure the page holds correct data. | ||
| 1094 | const VAddr end_partial_virt = | ||
| 1095 | GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), end_partial_page); | ||
| 1096 | if (send) { | ||
| 1097 | const size_t copy_size = src_end - mapping_src_end; | ||
| 1098 | std::memcpy(m_system.Memory().GetPointer<void>(end_partial_virt), | ||
| 1099 | m_system.Memory().GetPointer<void>(GetHeapVirtualAddress( | ||
| 1100 | m_system.Kernel().MemoryLayout(), cur_block_addr)), | ||
| 1101 | copy_size); | ||
| 1102 | std::memset(m_system.Memory().GetPointer<void>(end_partial_virt + copy_size), fill_val, | ||
| 1103 | PageSize - copy_size); | ||
| 1104 | } else { | ||
| 1105 | std::memset(m_system.Memory().GetPointer<void>(end_partial_virt), fill_val, PageSize); | ||
| 1106 | } | ||
| 1107 | |||
| 1108 | // Map the page. | ||
| 1109 | R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page)); | ||
| 1110 | |||
| 1111 | // HACK: Manually open the pages. | ||
| 1112 | HACK_OpenPages(end_partial_page, 1); | ||
| 1113 | } | ||
| 1114 | |||
| 1115 | // Update memory blocks to reflect our changes | ||
| 1116 | m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize, | ||
| 1117 | dst_state, test_perm, KMemoryAttribute::None, | ||
| 1118 | KMemoryBlockDisableMergeAttribute::Normal, | ||
| 1119 | KMemoryBlockDisableMergeAttribute::None); | ||
| 1120 | |||
| 1121 | // Set the output address. | ||
| 1122 | *out_addr = dst_addr + (src_start - aligned_src_start); | ||
| 1123 | |||
| 1124 | // We succeeded. | ||
| 1125 | memory_reservation.Commit(); | ||
| 1126 | R_SUCCEED(); | ||
| 1127 | } | ||
| 1128 | |||
| 1129 | Result KPageTable::SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, | ||
| 1130 | KPageTable& src_page_table, KMemoryPermission test_perm, | ||
| 1131 | KMemoryState dst_state, bool send) { | ||
| 1132 | // For convenience, alias this. | ||
| 1133 | KPageTable& dst_page_table = *this; | ||
| 1134 | |||
| 1135 | // Acquire the table locks. | ||
| 1136 | KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock); | ||
| 1137 | |||
| 1138 | // We're going to perform an update, so create a helper. | ||
| 1139 | KScopedPageTableUpdater updater(std::addressof(src_page_table)); | ||
| 1140 | |||
| 1141 | // Perform client setup. | ||
| 1142 | size_t num_allocator_blocks; | ||
| 1143 | R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(), | ||
| 1144 | std::addressof(num_allocator_blocks), src_addr, size, | ||
| 1145 | test_perm, dst_state)); | ||
| 1146 | |||
| 1147 | // Create an update allocator. | ||
| 1148 | Result allocator_result; | ||
| 1149 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1150 | src_page_table.m_memory_block_slab_manager, | ||
| 1151 | num_allocator_blocks); | ||
| 1152 | R_TRY(allocator_result); | ||
| 1153 | |||
| 1154 | // Get the mapped extents. | ||
| 1155 | const VAddr src_map_start = Common::AlignUp((src_addr), PageSize); | ||
| 1156 | const VAddr src_map_end = Common::AlignDown((src_addr) + size, PageSize); | ||
| 1157 | const size_t src_map_size = src_map_end - src_map_start; | ||
| 1158 | |||
| 1159 | // Ensure that we clean up appropriately if we fail after this. | ||
| 1160 | const auto src_perm = (test_perm == KMemoryPermission::UserReadWrite) | ||
| 1161 | ? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped | ||
| 1162 | : KMemoryPermission::UserRead; | ||
| 1163 | ON_RESULT_FAILURE { | ||
| 1164 | if (src_map_end > src_map_start) { | ||
| 1165 | src_page_table.CleanupForIpcClientOnServerSetupFailure( | ||
| 1166 | updater.GetPageList(), src_map_start, src_map_size, src_perm); | ||
| 1167 | } | ||
| 1168 | }; | ||
| 1169 | |||
| 1170 | // Perform server setup. | ||
| 1171 | R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state, | ||
| 1172 | src_page_table, send)); | ||
| 1173 | |||
| 1174 | // If anything was mapped, ipc-lock the pages. | ||
| 1175 | if (src_map_start < src_map_end) { | ||
| 1176 | // Get the source permission. | ||
| 1177 | src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start, | ||
| 1178 | (src_map_end - src_map_start) / PageSize, | ||
| 1179 | &KMemoryBlock::LockForIpc, src_perm); | ||
| 1180 | } | ||
| 1181 | |||
| 1182 | R_SUCCEED(); | ||
| 1183 | } | ||
| 1184 | |||
| 1185 | Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state) { | ||
| 1186 | // Validate the address. | ||
| 1187 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 1188 | |||
| 1189 | // Lock the table. | ||
| 1190 | KScopedLightLock lk(m_general_lock); | ||
| 1191 | |||
| 1192 | // Validate the memory state. | ||
| 1193 | size_t num_allocator_blocks; | ||
| 1194 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 1195 | KMemoryState::All, dst_state, KMemoryPermission::UserRead, | ||
| 1196 | KMemoryPermission::UserRead, KMemoryAttribute::All, | ||
| 1197 | KMemoryAttribute::None)); | ||
| 1198 | |||
| 1199 | // Create an update allocator. | ||
| 1200 | Result allocator_result; | ||
| 1201 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1202 | m_memory_block_slab_manager, num_allocator_blocks); | ||
| 1203 | R_TRY(allocator_result); | ||
| 1204 | |||
| 1205 | // We're going to perform an update, so create a helper. | ||
| 1206 | KScopedPageTableUpdater updater(this); | ||
| 1207 | |||
| 1208 | // Get aligned extents. | ||
| 1209 | const VAddr aligned_start = Common::AlignDown((address), PageSize); | ||
| 1210 | const VAddr aligned_end = Common::AlignUp((address) + size, PageSize); | ||
| 1211 | const size_t aligned_size = aligned_end - aligned_start; | ||
| 1212 | const size_t aligned_num_pages = aligned_size / PageSize; | ||
| 1213 | |||
| 1214 | // HACK: Manually close the pages. | ||
| 1215 | HACK_ClosePages(aligned_start, aligned_num_pages); | ||
| 1216 | |||
| 1217 | // Unmap the pages. | ||
| 1218 | R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap)); | ||
| 1219 | |||
| 1220 | // Update memory blocks. | ||
| 1221 | m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages, | ||
| 1222 | KMemoryState::None, KMemoryPermission::None, | ||
| 1223 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | ||
| 1224 | KMemoryBlockDisableMergeAttribute::Normal); | ||
| 1225 | |||
| 1226 | // Release from the resource limit as relevant. | ||
| 1227 | const VAddr mapping_start = Common::AlignUp((address), PageSize); | ||
| 1228 | const VAddr mapping_end = Common::AlignDown((address) + size, PageSize); | ||
| 1229 | const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0; | ||
| 1230 | m_resource_limit->Release(LimitableResource::PhysicalMemory, aligned_size - mapping_size); | ||
| 1231 | |||
| 1232 | R_SUCCEED(); | ||
| 1233 | } | ||
| 1234 | |||
| 1235 | Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state) { | ||
| 1236 | // Validate the address. | ||
| 1237 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 1238 | |||
| 1239 | // Get aligned source extents. | ||
| 1240 | const VAddr mapping_start = Common::AlignUp((address), PageSize); | ||
| 1241 | const VAddr mapping_end = Common::AlignDown((address) + size, PageSize); | ||
| 1242 | const VAddr mapping_last = mapping_end - 1; | ||
| 1243 | const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0; | ||
| 1244 | |||
| 1245 | // If nothing was mapped, we're actually done immediately. | ||
| 1246 | R_SUCCEED_IF(mapping_size == 0); | ||
| 1247 | |||
| 1248 | // Get the test state and attribute mask. | ||
| 1249 | KMemoryState test_state; | ||
| 1250 | KMemoryAttribute test_attr_mask; | ||
| 1251 | switch (dst_state) { | ||
| 1252 | case KMemoryState::Ipc: | ||
| 1253 | test_state = KMemoryState::FlagCanUseIpc; | ||
| 1254 | test_attr_mask = | ||
| 1255 | KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked; | ||
| 1256 | break; | ||
| 1257 | case KMemoryState::NonSecureIpc: | ||
| 1258 | test_state = KMemoryState::FlagCanUseNonSecureIpc; | ||
| 1259 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 1260 | break; | ||
| 1261 | case KMemoryState::NonDeviceIpc: | ||
| 1262 | test_state = KMemoryState::FlagCanUseNonDeviceIpc; | ||
| 1263 | test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked; | ||
| 1264 | break; | ||
| 1265 | default: | ||
| 1266 | R_THROW(ResultInvalidCombination); | ||
| 1267 | } | ||
| 1268 | |||
| 1269 | // Lock the table. | ||
| 1270 | // NOTE: Nintendo does this *after* creating the updater below, but this does not follow | ||
| 1271 | // convention elsewhere in KPageTable. | ||
| 1272 | KScopedLightLock lk(m_general_lock); | ||
| 1273 | |||
| 1274 | // We're going to perform an update, so create a helper. | ||
| 1275 | KScopedPageTableUpdater updater(this); | ||
| 1276 | |||
| 1277 | // Ensure that on failure, we roll back appropriately. | ||
| 1278 | size_t mapped_size = 0; | ||
| 1279 | ON_RESULT_FAILURE { | ||
| 1280 | if (mapped_size > 0) { | ||
| 1281 | // Determine where the mapping ends. | ||
| 1282 | const auto mapped_end = (mapping_start) + mapped_size; | ||
| 1283 | const auto mapped_last = mapped_end - 1; | ||
| 1284 | |||
| 1285 | // Get current and next iterators. | ||
| 1286 | KMemoryBlockManager::const_iterator start_it = | ||
| 1287 | m_memory_block_manager.FindIterator(mapping_start); | ||
| 1288 | KMemoryBlockManager::const_iterator next_it = start_it; | ||
| 1289 | ++next_it; | ||
| 1290 | |||
| 1291 | // Get the current block info. | ||
| 1292 | KMemoryInfo cur_info = start_it->GetMemoryInfo(); | ||
| 1293 | |||
| 1294 | // Create tracking variables. | ||
| 1295 | VAddr cur_address = cur_info.GetAddress(); | ||
| 1296 | size_t cur_size = cur_info.GetSize(); | ||
| 1297 | bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); | ||
| 1298 | bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; | ||
| 1299 | bool first = | ||
| 1300 | cur_info.GetIpcDisableMergeCount() == 1 && | ||
| 1301 | (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) == | ||
| 1302 | KMemoryBlockDisableMergeAttribute::None; | ||
| 1303 | |||
| 1304 | while (((cur_address) + cur_size - 1) < mapped_last) { | ||
| 1305 | // Check that we have a next block. | ||
| 1306 | ASSERT(next_it != m_memory_block_manager.end()); | ||
| 1307 | |||
| 1308 | // Get the next info. | ||
| 1309 | const KMemoryInfo next_info = next_it->GetMemoryInfo(); | ||
| 1310 | |||
| 1311 | // Check if we can consolidate the next block's permission set with the current one. | ||
| 1312 | |||
| 1313 | const bool next_perm_eq = | ||
| 1314 | next_info.GetPermission() == next_info.GetOriginalPermission(); | ||
| 1315 | const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1; | ||
| 1316 | if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && | ||
| 1317 | cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) { | ||
| 1318 | // We can consolidate the reprotection for the current and next block into a | ||
| 1319 | // single call. | ||
| 1320 | cur_size += next_info.GetSize(); | ||
| 1321 | } else { | ||
| 1322 | // We have to operate on the current block. | ||
| 1323 | if ((cur_needs_set_perm || first) && !cur_perm_eq) { | ||
| 1324 | ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(), | ||
| 1325 | OperationType::ChangePermissions) | ||
| 1326 | .IsSuccess()); | ||
| 1327 | } | ||
| 1328 | |||
| 1329 | // Advance. | ||
| 1330 | cur_address = next_info.GetAddress(); | ||
| 1331 | cur_size = next_info.GetSize(); | ||
| 1332 | first = false; | ||
| 1333 | } | ||
| 1334 | |||
| 1335 | // Advance. | ||
| 1336 | cur_info = next_info; | ||
| 1337 | cur_perm_eq = next_perm_eq; | ||
| 1338 | cur_needs_set_perm = next_needs_set_perm; | ||
| 1339 | ++next_it; | ||
| 1340 | } | ||
| 1341 | |||
| 1342 | // Process the last block. | ||
| 1343 | if ((first || cur_needs_set_perm) && !cur_perm_eq) { | ||
| 1344 | ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(), | ||
| 1345 | OperationType::ChangePermissions) | ||
| 1346 | .IsSuccess()); | ||
| 1347 | } | ||
| 1348 | } | ||
| 1349 | }; | ||
| 1350 | |||
| 1351 | // Iterate, reprotecting as needed. | ||
| 1352 | { | ||
| 1353 | // Get current and next iterators. | ||
| 1354 | KMemoryBlockManager::const_iterator start_it = | ||
| 1355 | m_memory_block_manager.FindIterator(mapping_start); | ||
| 1356 | KMemoryBlockManager::const_iterator next_it = start_it; | ||
| 1357 | ++next_it; | ||
| 1358 | |||
| 1359 | // Validate the current block. | ||
| 1360 | KMemoryInfo cur_info = start_it->GetMemoryInfo(); | ||
| 1361 | ASSERT(this->CheckMemoryState(cur_info, test_state, test_state, KMemoryPermission::None, | ||
| 1362 | KMemoryPermission::None, | ||
| 1363 | test_attr_mask | KMemoryAttribute::IpcLocked, | ||
| 1364 | KMemoryAttribute::IpcLocked) | ||
| 1365 | .IsSuccess()); | ||
| 1366 | |||
| 1367 | // Create tracking variables. | ||
| 1368 | VAddr cur_address = cur_info.GetAddress(); | ||
| 1369 | size_t cur_size = cur_info.GetSize(); | ||
| 1370 | bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission(); | ||
| 1371 | bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1; | ||
| 1372 | bool first = | ||
| 1373 | cur_info.GetIpcDisableMergeCount() == 1 && | ||
| 1374 | (cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) == | ||
| 1375 | KMemoryBlockDisableMergeAttribute::None; | ||
| 1376 | |||
| 1377 | while ((cur_address + cur_size - 1) < mapping_last) { | ||
| 1378 | // Check that we have a next block. | ||
| 1379 | ASSERT(next_it != m_memory_block_manager.end()); | ||
| 1380 | |||
| 1381 | // Get the next info. | ||
| 1382 | const KMemoryInfo next_info = next_it->GetMemoryInfo(); | ||
| 1383 | |||
| 1384 | // Validate the next block. | ||
| 1385 | ASSERT(this->CheckMemoryState(next_info, test_state, test_state, | ||
| 1386 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 1387 | test_attr_mask | KMemoryAttribute::IpcLocked, | ||
| 1388 | KMemoryAttribute::IpcLocked) | ||
| 1389 | .IsSuccess()); | ||
| 1390 | |||
| 1391 | // Check if we can consolidate the next block's permission set with the current one. | ||
| 1392 | const bool next_perm_eq = | ||
| 1393 | next_info.GetPermission() == next_info.GetOriginalPermission(); | ||
| 1394 | const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1; | ||
| 1395 | if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && | ||
| 1396 | cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) { | ||
| 1397 | // We can consolidate the reprotection for the current and next block into a single | ||
| 1398 | // call. | ||
| 1399 | cur_size += next_info.GetSize(); | ||
| 1400 | } else { | ||
| 1401 | // We have to operate on the current block. | ||
| 1402 | if ((cur_needs_set_perm || first) && !cur_perm_eq) { | ||
| 1403 | R_TRY(Operate(cur_address, cur_size / PageSize, | ||
| 1404 | cur_needs_set_perm ? cur_info.GetOriginalPermission() | ||
| 1405 | : cur_info.GetPermission(), | ||
| 1406 | OperationType::ChangePermissions)); | ||
| 1407 | } | ||
| 1408 | |||
| 1409 | // Mark that we mapped the block. | ||
| 1410 | mapped_size += cur_size; | ||
| 1411 | |||
| 1412 | // Advance. | ||
| 1413 | cur_address = next_info.GetAddress(); | ||
| 1414 | cur_size = next_info.GetSize(); | ||
| 1415 | first = false; | ||
| 1416 | } | ||
| 1417 | |||
| 1418 | // Advance. | ||
| 1419 | cur_info = next_info; | ||
| 1420 | cur_perm_eq = next_perm_eq; | ||
| 1421 | cur_needs_set_perm = next_needs_set_perm; | ||
| 1422 | ++next_it; | ||
| 1423 | } | ||
| 1424 | |||
| 1425 | // Process the last block. | ||
| 1426 | const auto lock_count = | ||
| 1427 | cur_info.GetIpcLockCount() + | ||
| 1428 | (next_it != m_memory_block_manager.end() | ||
| 1429 | ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) | ||
| 1430 | : 0); | ||
| 1431 | if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) { | ||
| 1432 | R_TRY(Operate(cur_address, cur_size / PageSize, | ||
| 1433 | cur_needs_set_perm ? cur_info.GetOriginalPermission() | ||
| 1434 | : cur_info.GetPermission(), | ||
| 1435 | OperationType::ChangePermissions)); | ||
| 1436 | } | ||
| 1437 | } | ||
| 1438 | |||
| 1439 | // Create an update allocator. | ||
| 1440 | // NOTE: Guaranteed zero blocks needed here. | ||
| 1441 | Result allocator_result; | ||
| 1442 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | ||
| 1443 | m_memory_block_slab_manager, 0); | ||
| 1444 | R_TRY(allocator_result); | ||
| 1445 | |||
| 1446 | // Unlock the pages. | ||
| 1447 | m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start, | ||
| 1448 | mapping_size / PageSize, &KMemoryBlock::UnlockForIpc, | ||
| 1449 | KMemoryPermission::None); | ||
| 1450 | |||
| 1451 | R_SUCCEED(); | ||
| 1452 | } | ||
| 1453 | |||
| 1454 | void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLinkedList* page_list, | ||
| 1455 | VAddr address, size_t size, | ||
| 1456 | KMemoryPermission prot_perm) { | ||
| 1457 | ASSERT(this->IsLockedByCurrentThread()); | ||
| 1458 | ASSERT(Common::IsAligned(address, PageSize)); | ||
| 1459 | ASSERT(Common::IsAligned(size, PageSize)); | ||
| 1460 | |||
| 1461 | // Get the mapped extents. | ||
| 1462 | const VAddr src_map_start = address; | ||
| 1463 | const VAddr src_map_end = address + size; | ||
| 1464 | const VAddr src_map_last = src_map_end - 1; | ||
| 1465 | |||
| 1466 | // This function is only invoked when there's something to do. | ||
| 1467 | ASSERT(src_map_end > src_map_start); | ||
| 1468 | |||
| 1469 | // Iterate over blocks, fixing permissions. | ||
| 1470 | KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address); | ||
| 1471 | while (true) { | ||
| 1472 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1473 | |||
| 1474 | const auto cur_start = | ||
| 1475 | info.GetAddress() >= src_map_start ? info.GetAddress() : src_map_start; | ||
| 1476 | const auto cur_end = | ||
| 1477 | src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress(); | ||
| 1478 | |||
| 1479 | // If we can, fix the protections on the block. | ||
| 1480 | if ((info.GetIpcLockCount() == 0 && | ||
| 1481 | (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) || | ||
| 1482 | (info.GetIpcLockCount() != 0 && | ||
| 1483 | (info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) { | ||
| 1484 | // Check if we actually need to fix the protections on the block. | ||
| 1485 | if (cur_end == src_map_end || info.GetAddress() <= src_map_start || | ||
| 1486 | (info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) { | ||
| 1487 | ASSERT(Operate(cur_start, (cur_end - cur_start) / PageSize, info.GetPermission(), | ||
| 1488 | OperationType::ChangePermissions) | ||
| 1489 | .IsSuccess()); | ||
| 1490 | } | ||
| 1491 | } | ||
| 1492 | |||
| 1493 | // If we're past the end of the region, we're done. | ||
| 1494 | if (src_map_last <= info.GetLastAddress()) { | ||
| 1495 | break; | ||
| 1496 | } | ||
| 1497 | |||
| 1498 | // Advance. | ||
| 1499 | ++it; | ||
| 1500 | ASSERT(it != m_memory_block_manager.end()); | ||
| 1501 | } | ||
| 1502 | } | ||
| 1503 | |||
| 1504 | void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) { | ||
| 1505 | m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages); | ||
| 1506 | } | ||
| 1507 | |||
| 1508 | void KPageTable::HACK_ClosePages(VAddr virt_addr, size_t num_pages) { | ||
| 1509 | for (size_t index = 0; index < num_pages; ++index) { | ||
| 1510 | const auto paddr = GetPhysicalAddr(virt_addr + (index * PageSize)); | ||
| 1511 | m_system.Kernel().MemoryManager().Close(paddr, 1); | ||
| 1512 | } | ||
| 1513 | } | ||
| 1514 | |||
| 689 | Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | 1515 | Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { |
| 690 | // Lock the physical memory lock. | 1516 | // Lock the physical memory lock. |
| 691 | KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); | 1517 | KScopedLightLock phys_lk(m_map_physical_memory_lock); |
| 692 | 1518 | ||
| 693 | // Calculate the last address for convenience. | 1519 | // Calculate the last address for convenience. |
| 694 | const VAddr last_address = address + size - 1; | 1520 | const VAddr last_address = address + size - 1; |
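The IPC setup and cleanup paths above repeatedly derive four extents from an unaligned buffer: the aligned range covers every page the buffer touches, the mapping range covers only pages fully contained in the buffer, and the leftover head/tail become copied partial pages. A small sketch of that arithmetic (hypothetical helper names):

#include <cstddef>
#include <cstdint>

constexpr std::uint64_t PageSize = 0x1000;

constexpr std::uint64_t AlignDown(std::uint64_t v, std::uint64_t a) { return v & ~(a - 1); }
constexpr std::uint64_t AlignUp(std::uint64_t v, std::uint64_t a) { return AlignDown(v + a - 1, a); }

struct IpcExtents {
    std::uint64_t aligned_start, aligned_end, mapping_start, mapping_end;
};

constexpr IpcExtents ComputeIpcExtents(std::uint64_t address, std::size_t size) {
    return {AlignDown(address, PageSize), AlignUp(address + size, PageSize),
            AlignUp(address, PageSize), AlignDown(address + size, PageSize)};
}

// A buffer starting 0x100 into one page and ending 0x300 into another touches
// whole pages at both ends but can only direct-map the fully covered middle.
static_assert(ComputeIpcExtents(0x10100, 0x2200).aligned_start == 0x10000);
static_assert(ComputeIpcExtents(0x10100, 0x2200).mapping_start == 0x11000);
static_assert(ComputeIpcExtents(0x10100, 0x2200).mapping_end == 0x12000);
static_assert(ComputeIpcExtents(0x10100, 0x2200).aligned_end == 0x13000);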
| @@ -742,15 +1568,19 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 742 | { | 1568 | { |
| 743 | // Reserve the memory from the process resource limit. | 1569 | // Reserve the memory from the process resource limit. |
| 744 | KScopedResourceReservation memory_reservation( | 1570 | KScopedResourceReservation memory_reservation( |
| 745 | m_system.Kernel().CurrentProcess()->GetResourceLimit(), | 1571 | m_resource_limit, LimitableResource::PhysicalMemory, size - mapped_size); |
| 746 | LimitableResource::PhysicalMemory, size - mapped_size); | ||
| 747 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 1572 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 748 | 1573 | ||
| 749 | // Allocate pages for the new memory. | 1574 | // Allocate pages for the new memory. |
| 750 | KPageGroup pg; | 1575 | KPageGroup pg; |
| 751 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( | 1576 | R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess( |
| 752 | &pg, (size - mapped_size) / PageSize, | 1577 | &pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0)); |
| 753 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); | 1578 | |
| 1579 | // If we fail in the next bit (or retry), we need to cleanup the pages. | ||
| 1580 | // auto pg_guard = SCOPE_GUARD { | ||
| 1581 | // pg.OpenFirst(); | ||
| 1582 | // pg.Close(); | ||
| 1583 | //}; | ||
| 754 | 1584 | ||
| 755 | // Map the memory. | 1585 | // Map the memory. |
| 756 | { | 1586 | { |
| @@ -810,15 +1640,24 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 810 | 1640 | ||
| 811 | // Create an update allocator. | 1641 | // Create an update allocator. |
| 812 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | 1642 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); |
| 813 | Result allocator_result{ResultSuccess}; | 1643 | Result allocator_result; |
| 814 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1644 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 815 | m_memory_block_slab_manager, | 1645 | m_memory_block_slab_manager, |
| 816 | num_allocator_blocks); | 1646 | num_allocator_blocks); |
| 817 | R_TRY(allocator_result); | 1647 | R_TRY(allocator_result); |
| 818 | 1648 | ||
| 1649 | // We're going to perform an update, so create a helper. | ||
| 1650 | KScopedPageTableUpdater updater(this); | ||
| 1651 | |||
| 1652 | // Prepare to iterate over the memory. | ||
| 1653 | auto pg_it = pg.Nodes().begin(); | ||
| 1654 | PAddr pg_phys_addr = pg_it->GetAddress(); | ||
| 1655 | size_t pg_pages = pg_it->GetNumPages(); | ||
| 1656 | |||
| 819 | // Reset the current tracking address, and make sure we clean up on failure. | 1657 | // Reset the current tracking address, and make sure we clean up on failure. |
| 1658 | // pg_guard.Cancel(); | ||
| 820 | cur_address = address; | 1659 | cur_address = address; |
| 821 | auto unmap_guard = detail::ScopeExit([&] { | 1660 | ON_RESULT_FAILURE { |
| 822 | if (cur_address > address) { | 1661 | if (cur_address > address) { |
| 823 | const VAddr last_unmap_address = cur_address - 1; | 1662 | const VAddr last_unmap_address = cur_address - 1; |
| 824 | 1663 | ||
| @@ -841,6 +1680,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 841 | last_unmap_address + 1 - cur_address) / | 1680 | last_unmap_address + 1 - cur_address) / |
| 842 | PageSize; | 1681 | PageSize; |
| 843 | 1682 | ||
| 1683 | // HACK: Manually close the pages. | ||
| 1684 | HACK_ClosePages(cur_address, cur_pages); | ||
| 1685 | |||
| 844 | // Unmap. | 1686 | // Unmap. |
| 845 | ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, | 1687 | ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, |
| 846 | OperationType::Unmap) | 1688 | OperationType::Unmap) |
| @@ -857,12 +1699,17 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 857 | ++it; | 1699 | ++it; |
| 858 | } | 1700 | } |
| 859 | } | 1701 | } |
| 860 | }); | ||
| 861 | 1702 | ||
| 862 | // Iterate over the memory. | 1703 | // Release any remaining unmapped memory. |
| 863 | auto pg_it = pg.Nodes().begin(); | 1704 | m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages); |
| 864 | PAddr pg_phys_addr = pg_it->GetAddress(); | 1705 | m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages); |
| 865 | size_t pg_pages = pg_it->GetNumPages(); | 1706 | for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) { |
| 1707 | m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(), | ||
| 1708 | pg_it->GetNumPages()); | ||
| 1709 | m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(), | ||
| 1710 | pg_it->GetNumPages()); | ||
| 1711 | } | ||
| 1712 | }; | ||
| 866 | 1713 | ||
| 867 | auto it = m_memory_block_manager.FindIterator(cur_address); | 1714 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| 868 | while (true) { | 1715 | while (true) { |
| @@ -897,6 +1744,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 897 | R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite, | 1744 | R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite, |
| 898 | OperationType::Map, pg_phys_addr)); | 1745 | OperationType::Map, pg_phys_addr)); |
| 899 | 1746 | ||
| 1747 | // HACK: Manually open the pages. | ||
| 1748 | HACK_OpenPages(pg_phys_addr, cur_pages); | ||
| 1749 | |||
| 900 | // Advance. | 1750 | // Advance. |
| 901 | cur_address += cur_pages * PageSize; | 1751 | cur_address += cur_pages * PageSize; |
| 902 | map_pages -= cur_pages; | 1752 | map_pages -= cur_pages; |
| @@ -928,9 +1778,6 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 928 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, | 1778 | KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, |
| 929 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None); | 1779 | KMemoryPermission::UserReadWrite, KMemoryAttribute::None); |
| 930 | 1780 | ||
| 931 | // Cancel our guard. | ||
| 932 | unmap_guard.Cancel(); | ||
| 933 | |||
| 934 | R_SUCCEED(); | 1781 | R_SUCCEED(); |
| 935 | } | 1782 | } |
| 936 | } | 1783 | } |
| @@ -939,7 +1786,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { | |||
| 939 | 1786 | ||
| 940 | Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | 1787 | Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { |
| 941 | // Lock the physical memory lock. | 1788 | // Lock the physical memory lock. |
| 942 | KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); | 1789 | KScopedLightLock phys_lk(m_map_physical_memory_lock); |
| 943 | 1790 | ||
| 944 | // Lock the table. | 1791 | // Lock the table. |
| 945 | KScopedLightLock lk(m_general_lock); | 1792 | KScopedLightLock lk(m_general_lock); |
| @@ -948,8 +1795,11 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 948 | const VAddr last_address = address + size - 1; | 1795 | const VAddr last_address = address + size - 1; |
| 949 | 1796 | ||
| 950 | // Define iteration variables. | 1797 | // Define iteration variables. |
| 951 | VAddr cur_address = 0; | 1798 | VAddr map_start_address = 0; |
| 952 | size_t mapped_size = 0; | 1799 | VAddr map_last_address = 0; |
| 1800 | |||
| 1801 | VAddr cur_address; | ||
| 1802 | size_t mapped_size; | ||
| 953 | size_t num_allocator_blocks = 0; | 1803 | size_t num_allocator_blocks = 0; |
| 954 | 1804 | ||
| 955 | // Check if the memory is mapped. | 1805 | // Check if the memory is mapped. |
| @@ -975,27 +1825,27 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 975 | if (is_normal) { | 1825 | if (is_normal) { |
| 976 | R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory); | 1826 | R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory); |
| 977 | 1827 | ||
| 1828 | if (map_start_address == 0) { | ||
| 1829 | map_start_address = cur_address; | ||
| 1830 | } | ||
| 1831 | map_last_address = | ||
| 1832 | (last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address; | ||
| 1833 | |||
| 978 | if (info.GetAddress() < address) { | 1834 | if (info.GetAddress() < address) { |
| 979 | ++num_allocator_blocks; | 1835 | ++num_allocator_blocks; |
| 980 | } | 1836 | } |
| 981 | if (last_address < info.GetLastAddress()) { | 1837 | if (last_address < info.GetLastAddress()) { |
| 982 | ++num_allocator_blocks; | 1838 | ++num_allocator_blocks; |
| 983 | } | 1839 | } |
| 1840 | |||
| 1841 | mapped_size += (map_last_address + 1 - cur_address); | ||
| 984 | } | 1842 | } |
| 985 | 1843 | ||
| 986 | // Check if we're done. | 1844 | // Check if we're done. |
| 987 | if (last_address <= info.GetLastAddress()) { | 1845 | if (last_address <= info.GetLastAddress()) { |
| 988 | if (is_normal) { | ||
| 989 | mapped_size += (last_address + 1 - cur_address); | ||
| 990 | } | ||
| 991 | break; | 1846 | break; |
| 992 | } | 1847 | } |
| 993 | 1848 | ||
| 994 | // Track the memory if it's mapped. | ||
| 995 | if (is_normal) { | ||
| 996 | mapped_size += VAddr(info.GetEndAddress()) - cur_address; | ||
| 997 | } | ||
| 998 | |||
| 999 | // Advance. | 1849 | // Advance. |
| 1000 | cur_address = info.GetEndAddress(); | 1850 | cur_address = info.GetEndAddress(); |
| 1001 | ++it; | 1851 | ++it; |
| @@ -1005,125 +1855,22 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 1005 | R_SUCCEED_IF(mapped_size == 0); | 1855 | R_SUCCEED_IF(mapped_size == 0); |
| 1006 | } | 1856 | } |
| 1007 | 1857 | ||
| 1008 | // Make a page group for the unmap region. | ||
| 1009 | KPageGroup pg; | ||
| 1010 | { | ||
| 1011 | auto& impl = this->PageTableImpl(); | ||
| 1012 | |||
| 1013 | // Begin traversal. | ||
| 1014 | Common::PageTable::TraversalContext context; | ||
| 1015 | Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0}; | ||
| 1016 | bool cur_valid = false; | ||
| 1017 | Common::PageTable::TraversalEntry next_entry; | ||
| 1018 | bool next_valid = false; | ||
| 1019 | size_t tot_size = 0; | ||
| 1020 | |||
| 1021 | cur_address = address; | ||
| 1022 | next_valid = impl.BeginTraversal(next_entry, context, cur_address); | ||
| 1023 | next_entry.block_size = | ||
| 1024 | (next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1))); | ||
| 1025 | |||
| 1026 | // Iterate, building the group. | ||
| 1027 | while (true) { | ||
| 1028 | if ((!next_valid && !cur_valid) || | ||
| 1029 | (next_valid && cur_valid && | ||
| 1030 | next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) { | ||
| 1031 | cur_entry.block_size += next_entry.block_size; | ||
| 1032 | } else { | ||
| 1033 | if (cur_valid) { | ||
| 1034 | // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr)); | ||
| 1035 | R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize)); | ||
| 1036 | } | ||
| 1037 | |||
| 1038 | // Update tracking variables. | ||
| 1039 | tot_size += cur_entry.block_size; | ||
| 1040 | cur_entry = next_entry; | ||
| 1041 | cur_valid = next_valid; | ||
| 1042 | } | ||
| 1043 | |||
| 1044 | if (cur_entry.block_size + tot_size >= size) { | ||
| 1045 | break; | ||
| 1046 | } | ||
| 1047 | |||
| 1048 | next_valid = impl.ContinueTraversal(next_entry, context); | ||
| 1049 | } | ||
| 1050 | |||
| 1051 | // Add the last block. | ||
| 1052 | if (cur_valid) { | ||
| 1053 | // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr)); | ||
| 1054 | R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize)); | ||
| 1055 | } | ||
| 1056 | } | ||
| 1057 | ASSERT(pg.GetNumPages() == mapped_size / PageSize); | ||
| 1058 | |||
| 1059 | // Create an update allocator. | 1858 | // Create an update allocator. |
| 1060 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); | 1859 | ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); |
| 1061 | Result allocator_result{ResultSuccess}; | 1860 | Result allocator_result; |
| 1062 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 1861 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1063 | m_memory_block_slab_manager, num_allocator_blocks); | 1862 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1064 | R_TRY(allocator_result); | 1863 | R_TRY(allocator_result); |
| 1065 | 1864 | ||
| 1066 | // Reset the current tracking address, and make sure we clean up on failure. | 1865 | // We're going to perform an update, so create a helper. |
| 1067 | cur_address = address; | 1866 | KScopedPageTableUpdater updater(this); |
| 1068 | auto remap_guard = detail::ScopeExit([&] { | ||
| 1069 | if (cur_address > address) { | ||
| 1070 | const VAddr last_map_address = cur_address - 1; | ||
| 1071 | cur_address = address; | ||
| 1072 | |||
| 1073 | // Iterate over the memory we unmapped. | ||
| 1074 | auto it = m_memory_block_manager.FindIterator(cur_address); | ||
| 1075 | auto pg_it = pg.Nodes().begin(); | ||
| 1076 | PAddr pg_phys_addr = pg_it->GetAddress(); | ||
| 1077 | size_t pg_pages = pg_it->GetNumPages(); | ||
| 1078 | |||
| 1079 | while (true) { | ||
| 1080 | // Get the memory info for the pages we unmapped, convert to property. | ||
| 1081 | const KMemoryInfo info = it->GetMemoryInfo(); | ||
| 1082 | |||
| 1083 | // If the memory is normal, we unmapped it and need to re-map it. | ||
| 1084 | if (info.GetState() == KMemoryState::Normal) { | ||
| 1085 | // Determine the range to map. | ||
| 1086 | size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address, | ||
| 1087 | last_map_address + 1 - cur_address) / | ||
| 1088 | PageSize; | ||
| 1089 | |||
| 1090 | // While we have pages to map, map them. | ||
| 1091 | while (map_pages > 0) { | ||
| 1092 | // Check if we're at the end of the physical block. | ||
| 1093 | if (pg_pages == 0) { | ||
| 1094 | // Ensure there are more pages to map. | ||
| 1095 | ASSERT(pg_it != pg.Nodes().end()); | ||
| 1096 | |||
| 1097 | // Advance our physical block. | ||
| 1098 | ++pg_it; | ||
| 1099 | pg_phys_addr = pg_it->GetAddress(); | ||
| 1100 | pg_pages = pg_it->GetNumPages(); | ||
| 1101 | } | ||
| 1102 | |||
| 1103 | // Map whatever we can. | ||
| 1104 | const size_t cur_pages = std::min(pg_pages, map_pages); | ||
| 1105 | ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(), | ||
| 1106 | OperationType::Map, pg_phys_addr) == ResultSuccess); | ||
| 1107 | 1867 | ||
| 1108 | // Advance. | 1868 | // Separate the mapping. |
| 1109 | cur_address += cur_pages * PageSize; | 1869 | R_TRY(Operate(map_start_address, (map_last_address + 1 - map_start_address) / PageSize, |
| 1110 | map_pages -= cur_pages; | 1870 | KMemoryPermission::None, OperationType::Separate)); |
| 1111 | 1871 | ||
| 1112 | pg_phys_addr += cur_pages * PageSize; | 1872 | // Reset the current tracking address, and make sure we clean up on failure. |
| 1113 | pg_pages -= cur_pages; | 1873 | cur_address = address; |
| 1114 | } | ||
| 1115 | } | ||
| 1116 | |||
| 1117 | // Check if we're done. | ||
| 1118 | if (last_map_address <= info.GetLastAddress()) { | ||
| 1119 | break; | ||
| 1120 | } | ||
| 1121 | |||
| 1122 | // Advance. | ||
| 1123 | ++it; | ||
| 1124 | } | ||
| 1125 | } | ||
| 1126 | }); | ||
| 1127 | 1874 | ||
| 1128 | // Iterate over the memory, unmapping as we go. | 1875 | // Iterate over the memory, unmapping as we go. |
| 1129 | auto it = m_memory_block_manager.FindIterator(cur_address); | 1876 | auto it = m_memory_block_manager.FindIterator(cur_address); |
| @@ -1141,8 +1888,12 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 1141 | last_address + 1 - cur_address) / | 1888 | last_address + 1 - cur_address) / |
| 1142 | PageSize; | 1889 | PageSize; |
| 1143 | 1890 | ||
| 1891 | // HACK: Manually close the pages. | ||
| 1892 | HACK_ClosePages(cur_address, cur_pages); | ||
| 1893 | |||
| 1144 | // Unmap. | 1894 | // Unmap. |
| 1145 | R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)); | 1895 | ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap) |
| 1896 | .IsSuccess()); | ||
| 1146 | } | 1897 | } |
| 1147 | 1898 | ||
| 1148 | // Check if we're done. | 1899 | // Check if we're done. |
| @@ -1157,8 +1908,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 1157 | 1908 | ||
| 1158 | // Release the memory resource. | 1909 | // Release the memory resource. |
| 1159 | m_mapped_physical_memory_size -= mapped_size; | 1910 | m_mapped_physical_memory_size -= mapped_size; |
| 1160 | auto process{m_system.Kernel().CurrentProcess()}; | 1911 | m_resource_limit->Release(LimitableResource::PhysicalMemory, mapped_size); |
| 1161 | process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); | ||
| 1162 | 1912 | ||
| 1163 | // Update memory blocks. | 1913 | // Update memory blocks. |
| 1164 | m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, | 1914 | m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, |
| @@ -1166,14 +1916,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { | |||
| 1166 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, | 1916 | KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, |
| 1167 | KMemoryBlockDisableMergeAttribute::None); | 1917 | KMemoryBlockDisableMergeAttribute::None); |
| 1168 | 1918 | ||
| 1169 | // TODO(bunnei): This is a workaround until the next set of changes, where we add reference | ||
| 1170 | // counting for mapped pages. Until then, we must manually close the reference to the page | ||
| 1171 | // group. | ||
| 1172 | m_system.Kernel().MemoryManager().Close(pg); | ||
| 1173 | |||
| 1174 | // We succeeded. | 1919 | // We succeeded. |
| 1175 | remap_guard.Cancel(); | ||
| 1176 | |||
| 1177 | R_SUCCEED(); | 1920 | R_SUCCEED(); |
| 1178 | } | 1921 | } |
| 1179 | 1922 | ||
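The reworked UnmapPhysicalMemory above no longer rebuilds a KPageGroup before unmapping; it accumulates mapped_size directly while walking the memory blocks, clamping each Normal block to the queried range. A minimal, self-contained version of that interval accounting, with a simplified Block type standing in for KMemoryInfo (an assumption made purely for illustration):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using VAddr = std::uint64_t;

    struct Block {
        VAddr start;
        VAddr last;      // inclusive last address of the block
        bool is_normal;  // corresponds to KMemoryState::Normal
    };

    // Sum the bytes of Normal memory that overlap [address, address + size - 1].
    std::size_t MappedSizeInRange(const std::vector<Block>& blocks, VAddr address,
                                  std::size_t size) {
        const VAddr last_address = address + size - 1;
        std::size_t mapped_size = 0;
        for (const Block& b : blocks) {
            if (b.last < address || b.start > last_address) {
                continue; // no overlap with the queried range
            }
            if (b.is_normal) {
                const VAddr cur = (b.start < address) ? address : b.start;
                const VAddr end = (b.last > last_address) ? last_address : b.last;
                mapped_size += static_cast<std::size_t>(end + 1 - cur);
            }
        }
        return mapped_size;
    }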
| @@ -1749,8 +2492,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) { | |||
| 1749 | OperationType::Unmap)); | 2492 | OperationType::Unmap)); |
| 1750 | 2493 | ||
| 1751 | // Release the memory from the resource limit. | 2494 | // Release the memory from the resource limit. |
| 1752 | m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release( | 2495 | m_resource_limit->Release(LimitableResource::PhysicalMemory, num_pages * PageSize); |
| 1753 | LimitableResource::PhysicalMemory, num_pages * PageSize); | ||
| 1754 | 2496 | ||
| 1755 | // Apply the memory block update. | 2497 | // Apply the memory block update. |
| 1756 | m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, | 2498 | m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, |
| @@ -1780,8 +2522,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) { | |||
| 1780 | 2522 | ||
| 1781 | // Reserve memory for the heap extension. | 2523 | // Reserve memory for the heap extension. |
| 1782 | KScopedResourceReservation memory_reservation( | 2524 | KScopedResourceReservation memory_reservation( |
| 1783 | m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, | 2525 | m_resource_limit, LimitableResource::PhysicalMemory, allocation_size); |
| 1784 | allocation_size); | ||
| 1785 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); | 2526 | R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); |
| 1786 | 2527 | ||
| 1787 | // Allocate pages for the heap extension. | 2528 | // Allocate pages for the heap extension. |
| @@ -1869,7 +2610,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_ | |||
| 1869 | R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); | 2610 | R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); |
| 1870 | } else { | 2611 | } else { |
| 1871 | KPageGroup page_group; | 2612 | KPageGroup page_group; |
| 1872 | R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( | 2613 | R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess( |
| 1873 | &page_group, needed_num_pages, | 2614 | &page_group, needed_num_pages, |
| 1874 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); | 2615 | KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); |
| 1875 | R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); | 2616 | R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); |
| @@ -1883,8 +2624,9 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_ | |||
| 1883 | return addr; | 2624 | return addr; |
| 1884 | } | 2625 | } |
| 1885 | 2626 | ||
| 1886 | Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, | 2627 | Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size, |
| 1887 | bool is_aligned) { | 2628 | KMemoryPermission perm, bool is_aligned, |
| 2629 | bool check_heap) { | ||
| 1888 | // Lightly validate the range before doing anything else. | 2630 | // Lightly validate the range before doing anything else. |
| 1889 | const size_t num_pages = size / PageSize; | 2631 | const size_t num_pages = size / PageSize; |
| 1890 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | 2632 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); |
| @@ -1894,15 +2636,18 @@ Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMem | |||
| 1894 | 2636 | ||
| 1895 | // Check the memory state. | 2637 | // Check the memory state. |
| 1896 | const auto test_state = | 2638 | const auto test_state = |
| 1897 | (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap); | 2639 | (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) | |
| 2640 | (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None); | ||
| 1898 | size_t num_allocator_blocks; | 2641 | size_t num_allocator_blocks; |
| 1899 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state, | 2642 | KMemoryState old_state; |
| 2643 | R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr, | ||
| 2644 | std::addressof(num_allocator_blocks), address, size, test_state, | ||
| 1900 | test_state, perm, perm, | 2645 | test_state, perm, perm, |
| 1901 | KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, | 2646 | KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, |
| 1902 | KMemoryAttribute::None, KMemoryAttribute::DeviceShared)); | 2647 | KMemoryAttribute::None, KMemoryAttribute::DeviceShared)); |
| 1903 | 2648 | ||
| 1904 | // Create an update allocator. | 2649 | // Create an update allocator. |
| 1905 | Result allocator_result{ResultSuccess}; | 2650 | Result allocator_result; |
| 1906 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 2651 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1907 | m_memory_block_slab_manager, num_allocator_blocks); | 2652 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1908 | R_TRY(allocator_result); | 2653 | R_TRY(allocator_result); |
| @@ -1911,10 +2656,13 @@ Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMem | |||
| 1911 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, | 2656 | m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, |
| 1912 | &KMemoryBlock::ShareToDevice, KMemoryPermission::None); | 2657 | &KMemoryBlock::ShareToDevice, KMemoryPermission::None); |
| 1913 | 2658 | ||
| 2659 | // Set whether the locked memory was io. | ||
| 2660 | *out_is_io = old_state == KMemoryState::Io; | ||
| 2661 | |||
| 1914 | R_SUCCEED(); | 2662 | R_SUCCEED(); |
| 1915 | } | 2663 | } |
| 1916 | 2664 | ||
| 1917 | Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) { | 2665 | Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap) { |
| 1918 | // Lightly validate the range before doing anything else. | 2666 | // Lightly validate the range before doing anything else. |
| 1919 | const size_t num_pages = size / PageSize; | 2667 | const size_t num_pages = size / PageSize; |
| 1920 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | 2668 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); |
| @@ -1923,16 +2671,16 @@ Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) { | |||
| 1923 | KScopedLightLock lk(m_general_lock); | 2671 | KScopedLightLock lk(m_general_lock); |
| 1924 | 2672 | ||
| 1925 | // Check the memory state. | 2673 | // Check the memory state. |
| 2674 | const auto test_state = KMemoryState::FlagCanDeviceMap | | ||
| 2675 | (check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None); | ||
| 1926 | size_t num_allocator_blocks; | 2676 | size_t num_allocator_blocks; |
| 1927 | R_TRY(this->CheckMemoryStateContiguous( | 2677 | R_TRY(this->CheckMemoryStateContiguous( |
| 1928 | std::addressof(num_allocator_blocks), address, size, | 2678 | std::addressof(num_allocator_blocks), address, size, test_state, test_state, |
| 1929 | KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, | ||
| 1930 | KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, | ||
| 1931 | KMemoryPermission::None, KMemoryPermission::None, | 2679 | KMemoryPermission::None, KMemoryPermission::None, |
| 1932 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); | 2680 | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); |
| 1933 | 2681 | ||
| 1934 | // Create an update allocator. | 2682 | // Create an update allocator. |
| 1935 | Result allocator_result{ResultSuccess}; | 2683 | Result allocator_result; |
| 1936 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), | 2684 | KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), |
| 1937 | m_memory_block_slab_manager, num_allocator_blocks); | 2685 | m_memory_block_slab_manager, num_allocator_blocks); |
| 1938 | R_TRY(allocator_result); | 2686 | R_TRY(allocator_result); |
| @@ -1976,13 +2724,28 @@ Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) { | |||
| 1976 | R_SUCCEED(); | 2724 | R_SUCCEED(); |
| 1977 | } | 2725 | } |
| 1978 | 2726 | ||
| 2727 | Result KPageTable::LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size) { | ||
| 2728 | R_RETURN(this->LockMemoryAndOpen( | ||
| 2729 | nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer, | ||
| 2730 | KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All, | ||
| 2731 | KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None, | ||
| 2732 | KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite, | ||
| 2733 | KMemoryAttribute::Locked)); | ||
| 2734 | } | ||
| 2735 | |||
| 2736 | Result KPageTable::UnlockForIpcUserBuffer(VAddr address, size_t size) { | ||
| 2737 | R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer, | ||
| 2738 | KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None, | ||
| 2739 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 2740 | KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, | ||
| 2741 | KMemoryAttribute::Locked, nullptr)); | ||
| 2742 | } | ||
| 2743 | |||
| 1979 | Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) { | 2744 | Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) { |
| 1980 | R_RETURN(this->LockMemoryAndOpen( | 2745 | R_RETURN(this->LockMemoryAndOpen( |
| 1981 | out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, | 2746 | out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, |
| 1982 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, | 2747 | KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, |
| 1983 | KMemoryAttribute::None, | 2748 | KMemoryAttribute::None, KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite, |
| 1984 | static_cast<KMemoryPermission>(KMemoryPermission::NotMapped | | ||
| 1985 | KMemoryPermission::KernelReadWrite), | ||
| 1986 | KMemoryAttribute::Locked)); | 2749 | KMemoryAttribute::Locked)); |
| 1987 | } | 2750 | } |
| 1988 | 2751 | ||
| @@ -2066,6 +2829,10 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, | |||
| 2066 | m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); | 2829 | m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); |
| 2067 | break; | 2830 | break; |
| 2068 | } | 2831 | } |
| 2832 | case OperationType::Separate: { | ||
| 2833 | // HACK: Unimplemented. | ||
| 2834 | break; | ||
| 2835 | } | ||
| 2069 | case OperationType::ChangePermissions: | 2836 | case OperationType::ChangePermissions: |
| 2070 | case OperationType::ChangePermissionsAndRefresh: | 2837 | case OperationType::ChangePermissionsAndRefresh: |
| 2071 | break; | 2838 | break; |
| @@ -2075,6 +2842,17 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, | |||
| 2075 | R_SUCCEED(); | 2842 | R_SUCCEED(); |
| 2076 | } | 2843 | } |
| 2077 | 2844 | ||
| 2845 | void KPageTable::FinalizeUpdate(PageLinkedList* page_list) { | ||
| 2846 | while (page_list->Peek()) { | ||
| 2847 | [[maybe_unused]] auto page = page_list->Pop(); | ||
| 2848 | |||
| 2849 | // TODO(bunnei): Free pages once they are allocated in guest memory | ||
| 2850 | // ASSERT(this->GetPageTableManager().IsInPageTableHeap(page)); | ||
| 2851 | // ASSERT(this->GetPageTableManager().GetRefCount(page) == 0); | ||
| 2852 | // this->GetPageTableManager().Free(page); | ||
| 2853 | } | ||
| 2854 | } | ||
| 2855 | |||
| 2078 | VAddr KPageTable::GetRegionAddress(KMemoryState state) const { | 2856 | VAddr KPageTable::GetRegionAddress(KMemoryState state) const { |
| 2079 | switch (state) { | 2857 | switch (state) { |
| 2080 | case KMemoryState::Free: | 2858 | case KMemoryState::Free: |
| @@ -2101,6 +2879,7 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const { | |||
| 2101 | case KMemoryState::GeneratedCode: | 2879 | case KMemoryState::GeneratedCode: |
| 2102 | case KMemoryState::CodeOut: | 2880 | case KMemoryState::CodeOut: |
| 2103 | case KMemoryState::Coverage: | 2881 | case KMemoryState::Coverage: |
| 2882 | case KMemoryState::Insecure: | ||
| 2104 | return m_alias_code_region_start; | 2883 | return m_alias_code_region_start; |
| 2105 | case KMemoryState::Code: | 2884 | case KMemoryState::Code: |
| 2106 | case KMemoryState::CodeData: | 2885 | case KMemoryState::CodeData: |
| @@ -2136,6 +2915,7 @@ size_t KPageTable::GetRegionSize(KMemoryState state) const { | |||
| 2136 | case KMemoryState::GeneratedCode: | 2915 | case KMemoryState::GeneratedCode: |
| 2137 | case KMemoryState::CodeOut: | 2916 | case KMemoryState::CodeOut: |
| 2138 | case KMemoryState::Coverage: | 2917 | case KMemoryState::Coverage: |
| 2918 | case KMemoryState::Insecure: | ||
| 2139 | return m_alias_code_region_end - m_alias_code_region_start; | 2919 | return m_alias_code_region_end - m_alias_code_region_start; |
| 2140 | case KMemoryState::Code: | 2920 | case KMemoryState::Code: |
| 2141 | case KMemoryState::CodeData: | 2921 | case KMemoryState::CodeData: |
| @@ -2177,6 +2957,7 @@ bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const { | |||
| 2177 | case KMemoryState::GeneratedCode: | 2957 | case KMemoryState::GeneratedCode: |
| 2178 | case KMemoryState::CodeOut: | 2958 | case KMemoryState::CodeOut: |
| 2179 | case KMemoryState::Coverage: | 2959 | case KMemoryState::Coverage: |
| 2960 | case KMemoryState::Insecure: | ||
| 2180 | return is_in_region && !is_in_heap && !is_in_alias; | 2961 | return is_in_region && !is_in_heap && !is_in_alias; |
| 2181 | case KMemoryState::Normal: | 2962 | case KMemoryState::Normal: |
| 2182 | ASSERT(is_in_heap); | 2963 | ASSERT(is_in_heap); |
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index c6aeacd96..950850291 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include "core/hle/kernel/k_memory_layout.h" | 16 | #include "core/hle/kernel/k_memory_layout.h" |
| 17 | #include "core/hle/kernel/k_memory_manager.h" | 17 | #include "core/hle/kernel/k_memory_manager.h" |
| 18 | #include "core/hle/result.h" | 18 | #include "core/hle/result.h" |
| 19 | #include "core/memory.h" | ||
| 19 | 20 | ||
| 20 | namespace Core { | 21 | namespace Core { |
| 21 | class System; | 22 | class System; |
| @@ -23,7 +24,10 @@ class System; | |||
| 23 | 24 | ||
| 24 | namespace Kernel { | 25 | namespace Kernel { |
| 25 | 26 | ||
| 27 | class KBlockInfoManager; | ||
| 26 | class KMemoryBlockManager; | 28 | class KMemoryBlockManager; |
| 29 | class KResourceLimit; | ||
| 30 | class KSystemResource; | ||
| 27 | 31 | ||
| 28 | class KPageTable final { | 32 | class KPageTable final { |
| 29 | public: | 33 | public: |
| @@ -36,9 +40,9 @@ public: | |||
| 36 | ~KPageTable(); | 40 | ~KPageTable(); |
| 37 | 41 | ||
| 38 | Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, | 42 | Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, |
| 39 | VAddr code_addr, size_t code_size, | 43 | bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, |
| 40 | KMemoryBlockSlabManager* mem_block_slab_manager, | 44 | VAddr code_addr, size_t code_size, KSystemResource* system_resource, |
| 41 | KMemoryManager::Pool pool); | 45 | KResourceLimit* resource_limit); |
| 42 | 46 | ||
| 43 | void Finalize(); | 47 | void Finalize(); |
| 44 | 48 | ||
| @@ -74,12 +78,20 @@ public: | |||
| 74 | KMemoryState state, KMemoryPermission perm, | 78 | KMemoryState state, KMemoryPermission perm, |
| 75 | PAddr map_addr = 0); | 79 | PAddr map_addr = 0); |
| 76 | 80 | ||
| 77 | Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, | 81 | Result LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size, |
| 78 | bool is_aligned); | 82 | KMemoryPermission perm, bool is_aligned, bool check_heap); |
| 79 | Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size); | 83 | Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap); |
| 80 | 84 | ||
| 81 | Result UnlockForDeviceAddressSpace(VAddr addr, size_t size); | 85 | Result UnlockForDeviceAddressSpace(VAddr addr, size_t size); |
| 82 | 86 | ||
| 87 | Result LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size); | ||
| 88 | Result UnlockForIpcUserBuffer(VAddr address, size_t size); | ||
| 89 | |||
| 90 | Result SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, KPageTable& src_page_table, | ||
| 91 | KMemoryPermission test_perm, KMemoryState dst_state, bool send); | ||
| 92 | Result CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state); | ||
| 93 | Result CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state); | ||
| 94 | |||
| 83 | Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size); | 95 | Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size); |
| 84 | Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg); | 96 | Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg); |
| 85 | Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, | 97 | Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, |
| @@ -97,13 +109,54 @@ public: | |||
| 97 | 109 | ||
| 98 | bool CanContain(VAddr addr, size_t size, KMemoryState state) const; | 110 | bool CanContain(VAddr addr, size_t size, KMemoryState state) const; |
| 99 | 111 | ||
| 112 | protected: | ||
| 113 | struct PageLinkedList { | ||
| 114 | private: | ||
| 115 | struct Node { | ||
| 116 | Node* m_next; | ||
| 117 | std::array<u8, PageSize - sizeof(Node*)> m_buffer; | ||
| 118 | }; | ||
| 119 | |||
| 120 | public: | ||
| 121 | constexpr PageLinkedList() = default; | ||
| 122 | |||
| 123 | void Push(Node* n) { | ||
| 124 | ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize)); | ||
| 125 | n->m_next = m_root; | ||
| 126 | m_root = n; | ||
| 127 | } | ||
| 128 | |||
| 129 | void Push(Core::Memory::Memory& memory, VAddr addr) { | ||
| 130 | this->Push(memory.GetPointer<Node>(addr)); | ||
| 131 | } | ||
| 132 | |||
| 133 | Node* Peek() const { | ||
| 134 | return m_root; | ||
| 135 | } | ||
| 136 | |||
| 137 | Node* Pop() { | ||
| 138 | Node* const r = m_root; | ||
| 139 | |||
| 140 | m_root = r->m_next; | ||
| 141 | r->m_next = nullptr; | ||
| 142 | |||
| 143 | return r; | ||
| 144 | } | ||
| 145 | |||
| 146 | private: | ||
| 147 | Node* m_root{}; | ||
| 148 | }; | ||
| 149 | static_assert(std::is_trivially_destructible<PageLinkedList>::value); | ||
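The PageLinkedList just added keeps its link pointers inside the free pages themselves, so tracking any number of pages costs no side allocations. A free-standing sketch of the same intrusive idea; PageList and the aligned-new usage at the end are illustrative, not the kernel's types:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <new>

    constexpr std::size_t PageSize = 0x1000;

    class PageList {
    public:
        struct Node {
            Node* next;
            std::uint8_t buffer[PageSize - sizeof(Node*)];
        };
        static_assert(sizeof(Node) == PageSize);

        void Push(void* page) {
            // The page must be page-aligned; its first bytes become the link.
            assert(reinterpret_cast<std::uintptr_t>(page) % PageSize == 0);
            Node* const n = new (page) Node;
            n->next = m_root;
            m_root = n;
        }

        Node* Pop() {
            Node* const r = m_root;
            if (r != nullptr) {
                m_root = r->next;
                r->next = nullptr;
            }
            return r;
        }

    private:
        Node* m_root{nullptr};
    };

    // Usage: hand the list a page-aligned allocation, then pop it back.
    //   void* page = ::operator new(PageSize, std::align_val_t{PageSize});
    //   PageList list;
    //   list.Push(page);
    //   ::operator delete(list.Pop(), std::align_val_t{PageSize});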
| 150 | |||
| 100 | private: | 151 | private: |
| 101 | enum class OperationType : u32 { | 152 | enum class OperationType : u32 { |
| 102 | Map, | 153 | Map = 0, |
| 103 | MapGroup, | 154 | MapFirst = 1, |
| 104 | Unmap, | 155 | MapGroup = 2, |
| 105 | ChangePermissions, | 156 | Unmap = 3, |
| 106 | ChangePermissionsAndRefresh, | 157 | ChangePermissions = 4, |
| 158 | ChangePermissionsAndRefresh = 5, | ||
| 159 | Separate = 6, | ||
| 107 | }; | 160 | }; |
| 108 | 161 | ||
| 109 | static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = | 162 | static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = |
| @@ -123,6 +176,7 @@ private: | |||
| 123 | OperationType operation); | 176 | OperationType operation); |
| 124 | Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation, | 177 | Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation, |
| 125 | PAddr map_addr = 0); | 178 | PAddr map_addr = 0); |
| 179 | void FinalizeUpdate(PageLinkedList* page_list); | ||
| 126 | VAddr GetRegionAddress(KMemoryState state) const; | 180 | VAddr GetRegionAddress(KMemoryState state) const; |
| 127 | size_t GetRegionSize(KMemoryState state) const; | 181 | size_t GetRegionSize(KMemoryState state) const; |
| 128 | 182 | ||
| @@ -199,6 +253,18 @@ private: | |||
| 199 | return *out != 0; | 253 | return *out != 0; |
| 200 | } | 254 | } |
| 201 | 255 | ||
| 256 | Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, VAddr address, | ||
| 257 | size_t size, KMemoryPermission test_perm, KMemoryState dst_state); | ||
| 258 | Result SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr, | ||
| 259 | KMemoryPermission test_perm, KMemoryState dst_state, | ||
| 260 | KPageTable& src_page_table, bool send); | ||
| 261 | void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address, | ||
| 262 | size_t size, KMemoryPermission prot_perm); | ||
| 263 | |||
| 264 | // HACK: These will be removed once we automatically manage page reference counts. | ||
| 265 | void HACK_OpenPages(PAddr phys_addr, size_t num_pages); | ||
| 266 | void HACK_ClosePages(VAddr virt_addr, size_t num_pages); | ||
| 267 | |||
| 202 | mutable KLightLock m_general_lock; | 268 | mutable KLightLock m_general_lock; |
| 203 | mutable KLightLock m_map_physical_memory_lock; | 269 | mutable KLightLock m_map_physical_memory_lock; |
| 204 | 270 | ||
| @@ -316,6 +382,31 @@ public: | |||
| 316 | addr + size - 1 <= m_address_space_end - 1; | 382 | addr + size - 1 <= m_address_space_end - 1; |
| 317 | } | 383 | } |
| 318 | 384 | ||
| 385 | public: | ||
| 386 | static VAddr GetLinearMappedVirtualAddress(const KMemoryLayout& layout, PAddr addr) { | ||
| 387 | return layout.GetLinearVirtualAddress(addr); | ||
| 388 | } | ||
| 389 | |||
| 390 | static PAddr GetLinearMappedPhysicalAddress(const KMemoryLayout& layout, VAddr addr) { | ||
| 391 | return layout.GetLinearPhysicalAddress(addr); | ||
| 392 | } | ||
| 393 | |||
| 394 | static VAddr GetHeapVirtualAddress(const KMemoryLayout& layout, PAddr addr) { | ||
| 395 | return GetLinearMappedVirtualAddress(layout, addr); | ||
| 396 | } | ||
| 397 | |||
| 398 | static PAddr GetHeapPhysicalAddress(const KMemoryLayout& layout, VAddr addr) { | ||
| 399 | return GetLinearMappedPhysicalAddress(layout, addr); | ||
| 400 | } | ||
| 401 | |||
| 402 | static VAddr GetPageTableVirtualAddress(const KMemoryLayout& layout, PAddr addr) { | ||
| 403 | return GetLinearMappedVirtualAddress(layout, addr); | ||
| 404 | } | ||
| 405 | |||
| 406 | static PAddr GetPageTablePhysicalAddress(const KMemoryLayout& layout, VAddr addr) { | ||
| 407 | return GetLinearMappedPhysicalAddress(layout, addr); | ||
| 408 | } | ||
| 409 | |||
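The static helpers above all funnel into the memory layout's linear mapping, which, assuming the usual meaning of a linear map, is a fixed offset between a physical window and its virtual alias. A small model of that translation (LinearMap is an illustrative stand-in, not the layout class):

    #include <cassert>
    #include <cstdint>

    using PAddr = std::uint64_t;
    using VAddr = std::uint64_t;

    // Fixed-offset ("linear") mapping between a physical range and its virtual alias.
    struct LinearMap {
        PAddr phys_base;
        VAddr virt_base;
        std::uint64_t size;

        VAddr ToVirtual(PAddr addr) const {
            assert(phys_base <= addr && addr < phys_base + size);
            return virt_base + (addr - phys_base);
        }

        PAddr ToPhysical(VAddr addr) const {
            assert(virt_base <= addr && addr < virt_base + size);
            return phys_base + (addr - virt_base);
        }
    };

    // Heap and page-table pages both live inside this linearly mapped window, which
    // is why the heap and page-table address helpers above can share one implementation.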
| 319 | private: | 410 | private: |
| 320 | constexpr bool IsKernel() const { | 411 | constexpr bool IsKernel() const { |
| 321 | return m_is_kernel; | 412 | return m_is_kernel; |
| @@ -331,6 +422,24 @@ private: | |||
| 331 | } | 422 | } |
| 332 | 423 | ||
| 333 | private: | 424 | private: |
| 425 | class KScopedPageTableUpdater { | ||
| 426 | private: | ||
| 427 | KPageTable* m_pt{}; | ||
| 428 | PageLinkedList m_ll; | ||
| 429 | |||
| 430 | public: | ||
| 431 | explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {} | ||
| 432 | explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {} | ||
| 433 | ~KScopedPageTableUpdater() { | ||
| 434 | m_pt->FinalizeUpdate(this->GetPageList()); | ||
| 435 | } | ||
| 436 | |||
| 437 | PageLinkedList* GetPageList() { | ||
| 438 | return &m_ll; | ||
| 439 | } | ||
| 440 | }; | ||
| 441 | |||
| 442 | private: | ||
| 334 | VAddr m_address_space_start{}; | 443 | VAddr m_address_space_start{}; |
| 335 | VAddr m_address_space_end{}; | 444 | VAddr m_address_space_end{}; |
| 336 | VAddr m_heap_region_start{}; | 445 | VAddr m_heap_region_start{}; |
| @@ -347,20 +456,27 @@ private: | |||
| 347 | VAddr m_alias_code_region_start{}; | 456 | VAddr m_alias_code_region_start{}; |
| 348 | VAddr m_alias_code_region_end{}; | 457 | VAddr m_alias_code_region_end{}; |
| 349 | 458 | ||
| 350 | size_t m_mapped_physical_memory_size{}; | ||
| 351 | size_t m_max_heap_size{}; | 459 | size_t m_max_heap_size{}; |
| 352 | size_t m_max_physical_memory_size{}; | 460 | size_t m_mapped_physical_memory_size{}; |
| 461 | size_t m_mapped_unsafe_physical_memory{}; | ||
| 462 | size_t m_mapped_insecure_memory{}; | ||
| 463 | size_t m_mapped_ipc_server_memory{}; | ||
| 353 | size_t m_address_space_width{}; | 464 | size_t m_address_space_width{}; |
| 354 | 465 | ||
| 355 | KMemoryBlockManager m_memory_block_manager; | 466 | KMemoryBlockManager m_memory_block_manager; |
| 467 | u32 m_allocate_option{}; | ||
| 356 | 468 | ||
| 357 | bool m_is_kernel{}; | 469 | bool m_is_kernel{}; |
| 358 | bool m_enable_aslr{}; | 470 | bool m_enable_aslr{}; |
| 359 | bool m_enable_device_address_space_merge{}; | 471 | bool m_enable_device_address_space_merge{}; |
| 360 | 472 | ||
| 361 | KMemoryBlockSlabManager* m_memory_block_slab_manager{}; | 473 | KMemoryBlockSlabManager* m_memory_block_slab_manager{}; |
| 474 | KBlockInfoManager* m_block_info_manager{}; | ||
| 475 | KResourceLimit* m_resource_limit{}; | ||
| 362 | 476 | ||
| 363 | u32 m_heap_fill_value{}; | 477 | u32 m_heap_fill_value{}; |
| 478 | u32 m_ipc_fill_value{}; | ||
| 479 | u32 m_stack_fill_value{}; | ||
| 364 | const KMemoryRegion* m_cached_physical_heap_region{}; | 480 | const KMemoryRegion* m_cached_physical_heap_region{}; |
| 365 | 481 | ||
| 366 | KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application}; | 482 | KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application}; |
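KScopedPageTableUpdater above ties FinalizeUpdate() to scope exit, so pages collected during an update are handed back on every path out of an operation, including R_TRY bailouts, without an explicit guard to cancel. A compact stand-alone model of that pattern; it uses a plain std::vector in place of the intrusive PageLinkedList purely to keep the sketch short, and the freeing step is the part the hunk above still marks as a TODO:

    #include <vector>

    struct PageList {
        std::vector<void*> pages;
        void Push(void* p) { pages.push_back(p); }
    };

    class Table {
    public:
        void FinalizeUpdate(PageList* list) {
            // Return every page collected during the update to the table's heap.
            for (void* p : list->pages) {
                FreePage(p);
            }
            list->pages.clear();
        }

    private:
        void FreePage(void* /*page*/) {}
    };

    class ScopedTableUpdater {
    public:
        explicit ScopedTableUpdater(Table* t) : m_table{t} {}
        ~ScopedTableUpdater() { m_table->FinalizeUpdate(&m_list); }

        PageList* GetPageList() { return &m_list; }

    private:
        Table* m_table;
        PageList m_list;
    };

    // Typical shape inside an operation that may release page-table pages:
    //   Table table;
    //   {
    //       ScopedTableUpdater updater(&table);
    //       // ... unmap work pushes freed pages onto updater.GetPageList() ...
    //   } // the destructor finalizes the update here, on success or failure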
diff --git a/src/core/hle/kernel/k_page_table_manager.h b/src/core/hle/kernel/k_page_table_manager.h new file mode 100644 index 000000000..91a45cde3 --- /dev/null +++ b/src/core/hle/kernel/k_page_table_manager.h | |||
| @@ -0,0 +1,55 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <atomic> | ||
| 7 | |||
| 8 | #include "common/common_types.h" | ||
| 9 | #include "core/hle/kernel/k_dynamic_resource_manager.h" | ||
| 10 | #include "core/hle/kernel/k_page_table_slab_heap.h" | ||
| 11 | |||
| 12 | namespace Kernel { | ||
| 13 | |||
| 14 | class KPageTableManager : public KDynamicResourceManager<impl::PageTablePage, true> { | ||
| 15 | public: | ||
| 16 | using RefCount = KPageTableSlabHeap::RefCount; | ||
| 17 | static constexpr size_t PageTableSize = KPageTableSlabHeap::PageTableSize; | ||
| 18 | |||
| 19 | public: | ||
| 20 | KPageTableManager() = default; | ||
| 21 | |||
| 22 | void Initialize(KDynamicPageManager* page_allocator, KPageTableSlabHeap* pt_heap) { | ||
| 23 | m_pt_heap = pt_heap; | ||
| 24 | |||
| 25 | static_assert(std::derived_from<KPageTableSlabHeap, DynamicSlabType>); | ||
| 26 | BaseHeap::Initialize(page_allocator, pt_heap); | ||
| 27 | } | ||
| 28 | |||
| 29 | VAddr Allocate() { | ||
| 30 | return VAddr(BaseHeap::Allocate()); | ||
| 31 | } | ||
| 32 | |||
| 33 | RefCount GetRefCount(VAddr addr) const { | ||
| 34 | return m_pt_heap->GetRefCount(addr); | ||
| 35 | } | ||
| 36 | |||
| 37 | void Open(VAddr addr, int count) { | ||
| 38 | return m_pt_heap->Open(addr, count); | ||
| 39 | } | ||
| 40 | |||
| 41 | bool Close(VAddr addr, int count) { | ||
| 42 | return m_pt_heap->Close(addr, count); | ||
| 43 | } | ||
| 44 | |||
| 45 | bool IsInPageTableHeap(VAddr addr) const { | ||
| 46 | return m_pt_heap->IsInRange(addr); | ||
| 47 | } | ||
| 48 | |||
| 49 | private: | ||
| 50 | using BaseHeap = KDynamicResourceManager<impl::PageTablePage, true>; | ||
| 51 | |||
| 52 | KPageTableSlabHeap* m_pt_heap{}; | ||
| 53 | }; | ||
| 54 | |||
| 55 | } // namespace Kernel | ||
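Initialize() above guards the heap hand-off with std::derived_from, turning a wrong heap type into a compile error instead of a silently bad pointer conversion. A minimal illustration of that guard; SlabBase, PageTableHeap, and InitializeWith are placeholder names, not the kernel's types:

    #include <concepts>

    struct SlabBase {};
    struct PageTableHeap : SlabBase {};

    // Accept only heaps that actually derive from the slab base type.
    template <typename Heap>
    void InitializeWith(Heap* heap) {
        static_assert(std::derived_from<Heap, SlabBase>);
        (void)heap; // hand the heap to the base manager here
    }

    // InitializeWith(static_cast<PageTableHeap*>(nullptr)); // compiles
    // InitializeWith(static_cast<int*>(nullptr));           // rejected at compile time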
diff --git a/src/core/hle/kernel/k_page_table_slab_heap.h b/src/core/hle/kernel/k_page_table_slab_heap.h new file mode 100644 index 000000000..a9543cbd0 --- /dev/null +++ b/src/core/hle/kernel/k_page_table_slab_heap.h | |||
| @@ -0,0 +1,93 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <array> | ||
| 7 | #include <vector> | ||
| 8 | |||
| 9 | #include "common/common_types.h" | ||
| 10 | #include "core/hle/kernel/k_dynamic_slab_heap.h" | ||
| 11 | #include "core/hle/kernel/slab_helpers.h" | ||
| 12 | |||
| 13 | namespace Kernel { | ||
| 14 | |||
| 15 | namespace impl { | ||
| 16 | |||
| 17 | class PageTablePage { | ||
| 18 | public: | ||
| 19 | // Do not initialize anything. | ||
| 20 | PageTablePage() = default; | ||
| 21 | |||
| 22 | private: | ||
| 23 | std::array<u8, PageSize> m_buffer{}; | ||
| 24 | }; | ||
| 25 | static_assert(sizeof(PageTablePage) == PageSize); | ||
| 26 | |||
| 27 | } // namespace impl | ||
| 28 | |||
| 29 | class KPageTableSlabHeap : public KDynamicSlabHeap<impl::PageTablePage, true> { | ||
| 30 | public: | ||
| 31 | using RefCount = u16; | ||
| 32 | static constexpr size_t PageTableSize = sizeof(impl::PageTablePage); | ||
| 33 | static_assert(PageTableSize == PageSize); | ||
| 34 | |||
| 35 | public: | ||
| 36 | KPageTableSlabHeap() = default; | ||
| 37 | |||
| 38 | static constexpr size_t CalculateReferenceCountSize(size_t size) { | ||
| 39 | return (size / PageSize) * sizeof(RefCount); | ||
| 40 | } | ||
| 41 | |||
| 42 | void Initialize(KDynamicPageManager* page_allocator, size_t object_count, RefCount* rc) { | ||
| 43 | BaseHeap::Initialize(page_allocator, object_count); | ||
| 44 | this->Initialize(rc); | ||
| 45 | } | ||
| 46 | |||
| 47 | RefCount GetRefCount(VAddr addr) { | ||
| 48 | ASSERT(this->IsInRange(addr)); | ||
| 49 | return *this->GetRefCountPointer(addr); | ||
| 50 | } | ||
| 51 | |||
| 52 | void Open(VAddr addr, int count) { | ||
| 53 | ASSERT(this->IsInRange(addr)); | ||
| 54 | |||
| 55 | *this->GetRefCountPointer(addr) += static_cast<RefCount>(count); | ||
| 56 | |||
| 57 | ASSERT(this->GetRefCount(addr) > 0); | ||
| 58 | } | ||
| 59 | |||
| 60 | bool Close(VAddr addr, int count) { | ||
| 61 | ASSERT(this->IsInRange(addr)); | ||
| 62 | ASSERT(this->GetRefCount(addr) >= count); | ||
| 63 | |||
| 64 | *this->GetRefCountPointer(addr) -= static_cast<RefCount>(count); | ||
| 65 | return this->GetRefCount(addr) == 0; | ||
| 66 | } | ||
| 67 | |||
| 68 | bool IsInPageTableHeap(VAddr addr) const { | ||
| 69 | return this->IsInRange(addr); | ||
| 70 | } | ||
| 71 | |||
| 72 | private: | ||
| 73 | void Initialize([[maybe_unused]] RefCount* rc) { | ||
| 74 | // TODO(bunnei): Use rc once we support kernel virtual memory allocations. | ||
| 75 | const auto count = this->GetSize() / PageSize; | ||
| 76 | m_ref_counts.resize(count); | ||
| 77 | |||
| 78 | for (size_t i = 0; i < count; i++) { | ||
| 79 | m_ref_counts[i] = 0; | ||
| 80 | } | ||
| 81 | } | ||
| 82 | |||
| 83 | RefCount* GetRefCountPointer(VAddr addr) { | ||
| 84 | return m_ref_counts.data() + ((addr - this->GetAddress()) / PageSize); | ||
| 85 | } | ||
| 86 | |||
| 87 | private: | ||
| 88 | using BaseHeap = KDynamicSlabHeap<impl::PageTablePage, true>; | ||
| 89 | |||
| 90 | std::vector<RefCount> m_ref_counts; | ||
| 91 | }; | ||
| 92 | |||
| 93 | } // namespace Kernel | ||
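KPageTableSlabHeap above keeps one RefCount (a u16) per page of the managed range, which is exactly the (size / PageSize) * sizeof(RefCount) bytes that CalculateReferenceCountSize() reserves, and indexes it the same way as GetRefCountPointer(), i.e. (addr - base) / PageSize. A minimal model of that per-page counter table; PageRefCounts is an illustrative name, not the heap class itself:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using VAddr = std::uint64_t;
    constexpr std::size_t PageSize = 0x1000;

    class PageRefCounts {
    public:
        PageRefCounts(VAddr base, std::size_t size)
            : m_base{base}, m_counts(size / PageSize, 0) {}

        void Open(VAddr addr, int count) {
            m_counts[Index(addr)] += static_cast<std::uint16_t>(count);
            assert(m_counts[Index(addr)] > 0);
        }

        // Returns true when the last reference goes away and the page can be freed.
        bool Close(VAddr addr, int count) {
            assert(m_counts[Index(addr)] >= count);
            m_counts[Index(addr)] -= static_cast<std::uint16_t>(count);
            return m_counts[Index(addr)] == 0;
        }

    private:
        std::size_t Index(VAddr addr) const {
            return static_cast<std::size_t>((addr - m_base) / PageSize);
        }

        VAddr m_base;
        std::vector<std::uint16_t> m_counts;
    };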
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 8c3495e5a..4ddeea73b 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp | |||
| @@ -358,8 +358,8 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: | |||
| 358 | } | 358 | } |
| 359 | // Initialize process address space | 359 | // Initialize process address space |
| 360 | if (const Result result{page_table.InitializeForProcess( | 360 | if (const Result result{page_table.InitializeForProcess( |
| 361 | metadata.GetAddressSpaceType(), false, 0x8000000, code_size, | 361 | metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application, |
| 362 | &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)}; | 362 | 0x8000000, code_size, &kernel.GetSystemSystemResource(), resource_limit)}; |
| 363 | result.IsError()) { | 363 | result.IsError()) { |
| 364 | R_RETURN(result); | 364 | R_RETURN(result); |
| 365 | } | 365 | } |
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index b1cabbca0..d6676904b 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp | |||
| @@ -384,7 +384,8 @@ void KScheduler::SwitchThread(KThread* next_thread) { | |||
| 384 | 384 | ||
| 385 | void KScheduler::ScheduleImpl() { | 385 | void KScheduler::ScheduleImpl() { |
| 386 | // First, clear the needs scheduling bool. | 386 | // First, clear the needs scheduling bool. |
| 387 | m_state.needs_scheduling.store(false, std::memory_order_seq_cst); | 387 | m_state.needs_scheduling.store(false, std::memory_order_relaxed); |
| 388 | std::atomic_thread_fence(std::memory_order_seq_cst); | ||
| 388 | 389 | ||
| 389 | // Load the appropriate thread pointers for scheduling. | 390 | // Load the appropriate thread pointers for scheduling. |
| 390 | KThread* const cur_thread{GetCurrentThreadPointer(kernel)}; | 391 | KThread* const cur_thread{GetCurrentThreadPointer(kernel)}; |
| @@ -400,7 +401,8 @@ void KScheduler::ScheduleImpl() { | |||
| 400 | // If there aren't, we want to check if the highest priority thread is the same as the current | 401 | // If there aren't, we want to check if the highest priority thread is the same as the current |
| 401 | // thread. | 402 | // thread. |
| 402 | if (highest_priority_thread == cur_thread) { | 403 | if (highest_priority_thread == cur_thread) { |
| 403 | // If they're the same, then we can just return. | 404 | // If they're the same, then we can just issue a memory barrier and return. |
| 405 | std::atomic_thread_fence(std::memory_order_seq_cst); | ||
| 404 | return; | 406 | return; |
| 405 | } | 407 | } |
| 406 | 408 | ||
| @@ -476,7 +478,8 @@ void KScheduler::ScheduleImplFiber() { | |||
| 476 | 478 | ||
| 477 | // We failed to successfully do the context switch, and need to retry. | 479 | // We failed to successfully do the context switch, and need to retry. |
| 478 | // Clear needs_scheduling. | 480 | // Clear needs_scheduling. |
| 479 | m_state.needs_scheduling.store(false, std::memory_order_seq_cst); | 481 | m_state.needs_scheduling.store(false, std::memory_order_relaxed); |
| 482 | std::atomic_thread_fence(std::memory_order_seq_cst); | ||
| 480 | 483 | ||
| 481 | // Refresh the highest priority thread. | 484 | // Refresh the highest priority thread. |
| 482 | highest_priority_thread = m_state.highest_priority_thread; | 485 | highest_priority_thread = m_state.highest_priority_thread; |
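Both scheduler paths above replace a sequentially consistent store of needs_scheduling with a relaxed store followed by a full std::atomic_thread_fence, keeping a total-order barrier between clearing the flag and the loads that follow it. The shape of that pattern in isolation; the variables here are stand-ins rather than the scheduler's members:

    #include <atomic>

    std::atomic<bool> needs_scheduling{true};
    std::atomic<int> highest_priority_thread{0};

    int PickNextThread() {
        // Clear the flag, then emit one seq_cst fence so the load below cannot be
        // reordered ahead of the cleared flag becoming visible to other cores.
        needs_scheduling.store(false, std::memory_order_relaxed);
        std::atomic_thread_fence(std::memory_order_seq_cst);

        // A core that updates highest_priority_thread and then sets
        // needs_scheduling is either observed here or leaves the flag set,
        // forcing this core to take another scheduling pass.
        return highest_priority_thread.load();
    }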
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h index 73314b45e..129d60472 100644 --- a/src/core/hle/kernel/k_scheduler_lock.h +++ b/src/core/hle/kernel/k_scheduler_lock.h | |||
| @@ -60,6 +60,9 @@ public: | |||
| 60 | 60 | ||
| 61 | // Release an instance of the lock. | 61 | // Release an instance of the lock. |
| 62 | if ((--lock_count) == 0) { | 62 | if ((--lock_count) == 0) { |
| 63 | // Perform a memory barrier here. | ||
| 64 | std::atomic_thread_fence(std::memory_order_seq_cst); | ||
| 65 | |||
| 63 | // We're no longer going to hold the lock. Take note of what cores need scheduling. | 66 | // We're no longer going to hold the lock. Take note of what cores need scheduling. |
| 64 | const u64 cores_needing_scheduling = | 67 | const u64 cores_needing_scheduling = |
| 65 | SchedulerType::UpdateHighestPriorityThreads(kernel); | 68 | SchedulerType::UpdateHighestPriorityThreads(kernel); |
diff --git a/src/core/hle/kernel/k_system_resource.cpp b/src/core/hle/kernel/k_system_resource.cpp new file mode 100644 index 000000000..4cc377a6c --- /dev/null +++ b/src/core/hle/kernel/k_system_resource.cpp | |||
| @@ -0,0 +1,26 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #include "core/hle/kernel/k_system_resource.h" | ||
| 5 | |||
| 6 | namespace Kernel { | ||
| 7 | |||
| 8 | Result KSecureSystemResource::Initialize([[maybe_unused]] size_t size, | ||
| 9 | [[maybe_unused]] KResourceLimit* resource_limit, | ||
| 10 | [[maybe_unused]] KMemoryManager::Pool pool) { | ||
| 11 | // Unimplemented | ||
| 12 | UNREACHABLE(); | ||
| 13 | } | ||
| 14 | |||
| 15 | void KSecureSystemResource::Finalize() { | ||
| 16 | // Unimplemented | ||
| 17 | UNREACHABLE(); | ||
| 18 | } | ||
| 19 | |||
| 20 | size_t KSecureSystemResource::CalculateRequiredSecureMemorySize( | ||
| 21 | [[maybe_unused]] size_t size, [[maybe_unused]] KMemoryManager::Pool pool) { | ||
| 22 | // Unimplemented | ||
| 23 | UNREACHABLE(); | ||
| 24 | } | ||
| 25 | |||
| 26 | } // namespace Kernel | ||
diff --git a/src/core/hle/kernel/k_system_resource.h b/src/core/hle/kernel/k_system_resource.h new file mode 100644 index 000000000..9a991f725 --- /dev/null +++ b/src/core/hle/kernel/k_system_resource.h | |||
| @@ -0,0 +1,137 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include "common/assert.h" | ||
| 7 | #include "common/common_types.h" | ||
| 8 | #include "core/hle/kernel/k_auto_object.h" | ||
| 9 | #include "core/hle/kernel/k_dynamic_resource_manager.h" | ||
| 10 | #include "core/hle/kernel/k_memory_manager.h" | ||
| 11 | #include "core/hle/kernel/k_page_table_manager.h" | ||
| 12 | #include "core/hle/kernel/k_resource_limit.h" | ||
| 13 | #include "core/hle/kernel/slab_helpers.h" | ||
| 14 | |||
| 15 | namespace Kernel { | ||
| 16 | |||
| 17 | // NOTE: Nintendo's implementation does not have the "is_secure_resource" field, and instead uses | ||
| 18 | // virtual IsSecureResource(). | ||
| 19 | |||
| 20 | class KSystemResource : public KAutoObject { | ||
| 21 | KERNEL_AUTOOBJECT_TRAITS(KSystemResource, KAutoObject); | ||
| 22 | |||
| 23 | public: | ||
| 24 | explicit KSystemResource(KernelCore& kernel_) : KAutoObject(kernel_) {} | ||
| 25 | |||
| 26 | protected: | ||
| 27 | void SetSecureResource() { | ||
| 28 | m_is_secure_resource = true; | ||
| 29 | } | ||
| 30 | |||
| 31 | public: | ||
| 32 | virtual void Destroy() override { | ||
| 33 | UNREACHABLE_MSG("KSystemResource::Destroy() was called"); | ||
| 34 | } | ||
| 35 | |||
| 36 | bool IsSecureResource() const { | ||
| 37 | return m_is_secure_resource; | ||
| 38 | } | ||
| 39 | |||
| 40 | void SetManagers(KMemoryBlockSlabManager& mb, KBlockInfoManager& bi, KPageTableManager& pt) { | ||
| 41 | ASSERT(m_p_memory_block_slab_manager == nullptr); | ||
| 42 | ASSERT(m_p_block_info_manager == nullptr); | ||
| 43 | ASSERT(m_p_page_table_manager == nullptr); | ||
| 44 | |||
| 45 | m_p_memory_block_slab_manager = std::addressof(mb); | ||
| 46 | m_p_block_info_manager = std::addressof(bi); | ||
| 47 | m_p_page_table_manager = std::addressof(pt); | ||
| 48 | } | ||
| 49 | |||
| 50 | const KMemoryBlockSlabManager& GetMemoryBlockSlabManager() const { | ||
| 51 | return *m_p_memory_block_slab_manager; | ||
| 52 | } | ||
| 53 | const KBlockInfoManager& GetBlockInfoManager() const { | ||
| 54 | return *m_p_block_info_manager; | ||
| 55 | } | ||
| 56 | const KPageTableManager& GetPageTableManager() const { | ||
| 57 | return *m_p_page_table_manager; | ||
| 58 | } | ||
| 59 | |||
| 60 | KMemoryBlockSlabManager& GetMemoryBlockSlabManager() { | ||
| 61 | return *m_p_memory_block_slab_manager; | ||
| 62 | } | ||
| 63 | KBlockInfoManager& GetBlockInfoManager() { | ||
| 64 | return *m_p_block_info_manager; | ||
| 65 | } | ||
| 66 | KPageTableManager& GetPageTableManager() { | ||
| 67 | return *m_p_page_table_manager; | ||
| 68 | } | ||
| 69 | |||
| 70 | KMemoryBlockSlabManager* GetMemoryBlockSlabManagerPointer() { | ||
| 71 | return m_p_memory_block_slab_manager; | ||
| 72 | } | ||
| 73 | KBlockInfoManager* GetBlockInfoManagerPointer() { | ||
| 74 | return m_p_block_info_manager; | ||
| 75 | } | ||
| 76 | KPageTableManager* GetPageTableManagerPointer() { | ||
| 77 | return m_p_page_table_manager; | ||
| 78 | } | ||
| 79 | |||
| 80 | private: | ||
| 81 | KMemoryBlockSlabManager* m_p_memory_block_slab_manager{}; | ||
| 82 | KBlockInfoManager* m_p_block_info_manager{}; | ||
| 83 | KPageTableManager* m_p_page_table_manager{}; | ||
| 84 | bool m_is_secure_resource{false}; | ||
| 85 | }; | ||
| 86 | |||
| 87 | class KSecureSystemResource final | ||
| 88 | : public KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource> { | ||
| 89 | public: | ||
| 90 | explicit KSecureSystemResource(KernelCore& kernel_) | ||
| 91 | : KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel_) { | ||
| 92 | // Mark ourselves as being a secure resource. | ||
| 93 | this->SetSecureResource(); | ||
| 94 | } | ||
| 95 | |||
| 96 | Result Initialize(size_t size, KResourceLimit* resource_limit, KMemoryManager::Pool pool); | ||
| 97 | void Finalize(); | ||
| 98 | |||
| 99 | bool IsInitialized() const { | ||
| 100 | return m_is_initialized; | ||
| 101 | } | ||
| 102 | static void PostDestroy([[maybe_unused]] uintptr_t arg) {} | ||
| 103 | |||
| 104 | size_t CalculateRequiredSecureMemorySize() const { | ||
| 105 | return CalculateRequiredSecureMemorySize(m_resource_size, m_resource_pool); | ||
| 106 | } | ||
| 107 | |||
| 108 | size_t GetSize() const { | ||
| 109 | return m_resource_size; | ||
| 110 | } | ||
| 111 | size_t GetUsedSize() const { | ||
| 112 | return m_dynamic_page_manager.GetUsed() * PageSize; | ||
| 113 | } | ||
| 114 | |||
| 115 | const KDynamicPageManager& GetDynamicPageManager() const { | ||
| 116 | return m_dynamic_page_manager; | ||
| 117 | } | ||
| 118 | |||
| 119 | public: | ||
| 120 | static size_t CalculateRequiredSecureMemorySize(size_t size, KMemoryManager::Pool pool); | ||
| 121 | |||
| 122 | private: | ||
| 123 | bool m_is_initialized{}; | ||
| 124 | KMemoryManager::Pool m_resource_pool{}; | ||
| 125 | KDynamicPageManager m_dynamic_page_manager; | ||
| 126 | KMemoryBlockSlabManager m_memory_block_slab_manager; | ||
| 127 | KBlockInfoManager m_block_info_manager; | ||
| 128 | KPageTableManager m_page_table_manager; | ||
| 129 | KMemoryBlockSlabHeap m_memory_block_heap; | ||
| 130 | KBlockInfoSlabHeap m_block_info_heap; | ||
| 131 | KPageTableSlabHeap m_page_table_heap; | ||
| 132 | KResourceLimit* m_resource_limit{}; | ||
| 133 | VAddr m_resource_address{}; | ||
| 134 | size_t m_resource_size{}; | ||
| 135 | }; | ||
| 136 | |||
| 137 | } // namespace Kernel | ||
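The NOTE above says this port stores an is_secure_resource flag where the reference implementation reportedly answers the question through a virtual IsSecureResource(). A side-by-side sketch of the two shapes; ResourceA, ResourceB, and the derived classes are illustrative names only:

    // Variant A: non-virtual flag, as in KSystemResource above. The derived
    // secure resource flips the flag in its constructor.
    class ResourceA {
    public:
        bool IsSecureResource() const { return m_is_secure; }

    protected:
        void SetSecureResource() { m_is_secure = true; }

    private:
        bool m_is_secure{false};
    };

    class SecureResourceA : public ResourceA {
    public:
        SecureResourceA() { SetSecureResource(); }
    };

    // Variant B: the virtual dispatch the comment attributes to the reference
    // implementation; no stored flag, the dynamic type answers the question.
    class ResourceB {
    public:
        virtual ~ResourceB() = default;
        virtual bool IsSecureResource() const { return false; }
    };

    class SecureResourceB : public ResourceB {
    public:
        bool IsSecureResource() const override { return true; }
    };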
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index d1892e078..7f800d860 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -28,10 +28,12 @@ | |||
| 28 | #include "core/hle/kernel/k_handle_table.h" | 28 | #include "core/hle/kernel/k_handle_table.h" |
| 29 | #include "core/hle/kernel/k_memory_layout.h" | 29 | #include "core/hle/kernel/k_memory_layout.h" |
| 30 | #include "core/hle/kernel/k_memory_manager.h" | 30 | #include "core/hle/kernel/k_memory_manager.h" |
| 31 | #include "core/hle/kernel/k_page_buffer.h" | ||
| 31 | #include "core/hle/kernel/k_process.h" | 32 | #include "core/hle/kernel/k_process.h" |
| 32 | #include "core/hle/kernel/k_resource_limit.h" | 33 | #include "core/hle/kernel/k_resource_limit.h" |
| 33 | #include "core/hle/kernel/k_scheduler.h" | 34 | #include "core/hle/kernel/k_scheduler.h" |
| 34 | #include "core/hle/kernel/k_shared_memory.h" | 35 | #include "core/hle/kernel/k_shared_memory.h" |
| 36 | #include "core/hle/kernel/k_system_resource.h" | ||
| 35 | #include "core/hle/kernel/k_thread.h" | 37 | #include "core/hle/kernel/k_thread.h" |
| 36 | #include "core/hle/kernel/k_worker_task_manager.h" | 38 | #include "core/hle/kernel/k_worker_task_manager.h" |
| 37 | #include "core/hle/kernel/kernel.h" | 39 | #include "core/hle/kernel/kernel.h" |
| @@ -47,6 +49,11 @@ MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70)); | |||
| 47 | namespace Kernel { | 49 | namespace Kernel { |
| 48 | 50 | ||
| 49 | struct KernelCore::Impl { | 51 | struct KernelCore::Impl { |
| 52 | static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000; | ||
| 53 | static constexpr size_t SystemMemoryBlockSlabHeapSize = 10000; | ||
| 54 | static constexpr size_t BlockInfoSlabHeapSize = 4000; | ||
| 55 | static constexpr size_t ReservedDynamicPageCount = 64; | ||
| 56 | |||
| 50 | explicit Impl(Core::System& system_, KernelCore& kernel_) | 57 | explicit Impl(Core::System& system_, KernelCore& kernel_) |
| 51 | : time_manager{system_}, service_threads_manager{1, "ServiceThreadsManager"}, | 58 | : time_manager{system_}, service_threads_manager{1, "ServiceThreadsManager"}, |
| 52 | service_thread_barrier{2}, system{system_} {} | 59 | service_thread_barrier{2}, system{system_} {} |
| @@ -71,7 +78,6 @@ struct KernelCore::Impl { | |||
| 71 | // Initialize kernel memory and resources. | 78 | // Initialize kernel memory and resources. |
| 72 | InitializeSystemResourceLimit(kernel, system.CoreTiming()); | 79 | InitializeSystemResourceLimit(kernel, system.CoreTiming()); |
| 73 | InitializeMemoryLayout(); | 80 | InitializeMemoryLayout(); |
| 74 | Init::InitializeKPageBufferSlabHeap(system); | ||
| 75 | InitializeShutdownThreads(); | 81 | InitializeShutdownThreads(); |
| 76 | InitializePhysicalCores(); | 82 | InitializePhysicalCores(); |
| 77 | InitializePreemption(kernel); | 83 | InitializePreemption(kernel); |
| @@ -81,12 +87,13 @@ struct KernelCore::Impl { | |||
| 81 | const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion(); | 87 | const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion(); |
| 82 | ASSERT(pt_heap_region.GetEndAddress() != 0); | 88 | ASSERT(pt_heap_region.GetEndAddress() != 0); |
| 83 | 89 | ||
| 84 | InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize()); | 90 | InitializeResourceManagers(kernel, pt_heap_region.GetAddress(), |
| 91 | pt_heap_region.GetSize()); | ||
| 85 | } | 92 | } |
| 86 | 93 | ||
| 87 | RegisterHostThread(nullptr); | 94 | RegisterHostThread(nullptr); |
| 88 | 95 | ||
| 89 | default_service_thread = CreateServiceThread(kernel, "DefaultServiceThread"); | 96 | default_service_thread = &CreateServiceThread(kernel, "DefaultServiceThread"); |
| 90 | } | 97 | } |
| 91 | 98 | ||
| 92 | void InitializeCores() { | 99 | void InitializeCores() { |
| @@ -253,16 +260,82 @@ struct KernelCore::Impl { | |||
| 253 | system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event); | 260 | system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event); |
| 254 | } | 261 | } |
| 255 | 262 | ||
| 256 | void InitializeResourceManagers(VAddr address, size_t size) { | 263 | void InitializeResourceManagers(KernelCore& kernel, VAddr address, size_t size) { |
| 257 | dynamic_page_manager = std::make_unique<KDynamicPageManager>(); | 264 | // Ensure that the buffer is suitable for our use. |
| 258 | memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>(); | 265 | ASSERT(Common::IsAligned(address, PageSize)); |
| 266 | ASSERT(Common::IsAligned(size, PageSize)); | ||
| 267 | |||
| 268 | // Ensure that we have space for our reference counts. | ||
| 269 | const size_t rc_size = | ||
| 270 | Common::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(size), PageSize); | ||
| 271 | ASSERT(rc_size < size); | ||
| 272 | size -= rc_size; | ||
| 273 | |||
| 274 | // Initialize the resource managers' shared page manager. | ||
| 275 | resource_manager_page_manager = std::make_unique<KDynamicPageManager>(); | ||
| 276 | resource_manager_page_manager->Initialize( | ||
| 277 | address, size, std::max<size_t>(PageSize, KPageBufferSlabHeap::BufferSize)); | ||
| 278 | |||
| 279 | // Initialize the KPageBuffer slab heap. | ||
| 280 | page_buffer_slab_heap.Initialize(system); | ||
| 281 | |||
| 282 | // Initialize the fixed-size slab heaps. | ||
| 283 | app_memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>(); | ||
| 284 | sys_memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>(); | ||
| 285 | block_info_heap = std::make_unique<KBlockInfoSlabHeap>(); | ||
| 286 | app_memory_block_heap->Initialize(resource_manager_page_manager.get(), | ||
| 287 | ApplicationMemoryBlockSlabHeapSize); | ||
| 288 | sys_memory_block_heap->Initialize(resource_manager_page_manager.get(), | ||
| 289 | SystemMemoryBlockSlabHeapSize); | ||
| 290 | block_info_heap->Initialize(resource_manager_page_manager.get(), BlockInfoSlabHeapSize); | ||
| 291 | |||
| 292 | // Reserve all but a fixed number of remaining pages for the page table heap. | ||
| 293 | const size_t num_pt_pages = resource_manager_page_manager->GetCount() - | ||
| 294 | resource_manager_page_manager->GetUsed() - | ||
| 295 | ReservedDynamicPageCount; | ||
| 296 | page_table_heap = std::make_unique<KPageTableSlabHeap>(); | ||
| 297 | |||
| 298 | // TODO(bunnei): Pass in address once we support kernel virtual memory allocations. | ||
| 299 | page_table_heap->Initialize( | ||
| 300 | resource_manager_page_manager.get(), num_pt_pages, | ||
| 301 | /*GetPointer<KPageTableManager::RefCount>(address + size)*/ nullptr); | ||
| 302 | |||
| 303 | // Setup the slab managers. | ||
| 304 | KDynamicPageManager* const app_dynamic_page_manager = nullptr; | ||
| 305 | KDynamicPageManager* const sys_dynamic_page_manager = | ||
| 306 | /*KTargetSystem::IsDynamicResourceLimitsEnabled()*/ true | ||
| 307 | ? resource_manager_page_manager.get() | ||
| 308 | : nullptr; | ||
| 259 | app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>(); | 309 | app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>(); |
| 260 | 310 | sys_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>(); | |
| 261 | dynamic_page_manager->Initialize(address, size); | 311 | app_block_info_manager = std::make_unique<KBlockInfoManager>(); |
| 262 | static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000; | 312 | sys_block_info_manager = std::make_unique<KBlockInfoManager>(); |
| 263 | memory_block_heap->Initialize(dynamic_page_manager.get(), | 313 | app_page_table_manager = std::make_unique<KPageTableManager>(); |
| 264 | ApplicationMemoryBlockSlabHeapSize); | 314 | sys_page_table_manager = std::make_unique<KPageTableManager>(); |
| 265 | app_memory_block_manager->Initialize(nullptr, memory_block_heap.get()); | 315 | |
| 316 | app_memory_block_manager->Initialize(app_dynamic_page_manager, app_memory_block_heap.get()); | ||
| 317 | sys_memory_block_manager->Initialize(sys_dynamic_page_manager, sys_memory_block_heap.get()); | ||
| 318 | |||
| 319 | app_block_info_manager->Initialize(app_dynamic_page_manager, block_info_heap.get()); | ||
| 320 | sys_block_info_manager->Initialize(sys_dynamic_page_manager, block_info_heap.get()); | ||
| 321 | |||
| 322 | app_page_table_manager->Initialize(app_dynamic_page_manager, page_table_heap.get()); | ||
| 323 | sys_page_table_manager->Initialize(sys_dynamic_page_manager, page_table_heap.get()); | ||
| 324 | |||
| 325 | // Check that we have the correct number of dynamic pages available. | ||
| 326 | ASSERT(resource_manager_page_manager->GetCount() - | ||
| 327 | resource_manager_page_manager->GetUsed() == | ||
| 328 | ReservedDynamicPageCount); | ||
| 329 | |||
| 330 | // Create the system resources. | ||
| 331 | app_system_resource = std::make_unique<KSystemResource>(kernel); | ||
| 332 | sys_system_resource = std::make_unique<KSystemResource>(kernel); | ||
| 333 | |||
| 334 | // Set the managers for the system resources. | ||
| 335 | app_system_resource->SetManagers(*app_memory_block_manager, *app_block_info_manager, | ||
| 336 | *app_page_table_manager); | ||
| 337 | sys_system_resource->SetManagers(*sys_memory_block_manager, *sys_block_info_manager, | ||
| 338 | *sys_page_table_manager); | ||
| 266 | } | 339 | } |
| 267 | 340 | ||
| 268 | void InitializeShutdownThreads() { | 341 | void InitializeShutdownThreads() { |
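Note: the page budgeting in InitializeResourceManagers() above boils down to a few subtractions. The sketch below walks that arithmetic with made-up numbers; the region size, the 2-bytes-per-page reference-count cost, and the pages consumed by the fixed-size slab heaps are illustrative assumptions, and only the alignment and "everything minus a fixed reserve goes to the page table heap" steps mirror the diff.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t PageSize = 0x1000;
constexpr std::size_t ReservedDynamicPageCount = 64;

constexpr std::size_t AlignUp(std::size_t value, std::size_t align) {
    return (value + align - 1) / align * align;
}

int main() {
    std::size_t size = 64 * 1024 * 1024; // pretend pt_heap_region is 64 MiB

    // Carve off space for the page-table reference counts (assume 2 bytes per page).
    const std::size_t rc_size = AlignUp((size / PageSize) * sizeof(std::uint16_t), PageSize);
    assert(rc_size < size);
    size -= rc_size;

    // The shared dynamic page manager serves pages out of what is left.
    const std::size_t total_pages = size / PageSize;

    // Pretend the memory-block and block-info slab heaps already took this many pages.
    const std::size_t used_pages = 48;

    // All remaining pages except a small fixed reserve back the page table slab heap,
    // which is why the later ASSERT expects exactly ReservedDynamicPageCount left over.
    const std::size_t num_pt_pages = total_pages - used_pages - ReservedDynamicPageCount;
    std::printf("rc bytes: %zu, pt pages: %zu\n", rc_size, num_pt_pages);
    return 0;
}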
| @@ -449,6 +522,9 @@ struct KernelCore::Impl { | |||
| 449 | ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( | 522 | ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert( |
| 450 | misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc)); | 523 | misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc)); |
| 451 | 524 | ||
| 525 | // Determine if we'll use extra thread resources. | ||
| 526 | const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit(); | ||
| 527 | |||
| 452 | // Setup the stack region. | 528 | // Setup the stack region. |
| 453 | constexpr size_t StackRegionSize = 14_MiB; | 529 | constexpr size_t StackRegionSize = 14_MiB; |
| 454 | constexpr size_t StackRegionAlign = KernelAslrAlignment; | 530 | constexpr size_t StackRegionAlign = KernelAslrAlignment; |
| @@ -459,7 +535,8 @@ struct KernelCore::Impl { | |||
| 459 | stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack)); | 535 | stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack)); |
| 460 | 536 | ||
| 461 | // Determine the size of the resource region. | 537 | // Determine the size of the resource region. |
| 462 | const size_t resource_region_size = memory_layout->GetResourceRegionSizeForInit(); | 538 | const size_t resource_region_size = |
| 539 | memory_layout->GetResourceRegionSizeForInit(use_extra_resources); | ||
| 463 | 540 | ||
| 464 | // Determine the size of the slab region. | 541 | // Determine the size of the slab region. |
| 465 | const size_t slab_region_size = | 542 | const size_t slab_region_size = |
| @@ -705,33 +782,31 @@ struct KernelCore::Impl { | |||
| 705 | search->second(system.ServiceManager(), server_port); | 782 | search->second(system.ServiceManager(), server_port); |
| 706 | } | 783 | } |
| 707 | 784 | ||
| 708 | std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(KernelCore& kernel, | 785 | Kernel::ServiceThread& CreateServiceThread(KernelCore& kernel, const std::string& name) { |
| 709 | const std::string& name) { | 786 | auto* ptr = new ServiceThread(kernel, name); |
| 710 | auto service_thread = std::make_shared<Kernel::ServiceThread>(kernel, name); | ||
| 711 | 787 | ||
| 712 | service_threads_manager.QueueWork( | 788 | service_threads_manager.QueueWork( |
| 713 | [this, service_thread]() { service_threads.emplace(service_thread); }); | 789 | [this, ptr]() { service_threads.emplace(ptr, std::unique_ptr<ServiceThread>(ptr)); }); |
| 714 | 790 | ||
| 715 | return service_thread; | 791 | return *ptr; |
| 716 | } | 792 | } |
| 717 | 793 | ||
| 718 | void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) { | 794 | void ReleaseServiceThread(Kernel::ServiceThread& service_thread) { |
| 719 | if (auto strong_ptr = service_thread.lock()) { | 795 | auto* ptr = &service_thread; |
| 720 | if (strong_ptr == default_service_thread.lock()) { | ||
| 721 | // Nothing to do here, the service is using default_service_thread, which will be | ||
| 722 | // released on shutdown. | ||
| 723 | return; | ||
| 724 | } | ||
| 725 | 796 | ||
| 726 | service_threads_manager.QueueWork( | 797 | if (ptr == default_service_thread) { |
| 727 | [this, strong_ptr{std::move(strong_ptr)}]() { service_threads.erase(strong_ptr); }); | 798 | // Nothing to do here, the service is using default_service_thread, which will be |
| 799 | // released on shutdown. | ||
| 800 | return; | ||
| 728 | } | 801 | } |
| 802 | |||
| 803 | service_threads_manager.QueueWork([this, ptr]() { service_threads.erase(ptr); }); | ||
| 729 | } | 804 | } |
| 730 | 805 | ||
| 731 | void ClearServiceThreads() { | 806 | void ClearServiceThreads() { |
| 732 | service_threads_manager.QueueWork([this] { | 807 | service_threads_manager.QueueWork([this] { |
| 733 | service_threads.clear(); | 808 | service_threads.clear(); |
| 734 | default_service_thread.reset(); | 809 | default_service_thread = nullptr; |
| 735 | service_thread_barrier.Sync(); | 810 | service_thread_barrier.Sync(); |
| 736 | }); | 811 | }); |
| 737 | service_thread_barrier.Sync(); | 812 | service_thread_barrier.Sync(); |
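Note: the service-thread changes above replace shared/weak pointers with a map that owns each thread and a raw pointer handed to callers. A reduced model of that ownership scheme, minus the ThreadWorker indirection, is sketched below; ServiceThreadStub is a stand-in type, not the real Kernel::ServiceThread.

#include <memory>
#include <string>
#include <unordered_map>

struct ServiceThreadStub {
    explicit ServiceThreadStub(std::string name_) : name(std::move(name_)) {}
    std::string name;
};

class ServiceThreadRegistry {
public:
    ServiceThreadStub& Create(const std::string& name) {
        auto* ptr = new ServiceThreadStub(name);
        // The registry is the sole owner; callers only ever see a reference.
        threads.emplace(ptr, std::unique_ptr<ServiceThreadStub>(ptr));
        return *ptr;
    }

    void Release(ServiceThreadStub& thread) {
        threads.erase(&thread); // dropping the unique_ptr destroys the thread
    }

private:
    std::unordered_map<ServiceThreadStub*, std::unique_ptr<ServiceThreadStub>> threads;
};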
| @@ -754,6 +829,8 @@ struct KernelCore::Impl { | |||
| 754 | Init::KSlabResourceCounts slab_resource_counts{}; | 829 | Init::KSlabResourceCounts slab_resource_counts{}; |
| 755 | KResourceLimit* system_resource_limit{}; | 830 | KResourceLimit* system_resource_limit{}; |
| 756 | 831 | ||
| 832 | KPageBufferSlabHeap page_buffer_slab_heap; | ||
| 833 | |||
| 757 | std::shared_ptr<Core::Timing::EventType> preemption_event; | 834 | std::shared_ptr<Core::Timing::EventType> preemption_event; |
| 758 | 835 | ||
| 759 | // This is the kernel's handle table or supervisor handle table which | 836 | // This is the kernel's handle table or supervisor handle table which |
| @@ -779,10 +856,20 @@ struct KernelCore::Impl { | |||
| 779 | // Kernel memory management | 856 | // Kernel memory management |
| 780 | std::unique_ptr<KMemoryManager> memory_manager; | 857 | std::unique_ptr<KMemoryManager> memory_manager; |
| 781 | 858 | ||
| 782 | // Dynamic slab managers | 859 | // Resource managers |
| 783 | std::unique_ptr<KDynamicPageManager> dynamic_page_manager; | 860 | std::unique_ptr<KDynamicPageManager> resource_manager_page_manager; |
| 784 | std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap; | 861 | std::unique_ptr<KPageTableSlabHeap> page_table_heap; |
| 862 | std::unique_ptr<KMemoryBlockSlabHeap> app_memory_block_heap; | ||
| 863 | std::unique_ptr<KMemoryBlockSlabHeap> sys_memory_block_heap; | ||
| 864 | std::unique_ptr<KBlockInfoSlabHeap> block_info_heap; | ||
| 865 | std::unique_ptr<KPageTableManager> app_page_table_manager; | ||
| 866 | std::unique_ptr<KPageTableManager> sys_page_table_manager; | ||
| 785 | std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager; | 867 | std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager; |
| 868 | std::unique_ptr<KMemoryBlockSlabManager> sys_memory_block_manager; | ||
| 869 | std::unique_ptr<KBlockInfoManager> app_block_info_manager; | ||
| 870 | std::unique_ptr<KBlockInfoManager> sys_block_info_manager; | ||
| 871 | std::unique_ptr<KSystemResource> app_system_resource; | ||
| 872 | std::unique_ptr<KSystemResource> sys_system_resource; | ||
| 786 | 873 | ||
| 787 | // Shared memory for services | 874 | // Shared memory for services |
| 788 | Kernel::KSharedMemory* hid_shared_mem{}; | 875 | Kernel::KSharedMemory* hid_shared_mem{}; |
| @@ -795,8 +882,8 @@ struct KernelCore::Impl { | |||
| 795 | std::unique_ptr<KMemoryLayout> memory_layout; | 882 | std::unique_ptr<KMemoryLayout> memory_layout; |
| 796 | 883 | ||
| 797 | // Threads used for services | 884 | // Threads used for services |
| 798 | std::unordered_set<std::shared_ptr<ServiceThread>> service_threads; | 885 | std::unordered_map<ServiceThread*, std::unique_ptr<ServiceThread>> service_threads; |
| 799 | std::weak_ptr<ServiceThread> default_service_thread; | 886 | ServiceThread* default_service_thread{}; |
| 800 | Common::ThreadWorker service_threads_manager; | 887 | Common::ThreadWorker service_threads_manager; |
| 801 | Common::Barrier service_thread_barrier; | 888 | Common::Barrier service_thread_barrier; |
| 802 | 889 | ||
| @@ -1064,12 +1151,12 @@ const KMemoryManager& KernelCore::MemoryManager() const { | |||
| 1064 | return *impl->memory_manager; | 1151 | return *impl->memory_manager; |
| 1065 | } | 1152 | } |
| 1066 | 1153 | ||
| 1067 | KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() { | 1154 | KSystemResource& KernelCore::GetSystemSystemResource() { |
| 1068 | return *impl->app_memory_block_manager; | 1155 | return *impl->sys_system_resource; |
| 1069 | } | 1156 | } |
| 1070 | 1157 | ||
| 1071 | const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const { | 1158 | const KSystemResource& KernelCore::GetSystemSystemResource() const { |
| 1072 | return *impl->app_memory_block_manager; | 1159 | return *impl->sys_system_resource; |
| 1073 | } | 1160 | } |
| 1074 | 1161 | ||
| 1075 | Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { | 1162 | Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { |
| @@ -1169,15 +1256,15 @@ void KernelCore::ExitSVCProfile() { | |||
| 1169 | MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]); | 1256 | MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]); |
| 1170 | } | 1257 | } |
| 1171 | 1258 | ||
| 1172 | std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) { | 1259 | Kernel::ServiceThread& KernelCore::CreateServiceThread(const std::string& name) { |
| 1173 | return impl->CreateServiceThread(*this, name); | 1260 | return impl->CreateServiceThread(*this, name); |
| 1174 | } | 1261 | } |
| 1175 | 1262 | ||
| 1176 | std::weak_ptr<Kernel::ServiceThread> KernelCore::GetDefaultServiceThread() const { | 1263 | Kernel::ServiceThread& KernelCore::GetDefaultServiceThread() const { |
| 1177 | return impl->default_service_thread; | 1264 | return *impl->default_service_thread; |
| 1178 | } | 1265 | } |
| 1179 | 1266 | ||
| 1180 | void KernelCore::ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread) { | 1267 | void KernelCore::ReleaseServiceThread(Kernel::ServiceThread& service_thread) { |
| 1181 | impl->ReleaseServiceThread(service_thread); | 1268 | impl->ReleaseServiceThread(service_thread); |
| 1182 | } | 1269 | } |
| 1183 | 1270 | ||
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 8a21568f7..2e22fe0f6 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h | |||
| @@ -34,13 +34,16 @@ class KClientPort; | |||
| 34 | class GlobalSchedulerContext; | 34 | class GlobalSchedulerContext; |
| 35 | class KAutoObjectWithListContainer; | 35 | class KAutoObjectWithListContainer; |
| 36 | class KClientSession; | 36 | class KClientSession; |
| 37 | class KDebug; | ||
| 38 | class KDynamicPageManager; | ||
| 37 | class KEvent; | 39 | class KEvent; |
| 40 | class KEventInfo; | ||
| 38 | class KHandleTable; | 41 | class KHandleTable; |
| 39 | class KLinkedListNode; | 42 | class KLinkedListNode; |
| 40 | class KMemoryBlockSlabManager; | ||
| 41 | class KMemoryLayout; | 43 | class KMemoryLayout; |
| 42 | class KMemoryManager; | 44 | class KMemoryManager; |
| 43 | class KPageBuffer; | 45 | class KPageBuffer; |
| 46 | class KPageBufferSlabHeap; | ||
| 44 | class KPort; | 47 | class KPort; |
| 45 | class KProcess; | 48 | class KProcess; |
| 46 | class KResourceLimit; | 49 | class KResourceLimit; |
| @@ -51,6 +54,7 @@ class KSession; | |||
| 51 | class KSessionRequest; | 54 | class KSessionRequest; |
| 52 | class KSharedMemory; | 55 | class KSharedMemory; |
| 53 | class KSharedMemoryInfo; | 56 | class KSharedMemoryInfo; |
| 57 | class KSecureSystemResource; | ||
| 54 | class KThread; | 58 | class KThread; |
| 55 | class KThreadLocalPage; | 59 | class KThreadLocalPage; |
| 56 | class KTransferMemory; | 60 | class KTransferMemory; |
| @@ -244,11 +248,11 @@ public: | |||
| 244 | /// Gets the virtual memory manager for the kernel. | 248 | /// Gets the virtual memory manager for the kernel. |
| 245 | const KMemoryManager& MemoryManager() const; | 249 | const KMemoryManager& MemoryManager() const; |
| 246 | 250 | ||
| 247 | /// Gets the application memory block manager for the kernel. | 251 | /// Gets the system resource manager. |
| 248 | KMemoryBlockSlabManager& GetApplicationMemoryBlockManager(); | 252 | KSystemResource& GetSystemSystemResource(); |
| 249 | 253 | ||
| 250 | /// Gets the application memory block manager for the kernel. | 254 | /// Gets the system resource manager. |
| 251 | const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const; | 255 | const KSystemResource& GetSystemSystemResource() const; |
| 252 | 256 | ||
| 253 | /// Gets the shared memory object for HID services. | 257 | /// Gets the shared memory object for HID services. |
| 254 | Kernel::KSharedMemory& GetHidSharedMem(); | 258 | Kernel::KSharedMemory& GetHidSharedMem(); |
| @@ -305,24 +309,24 @@ public: | |||
| 305 | * See GetDefaultServiceThread. | 309 | * See GetDefaultServiceThread. |
| 306 | * @param name String name for the ServerSession creating this thread, used for debug | 310 | * @param name String name for the ServerSession creating this thread, used for debug |
| 307 | * purposes. | 311 | * purposes. |
| 308 | * @returns The a weak pointer newly created service thread. | 312 | * @returns A reference to the newly created service thread. |
| 309 | */ | 313 | */ |
| 310 | std::weak_ptr<Kernel::ServiceThread> CreateServiceThread(const std::string& name); | 314 | Kernel::ServiceThread& CreateServiceThread(const std::string& name); |
| 311 | 315 | ||
| 312 | /** | 316 | /** |
| 313 | * Gets the default host service thread, which executes HLE service requests. Unless service | 317 | * Gets the default host service thread, which executes HLE service requests. Unless service |
| 314 | * requests need to block on the host, the default service thread should be used in favor of | 318 | * requests need to block on the host, the default service thread should be used in favor of |
| 315 | * creating a new service thread. | 319 | * creating a new service thread. |
| 316 | * @returns The a weak pointer for the default service thread. | 320 | * @returns A reference to the default service thread. |
| 317 | */ | 321 | */ |
| 318 | std::weak_ptr<Kernel::ServiceThread> GetDefaultServiceThread() const; | 322 | Kernel::ServiceThread& GetDefaultServiceThread() const; |
| 319 | 323 | ||
| 320 | /** | 324 | /** |
| 321 | * Releases a HLE service thread, instructing KernelCore to free it. This should be called when | 325 | * Releases a HLE service thread, instructing KernelCore to free it. This should be called when |
| 322 | * the ServerSession associated with the thread is destroyed. | 326 | * the ServerSession associated with the thread is destroyed. |
| 323 | * @param service_thread Service thread to release. | 327 | * @param service_thread Service thread to release. |
| 324 | */ | 328 | */ |
| 325 | void ReleaseServiceThread(std::weak_ptr<Kernel::ServiceThread> service_thread); | 329 | void ReleaseServiceThread(Kernel::ServiceThread& service_thread); |
| 326 | 330 | ||
| 327 | /// Workaround for single-core mode when preempting threads while idle. | 331 | /// Workaround for single-core mode when preempting threads while idle. |
| 328 | bool IsPhantomModeForSingleCore() const; | 332 | bool IsPhantomModeForSingleCore() const; |
| @@ -364,6 +368,12 @@ public: | |||
| 364 | return slab_heap_container->thread_local_page; | 368 | return slab_heap_container->thread_local_page; |
| 365 | } else if constexpr (std::is_same_v<T, KSessionRequest>) { | 369 | } else if constexpr (std::is_same_v<T, KSessionRequest>) { |
| 366 | return slab_heap_container->session_request; | 370 | return slab_heap_container->session_request; |
| 371 | } else if constexpr (std::is_same_v<T, KSecureSystemResource>) { | ||
| 372 | return slab_heap_container->secure_system_resource; | ||
| 373 | } else if constexpr (std::is_same_v<T, KEventInfo>) { | ||
| 374 | return slab_heap_container->event_info; | ||
| 375 | } else if constexpr (std::is_same_v<T, KDebug>) { | ||
| 376 | return slab_heap_container->debug; | ||
| 367 | } | 377 | } |
| 368 | } | 378 | } |
| 369 | 379 | ||
| @@ -427,6 +437,9 @@ private: | |||
| 427 | KSlabHeap<KPageBuffer> page_buffer; | 437 | KSlabHeap<KPageBuffer> page_buffer; |
| 428 | KSlabHeap<KThreadLocalPage> thread_local_page; | 438 | KSlabHeap<KThreadLocalPage> thread_local_page; |
| 429 | KSlabHeap<KSessionRequest> session_request; | 439 | KSlabHeap<KSessionRequest> session_request; |
| 440 | KSlabHeap<KSecureSystemResource> secure_system_resource; | ||
| 441 | KSlabHeap<KEventInfo> event_info; | ||
| 442 | KSlabHeap<KDebug> debug; | ||
| 430 | }; | 443 | }; |
| 431 | 444 | ||
| 432 | std::unique_ptr<SlabHeapContainer> slab_heap_container; | 445 | std::unique_ptr<SlabHeapContainer> slab_heap_container; |
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h index 06b51e919..0228ce188 100644 --- a/src/core/hle/kernel/slab_helpers.h +++ b/src/core/hle/kernel/slab_helpers.h | |||
| @@ -53,6 +53,84 @@ public: | |||
| 53 | }; | 53 | }; |
| 54 | 54 | ||
| 55 | template <typename Derived, typename Base> | 55 | template <typename Derived, typename Base> |
| 56 | class KAutoObjectWithSlabHeap : public Base { | ||
| 57 | static_assert(std::is_base_of<KAutoObject, Base>::value); | ||
| 58 | |||
| 59 | private: | ||
| 60 | static Derived* Allocate(KernelCore& kernel) { | ||
| 61 | return kernel.SlabHeap<Derived>().Allocate(kernel); | ||
| 62 | } | ||
| 63 | |||
| 64 | static void Free(KernelCore& kernel, Derived* obj) { | ||
| 65 | kernel.SlabHeap<Derived>().Free(obj); | ||
| 66 | } | ||
| 67 | |||
| 68 | public: | ||
| 69 | explicit KAutoObjectWithSlabHeap(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {} | ||
| 70 | virtual ~KAutoObjectWithSlabHeap() = default; | ||
| 71 | |||
| 72 | virtual void Destroy() override { | ||
| 73 | const bool is_initialized = this->IsInitialized(); | ||
| 74 | uintptr_t arg = 0; | ||
| 75 | if (is_initialized) { | ||
| 76 | arg = this->GetPostDestroyArgument(); | ||
| 77 | this->Finalize(); | ||
| 78 | } | ||
| 79 | Free(kernel, static_cast<Derived*>(this)); | ||
| 80 | if (is_initialized) { | ||
| 81 | Derived::PostDestroy(arg); | ||
| 82 | } | ||
| 83 | } | ||
| 84 | |||
| 85 | virtual bool IsInitialized() const { | ||
| 86 | return true; | ||
| 87 | } | ||
| 88 | virtual uintptr_t GetPostDestroyArgument() const { | ||
| 89 | return 0; | ||
| 90 | } | ||
| 91 | |||
| 92 | size_t GetSlabIndex() const { | ||
| 93 | return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this)); | ||
| 94 | } | ||
| 95 | |||
| 96 | public: | ||
| 97 | static void InitializeSlabHeap(KernelCore& kernel, void* memory, size_t memory_size) { | ||
| 98 | kernel.SlabHeap<Derived>().Initialize(memory, memory_size); | ||
| 99 | } | ||
| 100 | |||
| 101 | static Derived* Create(KernelCore& kernel) { | ||
| 102 | Derived* obj = Allocate(kernel); | ||
| 103 | if (obj != nullptr) { | ||
| 104 | KAutoObject::Create(obj); | ||
| 105 | } | ||
| 106 | return obj; | ||
| 107 | } | ||
| 108 | |||
| 109 | static size_t GetObjectSize(KernelCore& kernel) { | ||
| 110 | return kernel.SlabHeap<Derived>().GetObjectSize(); | ||
| 111 | } | ||
| 112 | |||
| 113 | static size_t GetSlabHeapSize(KernelCore& kernel) { | ||
| 114 | return kernel.SlabHeap<Derived>().GetSlabHeapSize(); | ||
| 115 | } | ||
| 116 | |||
| 117 | static size_t GetPeakIndex(KernelCore& kernel) { | ||
| 118 | return kernel.SlabHeap<Derived>().GetPeakIndex(); | ||
| 119 | } | ||
| 120 | |||
| 121 | static uintptr_t GetSlabHeapAddress(KernelCore& kernel) { | ||
| 122 | return kernel.SlabHeap<Derived>().GetSlabHeapAddress(); | ||
| 123 | } | ||
| 124 | |||
| 125 | static size_t GetNumRemaining(KernelCore& kernel) { | ||
| 126 | return kernel.SlabHeap<Derived>().GetNumRemaining(); | ||
| 127 | } | ||
| 128 | |||
| 129 | protected: | ||
| 130 | KernelCore& kernel; | ||
| 131 | }; | ||
| 132 | |||
| 133 | template <typename Derived, typename Base> | ||
| 56 | class KAutoObjectWithSlabHeapAndContainer : public Base { | 134 | class KAutoObjectWithSlabHeapAndContainer : public Base { |
| 57 | static_assert(std::is_base_of<KAutoObjectWithList, Base>::value); | 135 | static_assert(std::is_base_of<KAutoObjectWithList, Base>::value); |
| 58 | 136 | ||
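Note: the new KAutoObjectWithSlabHeap helper above is a CRTP base: Create() allocates the derived object from its slab, and Destroy() finalizes, frees the slot, then calls the static PostDestroy() hook. A hypothetical derived type would plug in roughly as sketched below; KThingy and its members are invented for illustration and assume the kernel headers above are included.

class KThingy final : public KAutoObjectWithSlabHeap<KThingy, KAutoObject> {
public:
    explicit KThingy(KernelCore& kernel_) : KAutoObjectWithSlabHeap(kernel_) {}

    void Initialize() {
        m_initialized = true;
    }
    bool IsInitialized() const override {
        return m_initialized;
    }
    // Called by Destroy() before the slab slot is freed.
    void Finalize() {
        m_initialized = false;
    }
    // Runs after the slot has been returned, with GetPostDestroyArgument()'s value.
    static void PostDestroy(uintptr_t arg) {}

private:
    bool m_initialized{};
};

// Typical lifetime: allocate from the slab, initialize, then let the final Close()
// trigger Destroy(), which finalizes and frees the slot.
// KThingy* thingy = KThingy::Create(kernel);
// thingy->Initialize();
// ...
// thingy->Close();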
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 4c819f4b6..ecac97a52 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp | |||
| @@ -2247,7 +2247,7 @@ static u64 GetSystemTick(Core::System& system) { | |||
| 2247 | auto& core_timing = system.CoreTiming(); | 2247 | auto& core_timing = system.CoreTiming(); |
| 2248 | 2248 | ||
| 2249 | // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick) | 2249 | // Returns the value of cntpct_el0 (https://switchbrew.org/wiki/SVC#svcGetSystemTick) |
| 2250 | const u64 result{system.CoreTiming().GetClockTicks()}; | 2250 | const u64 result{core_timing.GetClockTicks()}; |
| 2251 | 2251 | ||
| 2252 | if (!system.Kernel().IsMulticore()) { | 2252 | if (!system.Kernel().IsMulticore()) { |
| 2253 | core_timing.AddTicks(400U); | 2253 | core_timing.AddTicks(400U); |
diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h index f27cade33..b7ca53085 100644 --- a/src/core/hle/kernel/svc_results.h +++ b/src/core/hle/kernel/svc_results.h | |||
| @@ -37,6 +37,7 @@ constexpr Result ResultInvalidState{ErrorModule::Kernel, 125}; | |||
| 37 | constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126}; | 37 | constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126}; |
| 38 | constexpr Result ResultPortClosed{ErrorModule::Kernel, 131}; | 38 | constexpr Result ResultPortClosed{ErrorModule::Kernel, 131}; |
| 39 | constexpr Result ResultLimitReached{ErrorModule::Kernel, 132}; | 39 | constexpr Result ResultLimitReached{ErrorModule::Kernel, 132}; |
| 40 | constexpr Result ResultOutOfAddressSpace{ErrorModule::Kernel, 259}; | ||
| 40 | constexpr Result ResultInvalidId{ErrorModule::Kernel, 519}; | 41 | constexpr Result ResultInvalidId{ErrorModule::Kernel, 519}; |
| 41 | 42 | ||
| 42 | } // namespace Kernel | 43 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h index abb9847fe..9b0305552 100644 --- a/src/core/hle/kernel/svc_types.h +++ b/src/core/hle/kernel/svc_types.h | |||
| @@ -22,8 +22,8 @@ enum class MemoryState : u32 { | |||
| 22 | Ipc = 0x0A, | 22 | Ipc = 0x0A, |
| 23 | Stack = 0x0B, | 23 | Stack = 0x0B, |
| 24 | ThreadLocal = 0x0C, | 24 | ThreadLocal = 0x0C, |
| 25 | Transferred = 0x0D, | 25 | Transfered = 0x0D, |
| 26 | SharedTransferred = 0x0E, | 26 | SharedTransfered = 0x0E, |
| 27 | SharedCode = 0x0F, | 27 | SharedCode = 0x0F, |
| 28 | Inaccessible = 0x10, | 28 | Inaccessible = 0x10, |
| 29 | NonSecureIpc = 0x11, | 29 | NonSecureIpc = 0x11, |
| @@ -32,6 +32,7 @@ enum class MemoryState : u32 { | |||
| 32 | GeneratedCode = 0x14, | 32 | GeneratedCode = 0x14, |
| 33 | CodeOut = 0x15, | 33 | CodeOut = 0x15, |
| 34 | Coverage = 0x16, | 34 | Coverage = 0x16, |
| 35 | Insecure = 0x17, | ||
| 35 | }; | 36 | }; |
| 36 | DECLARE_ENUM_FLAG_OPERATORS(MemoryState); | 37 | DECLARE_ENUM_FLAG_OPERATORS(MemoryState); |
| 37 | 38 | ||
| @@ -83,6 +84,13 @@ enum class YieldType : s64 { | |||
| 83 | ToAnyThread = -2, | 84 | ToAnyThread = -2, |
| 84 | }; | 85 | }; |
| 85 | 86 | ||
| 87 | enum class ThreadExitReason : u32 { | ||
| 88 | ExitThread = 0, | ||
| 89 | TerminateThread = 1, | ||
| 90 | ExitProcess = 2, | ||
| 91 | TerminateProcess = 3, | ||
| 92 | }; | ||
| 93 | |||
| 86 | enum class ThreadActivity : u32 { | 94 | enum class ThreadActivity : u32 { |
| 87 | Runnable = 0, | 95 | Runnable = 0, |
| 88 | Paused = 1, | 96 | Paused = 1, |
| @@ -108,6 +116,34 @@ enum class ProcessState : u32 { | |||
| 108 | DebugBreak = 7, | 116 | DebugBreak = 7, |
| 109 | }; | 117 | }; |
| 110 | 118 | ||
| 119 | enum class ProcessExitReason : u32 { | ||
| 120 | ExitProcess = 0, | ||
| 121 | TerminateProcess = 1, | ||
| 122 | Exception = 2, | ||
| 123 | }; | ||
| 124 | |||
| 111 | constexpr inline size_t ThreadLocalRegionSize = 0x200; | 125 | constexpr inline size_t ThreadLocalRegionSize = 0x200; |
| 112 | 126 | ||
| 127 | // Debug types. | ||
| 128 | enum class DebugEvent : u32 { | ||
| 129 | CreateProcess = 0, | ||
| 130 | CreateThread = 1, | ||
| 131 | ExitProcess = 2, | ||
| 132 | ExitThread = 3, | ||
| 133 | Exception = 4, | ||
| 134 | }; | ||
| 135 | |||
| 136 | enum class DebugException : u32 { | ||
| 137 | UndefinedInstruction = 0, | ||
| 138 | InstructionAbort = 1, | ||
| 139 | DataAbort = 2, | ||
| 140 | AlignmentFault = 3, | ||
| 141 | DebuggerAttached = 4, | ||
| 142 | BreakPoint = 5, | ||
| 143 | UserBreak = 6, | ||
| 144 | DebuggerBreak = 7, | ||
| 145 | UndefinedSystemCall = 8, | ||
| 146 | MemorySystemError = 9, | ||
| 147 | }; | ||
| 148 | |||
| 113 | } // namespace Kernel::Svc | 149 | } // namespace Kernel::Svc |
diff --git a/src/core/hle/result.h b/src/core/hle/result.h index ef4b2d417..56c990728 100644 --- a/src/core/hle/result.h +++ b/src/core/hle/result.h | |||
| @@ -423,16 +423,17 @@ constexpr void UpdateCurrentResultReference<const Result>(Result result_referenc | |||
| 423 | } // namespace ResultImpl | 423 | } // namespace ResultImpl |
| 424 | 424 | ||
| 425 | #define DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(COUNTER_VALUE) \ | 425 | #define DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(COUNTER_VALUE) \ |
| 426 | [[maybe_unused]] constexpr bool HasPrevRef_##COUNTER_VALUE = \ | 426 | [[maybe_unused]] constexpr bool CONCAT2(HasPrevRef_, COUNTER_VALUE) = \ |
| 427 | std::same_as<decltype(__TmpCurrentResultReference), Result&>; \ | 427 | std::same_as<decltype(__TmpCurrentResultReference), Result&>; \ |
| 428 | [[maybe_unused]] auto& PrevRef_##COUNTER_VALUE = __TmpCurrentResultReference; \ | 428 | [[maybe_unused]] Result CONCAT2(PrevRef_, COUNTER_VALUE) = __TmpCurrentResultReference; \ |
| 429 | [[maybe_unused]] Result __tmp_result_##COUNTER_VALUE = ResultSuccess; \ | 429 | [[maybe_unused]] Result CONCAT2(__tmp_result_, COUNTER_VALUE) = ResultSuccess; \ |
| 430 | Result& __TmpCurrentResultReference = \ | 430 | Result& __TmpCurrentResultReference = CONCAT2(HasPrevRef_, COUNTER_VALUE) \ |
| 431 | HasPrevRef_##COUNTER_VALUE ? PrevRef_##COUNTER_VALUE : __tmp_result_##COUNTER_VALUE | 431 | ? CONCAT2(PrevRef_, COUNTER_VALUE) \ |
| 432 | : CONCAT2(__tmp_result_, COUNTER_VALUE) | ||
| 432 | 433 | ||
| 433 | #define ON_RESULT_RETURN_IMPL(...) \ | 434 | #define ON_RESULT_RETURN_IMPL(...) \ |
| 434 | static_assert(std::same_as<decltype(__TmpCurrentResultReference), Result&>); \ | 435 | static_assert(std::same_as<decltype(__TmpCurrentResultReference), Result&>); \ |
| 435 | auto RESULT_GUARD_STATE_##__COUNTER__ = \ | 436 | auto CONCAT2(RESULT_GUARD_STATE_, __COUNTER__) = \ |
| 436 | ResultImpl::ResultReferenceForScopedResultGuard<__VA_ARGS__>( \ | 437 | ResultImpl::ResultReferenceForScopedResultGuard<__VA_ARGS__>( \ |
| 437 | __TmpCurrentResultReference) + \ | 438 | __TmpCurrentResultReference) + \ |
| 438 | [&]() | 439 | [&]() |
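Note: the result.h hunk above routes generated names through CONCAT2 because the ## operator pastes its operands before they are macro-expanded, so "PREFIX_##__COUNTER__" yields the literal token PREFIX___COUNTER__ instead of a unique number; a two-level concat expands the counter first. Minimal sketch of that preprocessor behavior (CONCAT2 is redefined here with a SKETCH_ prefix only to keep it self-contained):

#define SKETCH_CONCAT2_IMPL(a, b) a##b
#define SKETCH_CONCAT2(a, b) SKETCH_CONCAT2_IMPL(a, b)

#define DECLARE_GUARD_BAD() int guard_##__COUNTER__ = 0                    // -> int guard___COUNTER__ = 0
#define DECLARE_GUARD_GOOD() int SKETCH_CONCAT2(guard_, __COUNTER__) = 0   // -> int guard_<N> = 0

int main() {
    DECLARE_GUARD_GOOD(); // guard_<N>, unique per expansion
    DECLARE_GUARD_GOOD(); // a second, differently named guard in the same scope
    return 0;
}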
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp index 44388655d..fa29db758 100644 --- a/src/core/hle/service/nvdrv/devices/nvmap.cpp +++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp | |||
| @@ -126,10 +126,12 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) | |||
| 126 | LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); | 126 | LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle); |
| 127 | return result; | 127 | return result; |
| 128 | } | 128 | } |
| 129 | bool is_out_io{}; | ||
| 129 | ASSERT(system.CurrentProcess() | 130 | ASSERT(system.CurrentProcess() |
| 130 | ->PageTable() | 131 | ->PageTable() |
| 131 | .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size, | 132 | .LockForMapDeviceAddressSpace(&is_out_io, handle_description->address, |
| 132 | Kernel::KMemoryPermission::None, true) | 133 | handle_description->size, |
| 134 | Kernel::KMemoryPermission::None, true, false) | ||
| 133 | .IsSuccess()); | 135 | .IsSuccess()); |
| 134 | std::memcpy(output.data(), ¶ms, sizeof(params)); | 136 | std::memcpy(output.data(), ¶ms, sizeof(params)); |
| 135 | return result; | 137 | return result; |
diff --git a/src/core/internal_network/socket_proxy.cpp b/src/core/internal_network/socket_proxy.cpp index 7d5d37bbc..1e1c42cea 100644 --- a/src/core/internal_network/socket_proxy.cpp +++ b/src/core/internal_network/socket_proxy.cpp | |||
| @@ -11,6 +11,10 @@ | |||
| 11 | #include "core/internal_network/network_interface.h" | 11 | #include "core/internal_network/network_interface.h" |
| 12 | #include "core/internal_network/socket_proxy.h" | 12 | #include "core/internal_network/socket_proxy.h" |
| 13 | 13 | ||
| 14 | #if YUZU_UNIX | ||
| 15 | #include <sys/socket.h> | ||
| 16 | #endif | ||
| 17 | |||
| 14 | namespace Network { | 18 | namespace Network { |
| 15 | 19 | ||
| 16 | ProxySocket::ProxySocket(RoomNetwork& room_network_) noexcept : room_network{room_network_} {} | 20 | ProxySocket::ProxySocket(RoomNetwork& room_network_) noexcept : room_network{room_network_} {} |
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index f9794dfe4..4a2f2c1fd 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp | |||
| @@ -631,47 +631,40 @@ void Maxwell3D::ProcessDeferredDraw() { | |||
| 631 | Instance, | 631 | Instance, |
| 632 | }; | 632 | }; |
| 633 | DrawMode draw_mode{DrawMode::Undefined}; | 633 | DrawMode draw_mode{DrawMode::Undefined}; |
| 634 | u32 instance_count = 1; | ||
| 635 | |||
| 636 | u32 index = 0; | ||
| 637 | u32 method = 0; | ||
| 638 | u32 method_count = static_cast<u32>(deferred_draw_method.size()); | 634 | u32 method_count = static_cast<u32>(deferred_draw_method.size()); |
| 639 | for (; index < method_count && | 635 | u32 method = deferred_draw_method[method_count - 1]; |
| 640 | (method = deferred_draw_method[index]) != MAXWELL3D_REG_INDEX(draw.begin); | 636 | if (MAXWELL3D_REG_INDEX(draw.end) != method) { |
| 641 | ++index) | ||
| 642 | ; | ||
| 643 | |||
| 644 | if (MAXWELL3D_REG_INDEX(draw.begin) != method) { | ||
| 645 | return; | ||
| 646 | } | ||
| 647 | |||
| 648 | // The minimum number of methods for drawing must be greater than or equal to | ||
| 649 | // 3[draw.begin->vertex(index)count(first)->draw.end] to avoid errors in index mode drawing | ||
| 650 | if ((method_count - index) < 3) { | ||
| 651 | return; | 637 | return; |
| 652 | } | 638 | } |
| 653 | draw_mode = (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Subsequent) || | 639 | draw_mode = (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Subsequent) || |
| 654 | (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Unchanged) | 640 | (regs.draw.instance_id == Maxwell3D::Regs::Draw::InstanceId::Unchanged) |
| 655 | ? DrawMode::Instance | 641 | ? DrawMode::Instance |
| 656 | : DrawMode::General; | 642 | : DrawMode::General; |
| 657 | 643 | u32 instance_count = 0; | |
| 658 | // Drawing will only begin with draw.begin or index_buffer method, other methods directly | ||
| 659 | // clear | ||
| 660 | if (draw_mode == DrawMode::Undefined) { | ||
| 661 | deferred_draw_method.clear(); | ||
| 662 | return; | ||
| 663 | } | ||
| 664 | |||
| 665 | if (draw_mode == DrawMode::Instance) { | 644 | if (draw_mode == DrawMode::Instance) { |
| 666 | ASSERT_MSG(deferred_draw_method.size() % 4 == 0, "Instance mode method size error"); | 645 | u32 vertex_buffer_count = 0; |
| 667 | instance_count = static_cast<u32>(method_count - index) / 4; | 646 | u32 index_buffer_count = 0; |
| 647 | for (u32 index = 0; index < method_count; ++index) { | ||
| 648 | method = deferred_draw_method[index]; | ||
| 649 | if (method == MAXWELL3D_REG_INDEX(vertex_buffer.count)) { | ||
| 650 | instance_count = ++vertex_buffer_count; | ||
| 651 | } else if (method == MAXWELL3D_REG_INDEX(index_buffer.count)) { | ||
| 652 | instance_count = ++index_buffer_count; | ||
| 653 | } | ||
| 654 | } | ||
| 655 | ASSERT_MSG(!(vertex_buffer_count && index_buffer_count), | ||
| 656 | "Instance both indexed and direct?"); | ||
| 668 | } else { | 657 | } else { |
| 669 | method = deferred_draw_method[index + 1]; | 658 | instance_count = 1; |
| 670 | if (MAXWELL3D_REG_INDEX(draw_inline_index) == method || | 659 | for (u32 index = 0; index < method_count; ++index) { |
| 671 | MAXWELL3D_REG_INDEX(inline_index_2x16.even) == method || | 660 | method = deferred_draw_method[index]; |
| 672 | MAXWELL3D_REG_INDEX(inline_index_4x8.index0) == method) { | 661 | if (MAXWELL3D_REG_INDEX(draw_inline_index) == method || |
| 673 | regs.index_buffer.count = static_cast<u32>(inline_index_draw_indexes.size() / 4); | 662 | MAXWELL3D_REG_INDEX(inline_index_2x16.even) == method || |
| 674 | regs.index_buffer.format = Regs::IndexFormat::UnsignedInt; | 663 | MAXWELL3D_REG_INDEX(inline_index_4x8.index0) == method) { |
| 664 | regs.index_buffer.count = static_cast<u32>(inline_index_draw_indexes.size() / 4); | ||
| 665 | regs.index_buffer.format = Regs::IndexFormat::UnsignedInt; | ||
| 666 | break; | ||
| 667 | } | ||
| 675 | } | 668 | } |
| 676 | } | 669 | } |
| 677 | 670 | ||
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index 384350dbd..8c8dfcca6 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp | |||
| @@ -45,7 +45,7 @@ MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 | |||
| 45 | kind_valus.fill(PTEKind::INVALID); | 45 | kind_valus.fill(PTEKind::INVALID); |
| 46 | big_kinds.resize(big_page_table_size / 32, kind_valus); | 46 | big_kinds.resize(big_page_table_size / 32, kind_valus); |
| 47 | entries.resize(page_table_size / 32, 0); | 47 | entries.resize(page_table_size / 32, 0); |
| 48 | kinds.resize(big_page_table_size / 32, kind_valus); | 48 | kinds.resize(page_table_size / 32, kind_valus); |
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | MemoryManager::~MemoryManager() = default; | 51 | MemoryManager::~MemoryManager() = default; |
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp index 81b6c372d..1aa116cea 100644 --- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp +++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp | |||
| @@ -467,7 +467,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) { | |||
| 467 | } | 467 | } |
| 468 | 468 | ||
| 469 | void GraphicsPipeline::ConfigureDraw(const RescalingPushConstant& rescaling, | 469 | void GraphicsPipeline::ConfigureDraw(const RescalingPushConstant& rescaling, |
| 470 | const RenderAreaPushConstant& render_are) { | 470 | const RenderAreaPushConstant& render_area) { |
| 471 | texture_cache.UpdateRenderTargets(false); | 471 | texture_cache.UpdateRenderTargets(false); |
| 472 | scheduler.RequestRenderpass(texture_cache.GetFramebuffer()); | 472 | scheduler.RequestRenderpass(texture_cache.GetFramebuffer()); |
| 473 | 473 | ||
| @@ -484,8 +484,8 @@ void GraphicsPipeline::ConfigureDraw(const RescalingPushConstant& rescaling, | |||
| 484 | const void* const descriptor_data{update_descriptor_queue.UpdateData()}; | 484 | const void* const descriptor_data{update_descriptor_queue.UpdateData()}; |
| 485 | scheduler.Record([this, descriptor_data, bind_pipeline, rescaling_data = rescaling.Data(), | 485 | scheduler.Record([this, descriptor_data, bind_pipeline, rescaling_data = rescaling.Data(), |
| 486 | is_rescaling, update_rescaling, | 486 | is_rescaling, update_rescaling, |
| 487 | uses_render_area = render_are.uses_render_area, | 487 | uses_render_area = render_area.uses_render_area, |
| 488 | render_area_data = render_are.words](vk::CommandBuffer cmdbuf) { | 488 | render_area_data = render_area.words](vk::CommandBuffer cmdbuf) { |
| 489 | if (bind_pipeline) { | 489 | if (bind_pipeline) { |
| 490 | cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline); | 490 | cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline); |
| 491 | } | 491 | } |
| @@ -494,7 +494,7 @@ void GraphicsPipeline::ConfigureDraw(const RescalingPushConstant& rescaling, | |||
| 494 | rescaling_data.data()); | 494 | rescaling_data.data()); |
| 495 | if (update_rescaling) { | 495 | if (update_rescaling) { |
| 496 | const f32 config_down_factor{Settings::values.resolution_info.down_factor}; | 496 | const f32 config_down_factor{Settings::values.resolution_info.down_factor}; |
| 497 | const f32 scale_down_factor{is_rescaling ? config_down_factor : 2.0f}; | 497 | const f32 scale_down_factor{is_rescaling ? config_down_factor : 1.0f}; |
| 498 | cmdbuf.PushConstants(*pipeline_layout, VK_SHADER_STAGE_ALL_GRAPHICS, | 498 | cmdbuf.PushConstants(*pipeline_layout, VK_SHADER_STAGE_ALL_GRAPHICS, |
| 499 | RESCALING_LAYOUT_DOWN_FACTOR_OFFSET, sizeof(scale_down_factor), | 499 | RESCALING_LAYOUT_DOWN_FACTOR_OFFSET, sizeof(scale_down_factor), |
| 500 | &scale_down_factor); | 500 | &scale_down_factor); |
diff --git a/src/yuzu/CMakeLists.txt b/src/yuzu/CMakeLists.txt index 29d506c47..239f12382 100644 --- a/src/yuzu/CMakeLists.txt +++ b/src/yuzu/CMakeLists.txt | |||
| @@ -315,7 +315,7 @@ target_include_directories(yuzu PRIVATE ../../externals/Vulkan-Headers/include) | |||
| 315 | if (NOT WIN32) | 315 | if (NOT WIN32) |
| 316 | target_include_directories(yuzu PRIVATE ${Qt5Gui_PRIVATE_INCLUDE_DIRS}) | 316 | target_include_directories(yuzu PRIVATE ${Qt5Gui_PRIVATE_INCLUDE_DIRS}) |
| 317 | endif() | 317 | endif() |
| 318 | if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux") | 318 | if (UNIX AND NOT APPLE) |
| 319 | target_link_libraries(yuzu PRIVATE Qt::DBus) | 319 | target_link_libraries(yuzu PRIVATE Qt::DBus) |
| 320 | endif() | 320 | endif() |
| 321 | 321 | ||
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp index 7b16d7f7e..59e56633a 100644 --- a/src/yuzu/main.cpp +++ b/src/yuzu/main.cpp | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | #ifdef __APPLE__ | 9 | #ifdef __APPLE__ |
| 10 | #include <unistd.h> // for chdir | 10 | #include <unistd.h> // for chdir |
| 11 | #endif | 11 | #endif |
| 12 | #ifdef __linux__ | 12 | #ifdef __unix__ |
| 13 | #include <csignal> | 13 | #include <csignal> |
| 14 | #include <sys/socket.h> | 14 | #include <sys/socket.h> |
| 15 | #endif | 15 | #endif |
| @@ -275,7 +275,7 @@ static void OverrideWindowsFont() { | |||
| 275 | #endif | 275 | #endif |
| 276 | 276 | ||
| 277 | bool GMainWindow::CheckDarkMode() { | 277 | bool GMainWindow::CheckDarkMode() { |
| 278 | #ifdef __linux__ | 278 | #ifdef __unix__ |
| 279 | const QPalette test_palette(qApp->palette()); | 279 | const QPalette test_palette(qApp->palette()); |
| 280 | const QColor text_color = test_palette.color(QPalette::Active, QPalette::Text); | 280 | const QColor text_color = test_palette.color(QPalette::Active, QPalette::Text); |
| 281 | const QColor window_color = test_palette.color(QPalette::Active, QPalette::Window); | 281 | const QColor window_color = test_palette.color(QPalette::Active, QPalette::Window); |
| @@ -283,7 +283,7 @@ bool GMainWindow::CheckDarkMode() { | |||
| 283 | #else | 283 | #else |
| 284 | // TODO: Windows | 284 | // TODO: Windows |
| 285 | return false; | 285 | return false; |
| 286 | #endif // __linux__ | 286 | #endif // __unix__ |
| 287 | } | 287 | } |
| 288 | 288 | ||
| 289 | GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan) | 289 | GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan) |
| @@ -291,7 +291,7 @@ GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan | |||
| 291 | input_subsystem{std::make_shared<InputCommon::InputSubsystem>()}, config{std::move(config_)}, | 291 | input_subsystem{std::make_shared<InputCommon::InputSubsystem>()}, config{std::move(config_)}, |
| 292 | vfs{std::make_shared<FileSys::RealVfsFilesystem>()}, | 292 | vfs{std::make_shared<FileSys::RealVfsFilesystem>()}, |
| 293 | provider{std::make_unique<FileSys::ManualContentProvider>()} { | 293 | provider{std::make_unique<FileSys::ManualContentProvider>()} { |
| 294 | #ifdef __linux__ | 294 | #ifdef __unix__ |
| 295 | SetupSigInterrupts(); | 295 | SetupSigInterrupts(); |
| 296 | #endif | 296 | #endif |
| 297 | system->Initialize(); | 297 | system->Initialize(); |
| @@ -509,7 +509,7 @@ GMainWindow::~GMainWindow() { | |||
| 509 | delete render_window; | 509 | delete render_window; |
| 510 | } | 510 | } |
| 511 | 511 | ||
| 512 | #ifdef __linux__ | 512 | #ifdef __unix__ |
| 513 | ::close(sig_interrupt_fds[0]); | 513 | ::close(sig_interrupt_fds[0]); |
| 514 | ::close(sig_interrupt_fds[1]); | 514 | ::close(sig_interrupt_fds[1]); |
| 515 | #endif | 515 | #endif |
| @@ -1379,7 +1379,7 @@ void GMainWindow::OnDisplayTitleBars(bool show) { | |||
| 1379 | } | 1379 | } |
| 1380 | 1380 | ||
| 1381 | void GMainWindow::SetupPrepareForSleep() { | 1381 | void GMainWindow::SetupPrepareForSleep() { |
| 1382 | #ifdef __linux__ | 1382 | #ifdef __unix__ |
| 1383 | auto bus = QDBusConnection::systemBus(); | 1383 | auto bus = QDBusConnection::systemBus(); |
| 1384 | if (bus.isConnected()) { | 1384 | if (bus.isConnected()) { |
| 1385 | const bool success = bus.connect( | 1385 | const bool success = bus.connect( |
| @@ -1393,7 +1393,7 @@ void GMainWindow::SetupPrepareForSleep() { | |||
| 1393 | } else { | 1393 | } else { |
| 1394 | LOG_WARNING(Frontend, "QDBusConnection system bus is not connected"); | 1394 | LOG_WARNING(Frontend, "QDBusConnection system bus is not connected"); |
| 1395 | } | 1395 | } |
| 1396 | #endif // __linux__ | 1396 | #endif // __unix__ |
| 1397 | } | 1397 | } |
| 1398 | 1398 | ||
| 1399 | void GMainWindow::OnPrepareForSleep(bool prepare_sleep) { | 1399 | void GMainWindow::OnPrepareForSleep(bool prepare_sleep) { |
| @@ -1415,7 +1415,7 @@ void GMainWindow::OnPrepareForSleep(bool prepare_sleep) { | |||
| 1415 | } | 1415 | } |
| 1416 | } | 1416 | } |
| 1417 | 1417 | ||
| 1418 | #ifdef __linux__ | 1418 | #ifdef __unix__ |
| 1419 | static std::optional<QDBusObjectPath> HoldWakeLockLinux(u32 window_id = 0) { | 1419 | static std::optional<QDBusObjectPath> HoldWakeLockLinux(u32 window_id = 0) { |
| 1420 | if (!QDBusConnection::sessionBus().isConnected()) { | 1420 | if (!QDBusConnection::sessionBus().isConnected()) { |
| 1421 | return {}; | 1421 | return {}; |
| @@ -1500,14 +1500,14 @@ void GMainWindow::OnSigInterruptNotifierActivated() { | |||
| 1500 | 1500 | ||
| 1501 | emit SigInterrupt(); | 1501 | emit SigInterrupt(); |
| 1502 | } | 1502 | } |
| 1503 | #endif // __linux__ | 1503 | #endif // __unix__ |
| 1504 | 1504 | ||
| 1505 | void GMainWindow::PreventOSSleep() { | 1505 | void GMainWindow::PreventOSSleep() { |
| 1506 | #ifdef _WIN32 | 1506 | #ifdef _WIN32 |
| 1507 | SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED | ES_DISPLAY_REQUIRED); | 1507 | SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED | ES_DISPLAY_REQUIRED); |
| 1508 | #elif defined(HAVE_SDL2) | 1508 | #elif defined(HAVE_SDL2) |
| 1509 | SDL_DisableScreenSaver(); | 1509 | SDL_DisableScreenSaver(); |
| 1510 | #ifdef __linux__ | 1510 | #ifdef __unix__ |
| 1511 | auto reply = HoldWakeLockLinux(winId()); | 1511 | auto reply = HoldWakeLockLinux(winId()); |
| 1512 | if (reply) { | 1512 | if (reply) { |
| 1513 | wake_lock = std::move(reply.value()); | 1513 | wake_lock = std::move(reply.value()); |
| @@ -1521,7 +1521,7 @@ void GMainWindow::AllowOSSleep() { | |||
| 1521 | SetThreadExecutionState(ES_CONTINUOUS); | 1521 | SetThreadExecutionState(ES_CONTINUOUS); |
| 1522 | #elif defined(HAVE_SDL2) | 1522 | #elif defined(HAVE_SDL2) |
| 1523 | SDL_EnableScreenSaver(); | 1523 | SDL_EnableScreenSaver(); |
| 1524 | #ifdef __linux__ | 1524 | #ifdef __unix__ |
| 1525 | if (!wake_lock.path().isEmpty()) { | 1525 | if (!wake_lock.path().isEmpty()) { |
| 1526 | ReleaseWakeLockLinux(wake_lock); | 1526 | ReleaseWakeLockLinux(wake_lock); |
| 1527 | } | 1527 | } |
| @@ -4070,7 +4070,7 @@ void GMainWindow::SetDiscordEnabled([[maybe_unused]] bool state) { | |||
| 4070 | } | 4070 | } |
| 4071 | 4071 | ||
| 4072 | void GMainWindow::changeEvent(QEvent* event) { | 4072 | void GMainWindow::changeEvent(QEvent* event) { |
| 4073 | #ifdef __linux__ | 4073 | #ifdef __unix__ |
| 4074 | // PaletteChange event appears to only reach so far into the GUI, explicitly asking to | 4074 | // PaletteChange event appears to only reach so far into the GUI, explicitly asking to |
| 4075 | // UpdateUITheme is a decent work around | 4075 | // UpdateUITheme is a decent work around |
| 4076 | if (event->type() == QEvent::PaletteChange) { | 4076 | if (event->type() == QEvent::PaletteChange) { |
| @@ -4085,7 +4085,7 @@ void GMainWindow::changeEvent(QEvent* event) { | |||
| 4085 | } | 4085 | } |
| 4086 | last_window_color = window_color; | 4086 | last_window_color = window_color; |
| 4087 | } | 4087 | } |
| 4088 | #endif // __linux__ | 4088 | #endif // __unix__ |
| 4089 | QWidget::changeEvent(event); | 4089 | QWidget::changeEvent(event); |
| 4090 | } | 4090 | } |
| 4091 | 4091 | ||
diff --git a/src/yuzu/main.h b/src/yuzu/main.h index f7aa8e417..150ada84c 100644 --- a/src/yuzu/main.h +++ b/src/yuzu/main.h | |||
| @@ -15,7 +15,7 @@ | |||
| 15 | #include "yuzu/compatibility_list.h" | 15 | #include "yuzu/compatibility_list.h" |
| 16 | #include "yuzu/hotkeys.h" | 16 | #include "yuzu/hotkeys.h" |
| 17 | 17 | ||
| 18 | #ifdef __linux__ | 18 | #ifdef __unix__ |
| 19 | #include <QVariant> | 19 | #include <QVariant> |
| 20 | #include <QtDBus/QDBusInterface> | 20 | #include <QtDBus/QDBusInterface> |
| 21 | #include <QtDBus/QtDBus> | 21 | #include <QtDBus/QtDBus> |
| @@ -255,7 +255,7 @@ private: | |||
| 255 | void changeEvent(QEvent* event) override; | 255 | void changeEvent(QEvent* event) override; |
| 256 | void closeEvent(QCloseEvent* event) override; | 256 | void closeEvent(QCloseEvent* event) override; |
| 257 | 257 | ||
| 258 | #ifdef __linux__ | 258 | #ifdef __unix__ |
| 259 | void SetupSigInterrupts(); | 259 | void SetupSigInterrupts(); |
| 260 | static void HandleSigInterrupt(int); | 260 | static void HandleSigInterrupt(int); |
| 261 | void OnSigInterruptNotifierActivated(); | 261 | void OnSigInterruptNotifierActivated(); |
| @@ -435,7 +435,7 @@ private: | |||
| 435 | // True if TAS recording dialog is visible | 435 | // True if TAS recording dialog is visible |
| 436 | bool is_tas_recording_dialog_active{}; | 436 | bool is_tas_recording_dialog_active{}; |
| 437 | 437 | ||
| 438 | #ifdef __linux__ | 438 | #ifdef __unix__ |
| 439 | QSocketNotifier* sig_interrupt_notifier; | 439 | QSocketNotifier* sig_interrupt_notifier; |
| 440 | static std::array<int, 3> sig_interrupt_fds; | 440 | static std::array<int, 3> sig_interrupt_fds; |
| 441 | 441 | ||