| author | 2022-03-16 12:15:33 +0100 |
|---|---|
| committer | 2022-03-16 12:15:33 +0100 |
| commit | 2db5076ec9abab880ea2cc363f4e5e93c528d801 (patch) |
| tree | a87668b7aab0bba9dd73dabf093b9f77b1ad6937 /src/core/hle/kernel |
| parent | Merge pull request #8023 from ameerj/kirby-pop-in (diff) |
| parent | core: hle: kernel: init_slab_setup: Move CalculateSlabHeapGapSize to global n... (diff) |
Merge pull request #8013 from bunnei/kernel-slab-rework-v2
Kernel Memory Updates (Part 6): Use guest memory for slab heaps & update TLS.
Diffstat (limited to 'src/core/hle/kernel')
25 files changed, 711 insertions, 371 deletions
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index e19544c54..9f2175f82 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
| @@ -45,7 +45,7 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co | |||
| 45 | LOG_CRITICAL(IPC, "object_id {} is too big!", object_id); | 45 | LOG_CRITICAL(IPC, "object_id {} is too big!", object_id); |
| 46 | return false; | 46 | return false; |
| 47 | } | 47 | } |
| 48 | return DomainHandler(object_id - 1) != nullptr; | 48 | return DomainHandler(object_id - 1).lock() != nullptr; |
| 49 | } else { | 49 | } else { |
| 50 | return session_handler != nullptr; | 50 | return session_handler != nullptr; |
| 51 | } | 51 | } |
| @@ -53,9 +53,6 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co | |||
| 53 | 53 | ||
| 54 | void SessionRequestHandler::ClientConnected(KServerSession* session) { | 54 | void SessionRequestHandler::ClientConnected(KServerSession* session) { |
| 55 | session->ClientConnected(shared_from_this()); | 55 | session->ClientConnected(shared_from_this()); |
| 56 | |||
| 57 | // Ensure our server session is tracked globally. | ||
| 58 | kernel.RegisterServerSession(session); | ||
| 59 | } | 56 | } |
| 60 | 57 | ||
| 61 | void SessionRequestHandler::ClientDisconnected(KServerSession* session) { | 58 | void SessionRequestHandler::ClientDisconnected(KServerSession* session) { |
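The hunks above drop the global server-session registration and, together with the header changes below, move session request handlers to `std::weak_ptr` references, so every caller must now `lock()` and null-check before dispatching. A minimal, self-contained sketch of that idiom (`Handler` is a hypothetical stand-in type, not yuzu's):

```cpp
// Sketch of the shared_ptr -> weak_ptr pattern this commit applies to
// session request handlers.
#include <cassert>
#include <iostream>
#include <memory>

struct Handler {
    void HandleSyncRequest() { std::cout << "handled\n"; }
};

using HandlerWeakPtr = std::weak_ptr<Handler>;

void Dispatch(const HandlerWeakPtr& weak_handler) {
    // lock() yields an owning shared_ptr only if the handler is still alive.
    if (auto handler = weak_handler.lock()) {
        handler->HandleSyncRequest();
    } else {
        assert(false && "handler expired");  // mirrors the diff's UNREACHABLE()
    }
}

int main() {
    auto handler = std::make_shared<Handler>();
    Dispatch(handler);                       // alive: dispatches normally
    HandlerWeakPtr weak = handler;
    handler.reset();                         // last owner gone
    std::cout << std::boolalpha << weak.expired() << '\n';  // prints: true
}
```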
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index 754b41ff6..670cc741c 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
| @@ -94,6 +94,7 @@ protected: | |||
| 94 | std::weak_ptr<ServiceThread> service_thread; | 94 | std::weak_ptr<ServiceThread> service_thread; |
| 95 | }; | 95 | }; |
| 96 | 96 | ||
| 97 | using SessionRequestHandlerWeakPtr = std::weak_ptr<SessionRequestHandler>; | ||
| 97 | using SessionRequestHandlerPtr = std::shared_ptr<SessionRequestHandler>; | 98 | using SessionRequestHandlerPtr = std::shared_ptr<SessionRequestHandler>; |
| 98 | 99 | ||
| 99 | /** | 100 | /** |
| @@ -139,7 +140,7 @@ public: | |||
| 139 | } | 140 | } |
| 140 | } | 141 | } |
| 141 | 142 | ||
| 142 | SessionRequestHandlerPtr DomainHandler(std::size_t index) const { | 143 | SessionRequestHandlerWeakPtr DomainHandler(std::size_t index) const { |
| 143 | ASSERT_MSG(index < DomainHandlerCount(), "Unexpected handler index {}", index); | 144 | ASSERT_MSG(index < DomainHandlerCount(), "Unexpected handler index {}", index); |
| 144 | return domain_handlers.at(index); | 145 | return domain_handlers.at(index); |
| 145 | } | 146 | } |
| @@ -328,10 +329,10 @@ public: | |||
| 328 | 329 | ||
| 329 | template <typename T> | 330 | template <typename T> |
| 330 | std::shared_ptr<T> GetDomainHandler(std::size_t index) const { | 331 | std::shared_ptr<T> GetDomainHandler(std::size_t index) const { |
| 331 | return std::static_pointer_cast<T>(manager->DomainHandler(index)); | 332 | return std::static_pointer_cast<T>(manager.lock()->DomainHandler(index).lock()); |
| 332 | } | 333 | } |
| 333 | 334 | ||
| 334 | void SetSessionRequestManager(std::shared_ptr<SessionRequestManager> manager_) { | 335 | void SetSessionRequestManager(std::weak_ptr<SessionRequestManager> manager_) { |
| 335 | manager = std::move(manager_); | 336 | manager = std::move(manager_); |
| 336 | } | 337 | } |
| 337 | 338 | ||
| @@ -374,7 +375,7 @@ private: | |||
| 374 | u32 handles_offset{}; | 375 | u32 handles_offset{}; |
| 375 | u32 domain_offset{}; | 376 | u32 domain_offset{}; |
| 376 | 377 | ||
| 377 | std::shared_ptr<SessionRequestManager> manager; | 378 | std::weak_ptr<SessionRequestManager> manager; |
| 378 | 379 | ||
| 379 | KernelCore& kernel; | 380 | KernelCore& kernel; |
| 380 | Core::Memory::Memory& memory; | 381 | Core::Memory::Memory& memory; |
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 36fc0944a..b0f773ee0 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
| @@ -7,19 +7,23 @@ | |||
| 7 | #include "common/common_funcs.h" | 7 | #include "common/common_funcs.h" |
| 8 | #include "common/common_types.h" | 8 | #include "common/common_types.h" |
| 9 | #include "core/core.h" | 9 | #include "core/core.h" |
| 10 | #include "core/device_memory.h" | ||
| 10 | #include "core/hardware_properties.h" | 11 | #include "core/hardware_properties.h" |
| 11 | #include "core/hle/kernel/init/init_slab_setup.h" | 12 | #include "core/hle/kernel/init/init_slab_setup.h" |
| 12 | #include "core/hle/kernel/k_code_memory.h" | 13 | #include "core/hle/kernel/k_code_memory.h" |
| 13 | #include "core/hle/kernel/k_event.h" | 14 | #include "core/hle/kernel/k_event.h" |
| 14 | #include "core/hle/kernel/k_memory_layout.h" | 15 | #include "core/hle/kernel/k_memory_layout.h" |
| 15 | #include "core/hle/kernel/k_memory_manager.h" | 16 | #include "core/hle/kernel/k_memory_manager.h" |
| 17 | #include "core/hle/kernel/k_page_buffer.h" | ||
| 16 | #include "core/hle/kernel/k_port.h" | 18 | #include "core/hle/kernel/k_port.h" |
| 17 | #include "core/hle/kernel/k_process.h" | 19 | #include "core/hle/kernel/k_process.h" |
| 18 | #include "core/hle/kernel/k_resource_limit.h" | 20 | #include "core/hle/kernel/k_resource_limit.h" |
| 19 | #include "core/hle/kernel/k_session.h" | 21 | #include "core/hle/kernel/k_session.h" |
| 20 | #include "core/hle/kernel/k_shared_memory.h" | 22 | #include "core/hle/kernel/k_shared_memory.h" |
| 23 | #include "core/hle/kernel/k_shared_memory_info.h" | ||
| 21 | #include "core/hle/kernel/k_system_control.h" | 24 | #include "core/hle/kernel/k_system_control.h" |
| 22 | #include "core/hle/kernel/k_thread.h" | 25 | #include "core/hle/kernel/k_thread.h" |
| 26 | #include "core/hle/kernel/k_thread_local_page.h" | ||
| 23 | #include "core/hle/kernel/k_transfer_memory.h" | 27 | #include "core/hle/kernel/k_transfer_memory.h" |
| 24 | 28 | ||
| 25 | namespace Kernel::Init { | 29 | namespace Kernel::Init { |
| @@ -32,9 +36,13 @@ namespace Kernel::Init { | |||
| 32 | HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \ | 36 | HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \ |
| 33 | HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \ | 37 | HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \ |
| 34 | HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \ | 38 | HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \ |
| 39 | HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \ | ||
| 35 | HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \ | 40 | HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \ |
| 36 | HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ##__VA_ARGS__) \ | 41 | HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ##__VA_ARGS__) \ |
| 37 | HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__) \ | 42 | HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__) \ |
| 43 | HANDLER(KThreadLocalPage, \ | ||
| 44 | (SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), \ | ||
| 45 | ##__VA_ARGS__) \ | ||
| 38 | HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) | 46 | HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) |
| 39 | 47 | ||
| 40 | namespace { | 48 | namespace { |
| @@ -50,38 +58,46 @@ enum KSlabType : u32 { | |||
| 50 | // Constexpr counts. | 58 | // Constexpr counts. |
| 51 | constexpr size_t SlabCountKProcess = 80; | 59 | constexpr size_t SlabCountKProcess = 80; |
| 52 | constexpr size_t SlabCountKThread = 800; | 60 | constexpr size_t SlabCountKThread = 800; |
| 53 | constexpr size_t SlabCountKEvent = 700; | 61 | constexpr size_t SlabCountKEvent = 900; |
| 54 | constexpr size_t SlabCountKInterruptEvent = 100; | 62 | constexpr size_t SlabCountKInterruptEvent = 100; |
| 55 | constexpr size_t SlabCountKPort = 256 + 0x20; // Extra 0x20 ports over Nintendo for homebrew. | 63 | constexpr size_t SlabCountKPort = 384; |
| 56 | constexpr size_t SlabCountKSharedMemory = 80; | 64 | constexpr size_t SlabCountKSharedMemory = 80; |
| 57 | constexpr size_t SlabCountKTransferMemory = 200; | 65 | constexpr size_t SlabCountKTransferMemory = 200; |
| 58 | constexpr size_t SlabCountKCodeMemory = 10; | 66 | constexpr size_t SlabCountKCodeMemory = 10; |
| 59 | constexpr size_t SlabCountKDeviceAddressSpace = 300; | 67 | constexpr size_t SlabCountKDeviceAddressSpace = 300; |
| 60 | constexpr size_t SlabCountKSession = 933; | 68 | constexpr size_t SlabCountKSession = 1133; |
| 61 | constexpr size_t SlabCountKLightSession = 100; | 69 | constexpr size_t SlabCountKLightSession = 100; |
| 62 | constexpr size_t SlabCountKObjectName = 7; | 70 | constexpr size_t SlabCountKObjectName = 7; |
| 63 | constexpr size_t SlabCountKResourceLimit = 5; | 71 | constexpr size_t SlabCountKResourceLimit = 5; |
| 64 | constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES; | 72 | constexpr size_t SlabCountKDebug = Core::Hardware::NUM_CPU_CORES; |
| 65 | constexpr size_t SlabCountKAlpha = 1; | 73 | constexpr size_t SlabCountKIoPool = 1; |
| 66 | constexpr size_t SlabCountKBeta = 6; | 74 | constexpr size_t SlabCountKIoRegion = 6; |
| 67 | 75 | ||
| 68 | constexpr size_t SlabCountExtraKThread = 160; | 76 | constexpr size_t SlabCountExtraKThread = 160; |
| 69 | 77 | ||
| 78 | /// Helper function to translate from the slab virtual address to the reserved location in physical | ||
| 79 | /// memory. | ||
| 80 | static PAddr TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout, VAddr slab_addr) { | ||
| 81 | slab_addr -= memory_layout.GetSlabRegionAddress(); | ||
| 82 | return slab_addr + Core::DramMemoryMap::SlabHeapBase; | ||
| 83 | } | ||
| 84 | |||
| 70 | template <typename T> | 85 | template <typename T> |
| 71 | VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address, | 86 | VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAddr address, |
| 72 | size_t num_objects) { | 87 | size_t num_objects) { |
| 73 | // TODO(bunnei): This is just a place holder. We should initialize the appropriate KSlabHeap for | ||
| 74 | // kernel object type T with the backing kernel memory pointer once we emulate kernel memory. | ||
| 75 | 88 | ||
| 76 | const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*)); | 89 | const size_t size = Common::AlignUp(sizeof(T) * num_objects, alignof(void*)); |
| 77 | VAddr start = Common::AlignUp(address, alignof(T)); | 90 | VAddr start = Common::AlignUp(address, alignof(T)); |
| 78 | 91 | ||
| 79 | // This is intentionally empty. Once KSlabHeap is fully implemented, we can replace this with | 92 | // This should use the virtual memory address passed in, but currently, we do not setup the |
| 80 | // the pointer to emulated memory to pass along. Until then, KSlabHeap will just allocate/free | 93 | // kernel virtual memory layout. Instead, we simply map these at a region of physical memory |
| 81 | // host memory. | 94 | // that we reserve for the slab heaps. |
| 82 | void* backing_kernel_memory{}; | 95 | // TODO(bunnei): Fix this once we support the kernel virtual memory layout. |
| 83 | 96 | ||
| 84 | if (size > 0) { | 97 | if (size > 0) { |
| 98 | void* backing_kernel_memory{ | ||
| 99 | system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))}; | ||
| 100 | |||
| 85 | const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1); | 101 | const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1); |
| 86 | ASSERT(region != nullptr); | 102 | ASSERT(region != nullptr); |
| 87 | ASSERT(region->IsDerivedFrom(KMemoryRegionType_KernelSlab)); | 103 | ASSERT(region->IsDerivedFrom(KMemoryRegionType_KernelSlab)); |
| @@ -91,6 +107,12 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd | |||
| 91 | return start + size; | 107 | return start + size; |
| 92 | } | 108 | } |
| 93 | 109 | ||
| 110 | size_t CalculateSlabHeapGapSize() { | ||
| 111 | constexpr size_t KernelSlabHeapGapSize = 2_MiB - 296_KiB; | ||
| 112 | static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax); | ||
| 113 | return KernelSlabHeapGapSize; | ||
| 114 | } | ||
| 115 | |||
| 94 | } // namespace | 116 | } // namespace |
| 95 | 117 | ||
| 96 | KSlabResourceCounts KSlabResourceCounts::CreateDefault() { | 118 | KSlabResourceCounts KSlabResourceCounts::CreateDefault() { |
| @@ -109,8 +131,8 @@ KSlabResourceCounts KSlabResourceCounts::CreateDefault() { | |||
| 109 | .num_KObjectName = SlabCountKObjectName, | 131 | .num_KObjectName = SlabCountKObjectName, |
| 110 | .num_KResourceLimit = SlabCountKResourceLimit, | 132 | .num_KResourceLimit = SlabCountKResourceLimit, |
| 111 | .num_KDebug = SlabCountKDebug, | 133 | .num_KDebug = SlabCountKDebug, |
| 112 | .num_KAlpha = SlabCountKAlpha, | 134 | .num_KIoPool = SlabCountKIoPool, |
| 113 | .num_KBeta = SlabCountKBeta, | 135 | .num_KIoRegion = SlabCountKIoRegion, |
| 114 | }; | 136 | }; |
| 115 | } | 137 | } |
| 116 | 138 | ||
| @@ -136,11 +158,34 @@ size_t CalculateTotalSlabHeapSize(const KernelCore& kernel) { | |||
| 136 | #undef ADD_SLAB_SIZE | 158 | #undef ADD_SLAB_SIZE |
| 137 | 159 | ||
| 138 | // Add the reserved size. | 160 | // Add the reserved size. |
| 139 | size += KernelSlabHeapGapsSize; | 161 | size += CalculateSlabHeapGapSize(); |
| 140 | 162 | ||
| 141 | return size; | 163 | return size; |
| 142 | } | 164 | } |
| 143 | 165 | ||
| 166 | void InitializeKPageBufferSlabHeap(Core::System& system) { | ||
| 167 | auto& kernel = system.Kernel(); | ||
| 168 | |||
| 169 | const auto& counts = kernel.SlabResourceCounts(); | ||
| 170 | const size_t num_pages = | ||
| 171 | counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8; | ||
| 172 | const size_t slab_size = num_pages * PageSize; | ||
| 173 | |||
| 174 | // Reserve memory from the system resource limit. | ||
| 175 | ASSERT(kernel.GetSystemResourceLimit()->Reserve(LimitableResource::PhysicalMemory, slab_size)); | ||
| 176 | |||
| 177 | // Allocate memory for the slab. | ||
| 178 | constexpr auto AllocateOption = KMemoryManager::EncodeOption( | ||
| 179 | KMemoryManager::Pool::System, KMemoryManager::Direction::FromFront); | ||
| 180 | const PAddr slab_address = | ||
| 181 | kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, AllocateOption); | ||
| 182 | ASSERT(slab_address != 0); | ||
| 183 | |||
| 184 | // Initialize the slabheap. | ||
| 185 | KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address), | ||
| 186 | slab_size); | ||
| 187 | } | ||
| 188 | |||
| 144 | void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { | 189 | void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { |
| 145 | auto& kernel = system.Kernel(); | 190 | auto& kernel = system.Kernel(); |
| 146 | 191 | ||
| @@ -160,13 +205,13 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { | |||
| 160 | } | 205 | } |
| 161 | 206 | ||
| 162 | // Create an array to represent the gaps between the slabs. | 207 | // Create an array to represent the gaps between the slabs. |
| 163 | const size_t total_gap_size = KernelSlabHeapGapsSize; | 208 | const size_t total_gap_size = CalculateSlabHeapGapSize(); |
| 164 | std::array<size_t, slab_types.size()> slab_gaps; | 209 | std::array<size_t, slab_types.size()> slab_gaps; |
| 165 | for (size_t i = 0; i < slab_gaps.size(); i++) { | 210 | for (auto& slab_gap : slab_gaps) { |
| 166 | // Note: This is an off-by-one error from Nintendo's intention, because GenerateRandomRange | 211 | // Note: This is an off-by-one error from Nintendo's intention, because GenerateRandomRange |
| 167 | // is inclusive. However, Nintendo also has the off-by-one error, and it's "harmless", so we | 212 | // is inclusive. However, Nintendo also has the off-by-one error, and it's "harmless", so we |
| 168 | // will include it ourselves. | 213 | // will include it ourselves. |
| 169 | slab_gaps[i] = KSystemControl::GenerateRandomRange(0, total_gap_size); | 214 | slab_gap = KSystemControl::GenerateRandomRange(0, total_gap_size); |
| 170 | } | 215 | } |
| 171 | 216 | ||
| 172 | // Sort the array, so that we can treat differences between values as offsets to the starts of | 217 | // Sort the array, so that we can treat differences between values as offsets to the starts of |
| @@ -177,13 +222,21 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { | |||
| 177 | } | 222 | } |
| 178 | } | 223 | } |
| 179 | 224 | ||
| 180 | for (size_t i = 0; i < slab_types.size(); i++) { | 225 | // Track the gaps, so that we can free them to the unused slab tree. |
| 226 | VAddr gap_start = address; | ||
| 227 | size_t gap_size = 0; | ||
| 228 | |||
| 229 | for (size_t i = 0; i < slab_gaps.size(); i++) { | ||
| 181 | // Add the random gap to the address. | 230 | // Add the random gap to the address. |
| 182 | address += (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1]; | 231 | const auto cur_gap = (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1]; |
| 232 | address += cur_gap; | ||
| 233 | gap_size += cur_gap; | ||
| 183 | 234 | ||
| 184 | #define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...) \ | 235 | #define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...) \ |
| 185 | case KSlabType_##NAME: \ | 236 | case KSlabType_##NAME: \ |
| 186 | address = InitializeSlabHeap<NAME>(system, memory_layout, address, COUNT); \ | 237 | if (COUNT > 0) { \ |
| 238 | address = InitializeSlabHeap<NAME>(system, memory_layout, address, COUNT); \ | ||
| 239 | } \ | ||
| 187 | break; | 240 | break; |
| 188 | 241 | ||
| 189 | // Initialize the slabheap. | 242 | // Initialize the slabheap. |
| @@ -192,7 +245,13 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) { | |||
| 192 | FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP) | 245 | FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP) |
| 193 | // If we somehow get an invalid type, abort. | 246 | // If we somehow get an invalid type, abort. |
| 194 | default: | 247 | default: |
| 195 | UNREACHABLE(); | 248 | UNREACHABLE_MSG("Unknown slab type: {}", slab_types[i]); |
| 249 | } | ||
| 250 | |||
| 251 | // If we've hit the end of a gap, free it. | ||
| 252 | if (gap_start + gap_size != address) { | ||
| 253 | gap_start = address; | ||
| 254 | gap_size = 0; | ||
| 196 | } | 255 | } |
| 197 | } | 256 | } |
| 198 | } | 257 | } |
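The gap-placement scheme above samples one value per slab from `[0, total_gap_size]` (inclusive on both ends, matching the off-by-one the comment notes), sorts the samples, and uses consecutive differences as the gap placed before each slab. A self-contained sketch, with standard `<random>` standing in for `KSystemControl::GenerateRandomRange`:

```cpp
// Differences between adjacent sorted samples sum to at most total_gap_size,
// so the total random padding never exceeds the reserved gap space.
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <random>

int main() {
    constexpr std::size_t total_gap_size = 2 * 1024 * 1024 - 296 * 1024;
    std::array<std::size_t, 4> slab_gaps{};

    std::mt19937_64 rng{0x1234};  // stand-in for the kernel's RNG
    std::uniform_int_distribution<std::size_t> dist(0, total_gap_size);
    for (auto& gap : slab_gaps) {
        gap = dist(rng);
    }
    std::sort(slab_gaps.begin(), slab_gaps.end());

    std::uint64_t address = 0x80000000;  // arbitrary slab region base
    for (std::size_t i = 0; i < slab_gaps.size(); i++) {
        // The gap preceding slab i is the delta between sorted samples.
        address += (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1];
        // InitializeSlabHeap<T>(...) would consume `address` here and advance it.
    }
}
```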
diff --git a/src/core/hle/kernel/init/init_slab_setup.h b/src/core/hle/kernel/init/init_slab_setup.h
index a8f7e0918..f54b67d02 100644
--- a/src/core/hle/kernel/init/init_slab_setup.h
+++ b/src/core/hle/kernel/init/init_slab_setup.h
| @@ -32,12 +32,13 @@ struct KSlabResourceCounts { | |||
| 32 | size_t num_KObjectName; | 32 | size_t num_KObjectName; |
| 33 | size_t num_KResourceLimit; | 33 | size_t num_KResourceLimit; |
| 34 | size_t num_KDebug; | 34 | size_t num_KDebug; |
| 35 | size_t num_KAlpha; | 35 | size_t num_KIoPool; |
| 36 | size_t num_KBeta; | 36 | size_t num_KIoRegion; |
| 37 | }; | 37 | }; |
| 38 | 38 | ||
| 39 | void InitializeSlabResourceCounts(KernelCore& kernel); | 39 | void InitializeSlabResourceCounts(KernelCore& kernel); |
| 40 | size_t CalculateTotalSlabHeapSize(const KernelCore& kernel); | 40 | size_t CalculateTotalSlabHeapSize(const KernelCore& kernel); |
| 41 | void InitializeKPageBufferSlabHeap(Core::System& system); | ||
| 41 | void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout); | 42 | void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout); |
| 42 | 43 | ||
| 43 | } // namespace Kernel::Init | 44 | } // namespace Kernel::Init |
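`InitializeKPageBufferSlabHeap`, declared here and defined above, sizes its backing allocation from the process and thread slab counts. A constexpr check of that arithmetic under the default counts shown earlier (whether `SlabCountExtraKThread` is folded in at runtime is not visible in this diff):

```cpp
#include <cstddef>

constexpr std::size_t PageSize = 0x1000;
constexpr std::size_t num_KProcess = 80;   // SlabCountKProcess
constexpr std::size_t num_KThread = 800;   // SlabCountKThread

// num_pages = P + T + (P + T) / 8, per InitializeKPageBufferSlabHeap above.
constexpr std::size_t num_pages =
    num_KProcess + num_KThread + (num_KProcess + num_KThread) / 8;
constexpr std::size_t slab_size = num_pages * PageSize;

static_assert(num_pages == 990);
static_assert(slab_size == 0x3DE000);  // ~3.87 MiB backing the KPageBuffer slab
```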
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 1d1f5e5f8..8cdd0490f 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
| @@ -115,7 +115,7 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) { | |||
| 115 | { | 115 | { |
| 116 | KScopedSchedulerLock sl(kernel); | 116 | KScopedSchedulerLock sl(kernel); |
| 117 | 117 | ||
| 118 | auto it = thread_tree.nfind_light({addr, -1}); | 118 | auto it = thread_tree.nfind_key({addr, -1}); |
| 119 | while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && | 119 | while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && |
| 120 | (it->GetAddressArbiterKey() == addr)) { | 120 | (it->GetAddressArbiterKey() == addr)) { |
| 121 | // End the thread's wait. | 121 | // End the thread's wait. |
| @@ -148,7 +148,7 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 | |||
| 148 | return ResultInvalidState; | 148 | return ResultInvalidState; |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | auto it = thread_tree.nfind_light({addr, -1}); | 151 | auto it = thread_tree.nfind_key({addr, -1}); |
| 152 | while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && | 152 | while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && |
| 153 | (it->GetAddressArbiterKey() == addr)) { | 153 | (it->GetAddressArbiterKey() == addr)) { |
| 154 | // End the thread's wait. | 154 | // End the thread's wait. |
| @@ -171,7 +171,7 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 | |||
| 171 | { | 171 | { |
| 172 | [[maybe_unused]] const KScopedSchedulerLock sl(kernel); | 172 | [[maybe_unused]] const KScopedSchedulerLock sl(kernel); |
| 173 | 173 | ||
| 174 | auto it = thread_tree.nfind_light({addr, -1}); | 174 | auto it = thread_tree.nfind_key({addr, -1}); |
| 175 | // Determine the updated value. | 175 | // Determine the updated value. |
| 176 | s32 new_value{}; | 176 | s32 new_value{}; |
| 177 | if (count <= 0) { | 177 | if (count <= 0) { |
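The `nfind_light` to `nfind_key` rename keeps the same lookup shape: as the surrounding loops suggest, probing the waiter tree with `{addr, -1}` behaves like an inclusive lower bound, positioning the iterator at the first waiter for `addr`. The same pattern with `std::set` standing in for the intrusive tree:

```cpp
// The waiter tree is ordered by (key, id); an id of -1 sorts before any
// real waiter, so lower_bound({addr, -1}) lands on the first waiter at addr.
#include <cstdint>
#include <iostream>
#include <set>
#include <utility>

int main() {
    std::set<std::pair<std::uint64_t, std::int64_t>> waiters{
        {0x1000, 3}, {0x1000, 7}, {0x2000, 1}};

    const std::uint64_t addr = 0x1000;
    auto it = waiters.lower_bound({addr, -1});
    while (it != waiters.end() && it->first == addr) {
        std::cout << "waiter id " << it->second << '\n';  // 3, then 7
        ++it;
    }
}
```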
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index aadcc297a..8e2a9593c 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
| @@ -244,7 +244,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) { | |||
| 244 | { | 244 | { |
| 245 | KScopedSchedulerLock sl(kernel); | 245 | KScopedSchedulerLock sl(kernel); |
| 246 | 246 | ||
| 247 | auto it = thread_tree.nfind_light({cv_key, -1}); | 247 | auto it = thread_tree.nfind_key({cv_key, -1}); |
| 248 | while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && | 248 | while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && |
| 249 | (it->GetConditionVariableKey() == cv_key)) { | 249 | (it->GetConditionVariableKey() == cv_key)) { |
| 250 | KThread* target_thread = std::addressof(*it); | 250 | KThread* target_thread = std::addressof(*it); |
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index bcddb0d62..0858827b6 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
| @@ -57,11 +57,11 @@ constexpr std::size_t KernelPageTableHeapSize = GetMaximumOverheadSize(MainMemor | |||
| 57 | constexpr std::size_t KernelInitialPageHeapSize = 128_KiB; | 57 | constexpr std::size_t KernelInitialPageHeapSize = 128_KiB; |
| 58 | 58 | ||
| 59 | constexpr std::size_t KernelSlabHeapDataSize = 5_MiB; | 59 | constexpr std::size_t KernelSlabHeapDataSize = 5_MiB; |
| 60 | constexpr std::size_t KernelSlabHeapGapsSize = 2_MiB - 64_KiB; | 60 | constexpr std::size_t KernelSlabHeapGapsSizeMax = 2_MiB - 64_KiB; |
| 61 | constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSize; | 61 | constexpr std::size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax; |
| 62 | 62 | ||
| 63 | // NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860. | 63 | // NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860. |
| 64 | constexpr std::size_t KernelSlabHeapAdditionalSize = 416_KiB; | 64 | constexpr std::size_t KernelSlabHeapAdditionalSize = 0x68000; |
| 65 | 65 | ||
| 66 | constexpr std::size_t KernelResourceSize = | 66 | constexpr std::size_t KernelResourceSize = |
| 67 | KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize; | 67 | KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize; |
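Note that `0x68000` equals `416_KiB`, so the last hunk changes notation only; the rename to `KernelSlabHeapGapsSizeMax` pairs with the runtime `CalculateSlabHeapGapSize()` added above, which must stay at or below this bound:

```cpp
static_assert(0x68000 == 416 * 1024);                     // KernelSlabHeapAdditionalSize
static_assert(2 * 1024 * 1024 - 64 * 1024 == 0x1F0000);   // KernelSlabHeapGapsSizeMax
static_assert(2 * 1024 * 1024 - 296 * 1024 <=             // gap size chosen at runtime...
              2 * 1024 * 1024 - 64 * 1024);               // ...fits under the max
```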
diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h
new file mode 100644
index 000000000..0a9451228
--- /dev/null
+++ b/src/core/hle/kernel/k_page_buffer.h
| @@ -0,0 +1,34 @@ | |||
| 1 | // Copyright 2022 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <array> | ||
| 8 | |||
| 9 | #include "common/alignment.h" | ||
| 10 | #include "common/assert.h" | ||
| 11 | #include "common/common_types.h" | ||
| 12 | #include "core/core.h" | ||
| 13 | #include "core/device_memory.h" | ||
| 14 | #include "core/hle/kernel/memory_types.h" | ||
| 15 | |||
| 16 | namespace Kernel { | ||
| 17 | |||
| 18 | class KPageBuffer final : public KSlabAllocated<KPageBuffer> { | ||
| 19 | public: | ||
| 20 | KPageBuffer() = default; | ||
| 21 | |||
| 22 | static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr) { | ||
| 23 | ASSERT(Common::IsAligned(phys_addr, PageSize)); | ||
| 24 | return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr)); | ||
| 25 | } | ||
| 26 | |||
| 27 | private: | ||
| 28 | [[maybe_unused]] alignas(PageSize) std::array<u8, PageSize> m_buffer{}; | ||
| 29 | }; | ||
| 30 | |||
| 31 | static_assert(sizeof(KPageBuffer) == PageSize); | ||
| 32 | static_assert(alignof(KPageBuffer) == PageSize); | ||
| 33 | |||
| 34 | } // namespace Kernel | ||
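The two `static_assert`s are the point of this type: the page-aligned array member forces both `sizeof` and `alignof` to `PageSize`, so consecutive `KPageBuffer`s tile backing memory exactly one page apart. A standalone analogue (POSIX-style `std::aligned_alloc` standing in for `DeviceMemory().GetPointer`):

```cpp
#include <array>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <new>

constexpr std::size_t PageSize = 0x1000;

class PageBuffer {
public:
    PageBuffer() = default;

private:
    alignas(PageSize) std::array<std::uint8_t, PageSize> m_buffer{};
};

static_assert(sizeof(PageBuffer) == PageSize);
static_assert(alignof(PageBuffer) == PageSize);

int main() {
    // Any page-aligned backing block; the kernel uses reserved device memory.
    void* backing = std::aligned_alloc(PageSize, 4 * PageSize);
    assert(backing != nullptr);

    // Construct one buffer per page, the way the slab heap hands them out.
    auto* p0 = new (backing) PageBuffer;
    auto* p1 = new (static_cast<std::uint8_t*>(backing) + PageSize) PageBuffer;
    assert(reinterpret_cast<std::uintptr_t>(p1) -
           reinterpret_cast<std::uintptr_t>(p0) == PageSize);

    std::free(backing);
}
```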
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 0602de1f7..02d93b12e 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
| @@ -424,6 +424,68 @@ ResultCode KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std | |||
| 424 | return ResultSuccess; | 424 | return ResultSuccess; |
| 425 | } | 425 | } |
| 426 | 426 | ||
| 427 | VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages, | ||
| 428 | std::size_t num_pages, std::size_t alignment, std::size_t offset, | ||
| 429 | std::size_t guard_pages) { | ||
| 430 | VAddr address = 0; | ||
| 431 | |||
| 432 | if (num_pages <= region_num_pages) { | ||
| 433 | if (this->IsAslrEnabled()) { | ||
| 434 | // Try to directly find a free area up to 8 times. | ||
| 435 | for (std::size_t i = 0; i < 8; i++) { | ||
| 436 | const std::size_t random_offset = | ||
| 437 | KSystemControl::GenerateRandomRange( | ||
| 438 | 0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) * | ||
| 439 | alignment; | ||
| 440 | const VAddr candidate = | ||
| 441 | Common::AlignDown((region_start + random_offset), alignment) + offset; | ||
| 442 | |||
| 443 | KMemoryInfo info = this->QueryInfoImpl(candidate); | ||
| 444 | |||
| 445 | if (info.state != KMemoryState::Free) { | ||
| 446 | continue; | ||
| 447 | } | ||
| 448 | if (region_start > candidate) { | ||
| 449 | continue; | ||
| 450 | } | ||
| 451 | if (info.GetAddress() + guard_pages * PageSize > candidate) { | ||
| 452 | continue; | ||
| 453 | } | ||
| 454 | |||
| 455 | const VAddr candidate_end = candidate + (num_pages + guard_pages) * PageSize - 1; | ||
| 456 | if (candidate_end > info.GetLastAddress()) { | ||
| 457 | continue; | ||
| 458 | } | ||
| 459 | if (candidate_end > region_start + region_num_pages * PageSize - 1) { | ||
| 460 | continue; | ||
| 461 | } | ||
| 462 | |||
| 463 | address = candidate; | ||
| 464 | break; | ||
| 465 | } | ||
| 466 | // Fall back to finding the first free area with a random offset. | ||
| 467 | if (address == 0) { | ||
| 468 | // NOTE: Nintendo does not account for guard pages here. | ||
| 469 | // This may theoretically cause an offset to be chosen that cannot be mapped. We | ||
| 470 | // will account for guard pages. | ||
| 471 | const std::size_t offset_pages = KSystemControl::GenerateRandomRange( | ||
| 472 | 0, region_num_pages - num_pages - guard_pages); | ||
| 473 | address = block_manager->FindFreeArea(region_start + offset_pages * PageSize, | ||
| 474 | region_num_pages - offset_pages, num_pages, | ||
| 475 | alignment, offset, guard_pages); | ||
| 476 | } | ||
| 477 | } | ||
| 478 | |||
| 479 | // Find the first free area. | ||
| 480 | if (address == 0) { | ||
| 481 | address = block_manager->FindFreeArea(region_start, region_num_pages, num_pages, | ||
| 482 | alignment, offset, guard_pages); | ||
| 483 | } | ||
| 484 | } | ||
| 485 | |||
| 486 | return address; | ||
| 487 | } | ||
| 488 | |||
| 427 | ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, | 489 | ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, |
| 428 | KPageTable& src_page_table, VAddr src_addr) { | 490 | KPageTable& src_page_table, VAddr src_addr) { |
| 429 | KScopedLightLock lk(general_lock); | 491 | KScopedLightLock lk(general_lock); |
| @@ -1055,6 +1117,46 @@ ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list | |||
| 1055 | return ResultSuccess; | 1117 | return ResultSuccess; |
| 1056 | } | 1118 | } |
| 1057 | 1119 | ||
| 1120 | ResultCode KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, | ||
| 1121 | PAddr phys_addr, bool is_pa_valid, VAddr region_start, | ||
| 1122 | std::size_t region_num_pages, KMemoryState state, | ||
| 1123 | KMemoryPermission perm) { | ||
| 1124 | ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize); | ||
| 1125 | |||
| 1126 | // Ensure this is a valid map request. | ||
| 1127 | R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), | ||
| 1128 | ResultInvalidCurrentMemory); | ||
| 1129 | R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); | ||
| 1130 | |||
| 1131 | // Lock the table. | ||
| 1132 | KScopedLightLock lk(general_lock); | ||
| 1133 | |||
| 1134 | // Find a random address to map at. | ||
| 1135 | VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, | ||
| 1136 | this->GetNumGuardPages()); | ||
| 1137 | R_UNLESS(addr != 0, ResultOutOfMemory); | ||
| 1138 | ASSERT(Common::IsAligned(addr, alignment)); | ||
| 1139 | ASSERT(this->CanContain(addr, num_pages * PageSize, state)); | ||
| 1140 | ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState::All, KMemoryState::Free, | ||
| 1141 | KMemoryPermission::None, KMemoryPermission::None, | ||
| 1142 | KMemoryAttribute::None, KMemoryAttribute::None) | ||
| 1143 | .IsSuccess()); | ||
| 1144 | |||
| 1145 | // Perform mapping operation. | ||
| 1146 | if (is_pa_valid) { | ||
| 1147 | R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr)); | ||
| 1148 | } else { | ||
| 1149 | UNIMPLEMENTED(); | ||
| 1150 | } | ||
| 1151 | |||
| 1152 | // Update the blocks. | ||
| 1153 | block_manager->Update(addr, num_pages, state, perm); | ||
| 1154 | |||
| 1155 | // We successfully mapped the pages. | ||
| 1156 | *out_addr = addr; | ||
| 1157 | return ResultSuccess; | ||
| 1158 | } | ||
| 1159 | |||
| 1058 | ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) { | 1160 | ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) { |
| 1059 | ASSERT(this->IsLockedByCurrentThread()); | 1161 | ASSERT(this->IsLockedByCurrentThread()); |
| 1060 | 1162 | ||
| @@ -1097,6 +1199,30 @@ ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, | |||
| 1097 | return ResultSuccess; | 1199 | return ResultSuccess; |
| 1098 | } | 1200 | } |
| 1099 | 1201 | ||
| 1202 | ResultCode KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) { | ||
| 1203 | // Check that the unmap is in range. | ||
| 1204 | const std::size_t size = num_pages * PageSize; | ||
| 1205 | R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); | ||
| 1206 | |||
| 1207 | // Lock the table. | ||
| 1208 | KScopedLightLock lk(general_lock); | ||
| 1209 | |||
| 1210 | // Check the memory state. | ||
| 1211 | std::size_t num_allocator_blocks{}; | ||
| 1212 | R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, | ||
| 1213 | KMemoryState::All, state, KMemoryPermission::None, | ||
| 1214 | KMemoryPermission::None, KMemoryAttribute::All, | ||
| 1215 | KMemoryAttribute::None)); | ||
| 1216 | |||
| 1217 | // Perform the unmap. | ||
| 1218 | R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap)); | ||
| 1219 | |||
| 1220 | // Update the blocks. | ||
| 1221 | block_manager->Update(address, num_pages, KMemoryState::Free, KMemoryPermission::None); | ||
| 1222 | |||
| 1223 | return ResultSuccess; | ||
| 1224 | } | ||
| 1225 | |||
| 1100 | ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, | 1226 | ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, |
| 1101 | Svc::MemoryPermission svc_perm) { | 1227 | Svc::MemoryPermission svc_perm) { |
| 1102 | const size_t num_pages = size / PageSize; | 1228 | const size_t num_pages = size / PageSize; |
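`FindFreeArea` accepts a random candidate only after validating it against the region bounds and guard pages. The core bounds arithmetic, isolated as a checkable helper (illustrative names, and only part of the full validation above):

```cpp
#include <cstddef>
#include <cstdint>

constexpr std::size_t PageSize = 0x1000;

// True if num_pages mapped at `candidate` plus trailing guard pages fit
// inside [region_start, region_start + region_num_pages * PageSize).
constexpr bool CandidateFits(std::uint64_t candidate, std::uint64_t region_start,
                             std::size_t region_num_pages, std::size_t num_pages,
                             std::size_t guard_pages) {
    if (candidate < region_start) {
        return false;
    }
    const std::uint64_t candidate_end =
        candidate + (num_pages + guard_pages) * PageSize - 1;
    return candidate_end <= region_start + region_num_pages * PageSize - 1;
}

static_assert(CandidateFits(0x10000, 0x10000, 16, 4, 1));
static_assert(!CandidateFits(0x10000 + 12 * PageSize, 0x10000, 16, 4, 1));
```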
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index e99abe36a..54c6adf8d 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
| @@ -46,7 +46,14 @@ public: | |||
| 46 | ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); | 46 | ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); |
| 47 | ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state, | 47 | ResultCode MapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state, |
| 48 | KMemoryPermission perm); | 48 | KMemoryPermission perm); |
| 49 | ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, | ||
| 50 | PAddr phys_addr, KMemoryState state, KMemoryPermission perm) { | ||
| 51 | return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, | ||
| 52 | this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, | ||
| 53 | state, perm); | ||
| 54 | } | ||
| 49 | ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state); | 55 | ResultCode UnmapPages(VAddr addr, KPageLinkedList& page_linked_list, KMemoryState state); |
| 56 | ResultCode UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state); | ||
| 50 | ResultCode SetProcessMemoryPermission(VAddr addr, std::size_t size, | 57 | ResultCode SetProcessMemoryPermission(VAddr addr, std::size_t size, |
| 51 | Svc::MemoryPermission svc_perm); | 58 | Svc::MemoryPermission svc_perm); |
| 52 | KMemoryInfo QueryInfo(VAddr addr); | 59 | KMemoryInfo QueryInfo(VAddr addr); |
| @@ -91,6 +98,9 @@ private: | |||
| 91 | ResultCode InitializeMemoryLayout(VAddr start, VAddr end); | 98 | ResultCode InitializeMemoryLayout(VAddr start, VAddr end); |
| 92 | ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list, | 99 | ResultCode MapPages(VAddr addr, const KPageLinkedList& page_linked_list, |
| 93 | KMemoryPermission perm); | 100 | KMemoryPermission perm); |
| 101 | ResultCode MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, | ||
| 102 | PAddr phys_addr, bool is_pa_valid, VAddr region_start, | ||
| 103 | std::size_t region_num_pages, KMemoryState state, KMemoryPermission perm); | ||
| 94 | ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list); | 104 | ResultCode UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list); |
| 95 | bool IsRegionMapped(VAddr address, u64 size); | 105 | bool IsRegionMapped(VAddr address, u64 size); |
| 96 | bool IsRegionContiguous(VAddr addr, u64 size) const; | 106 | bool IsRegionContiguous(VAddr addr, u64 size) const; |
| @@ -105,6 +115,9 @@ private: | |||
| 105 | VAddr GetRegionAddress(KMemoryState state) const; | 115 | VAddr GetRegionAddress(KMemoryState state) const; |
| 106 | std::size_t GetRegionSize(KMemoryState state) const; | 116 | std::size_t GetRegionSize(KMemoryState state) const; |
| 107 | 117 | ||
| 118 | VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages, | ||
| 119 | std::size_t alignment, std::size_t offset, std::size_t guard_pages); | ||
| 120 | |||
| 108 | ResultCode CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, | 121 | ResultCode CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, |
| 109 | std::size_t size, KMemoryState state_mask, | 122 | std::size_t size, KMemoryState state_mask, |
| 110 | KMemoryState state, KMemoryPermission perm_mask, | 123 | KMemoryState state, KMemoryPermission perm_mask, |
| @@ -137,7 +150,7 @@ private: | |||
| 137 | return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, | 150 | return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, |
| 138 | state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr); | 151 | state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr); |
| 139 | } | 152 | } |
| 140 | ResultCode CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, | 153 | ResultCode CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask, |
| 141 | KMemoryState state, KMemoryPermission perm_mask, | 154 | KMemoryState state, KMemoryPermission perm_mask, |
| 142 | KMemoryPermission perm, KMemoryAttribute attr_mask, | 155 | KMemoryPermission perm, KMemoryAttribute attr_mask, |
| 143 | KMemoryAttribute attr, | 156 | KMemoryAttribute attr, |
| @@ -210,7 +223,7 @@ public: | |||
| 210 | constexpr VAddr GetAliasCodeRegionSize() const { | 223 | constexpr VAddr GetAliasCodeRegionSize() const { |
| 211 | return alias_code_region_end - alias_code_region_start; | 224 | return alias_code_region_end - alias_code_region_start; |
| 212 | } | 225 | } |
| 213 | size_t GetNormalMemorySize() { | 226 | std::size_t GetNormalMemorySize() { |
| 214 | KScopedLightLock lk(general_lock); | 227 | KScopedLightLock lk(general_lock); |
| 215 | return GetHeapSize() + mapped_physical_memory_size; | 228 | return GetHeapSize() + mapped_physical_memory_size; |
| 216 | } | 229 | } |
diff --git a/src/core/hle/kernel/k_port.cpp b/src/core/hle/kernel/k_port.cpp
index a8ba09c4a..ceb98709f 100644
--- a/src/core/hle/kernel/k_port.cpp
+++ b/src/core/hle/kernel/k_port.cpp
| @@ -57,7 +57,12 @@ ResultCode KPort::EnqueueSession(KServerSession* session) { | |||
| 57 | R_UNLESS(state == State::Normal, ResultPortClosed); | 57 | R_UNLESS(state == State::Normal, ResultPortClosed); |
| 58 | 58 | ||
| 59 | server.EnqueueSession(session); | 59 | server.EnqueueSession(session); |
| 60 | server.GetSessionRequestHandler()->ClientConnected(server.AcceptSession()); | 60 | |
| 61 | if (auto session_ptr = server.GetSessionRequestHandler().lock()) { | ||
| 62 | session_ptr->ClientConnected(server.AcceptSession()); | ||
| 63 | } else { | ||
| 64 | UNREACHABLE(); | ||
| 65 | } | ||
| 61 | 66 | ||
| 62 | return ResultSuccess; | 67 | return ResultSuccess; |
| 63 | } | 68 | } |
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 9233261cd..b39405496 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
| @@ -70,58 +70,6 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority | |||
| 70 | } | 70 | } |
| 71 | } // Anonymous namespace | 71 | } // Anonymous namespace |
| 72 | 72 | ||
| 73 | // Represents a page used for thread-local storage. | ||
| 74 | // | ||
| 75 | // Each TLS page contains slots that may be used by processes and threads. | ||
| 76 | // Every process and thread is created with a slot in some arbitrary page | ||
| 77 | // (whichever page happens to have an available slot). | ||
| 78 | class TLSPage { | ||
| 79 | public: | ||
| 80 | static constexpr std::size_t num_slot_entries = | ||
| 81 | Core::Memory::PAGE_SIZE / Core::Memory::TLS_ENTRY_SIZE; | ||
| 82 | |||
| 83 | explicit TLSPage(VAddr address) : base_address{address} {} | ||
| 84 | |||
| 85 | bool HasAvailableSlots() const { | ||
| 86 | return !is_slot_used.all(); | ||
| 87 | } | ||
| 88 | |||
| 89 | VAddr GetBaseAddress() const { | ||
| 90 | return base_address; | ||
| 91 | } | ||
| 92 | |||
| 93 | std::optional<VAddr> ReserveSlot() { | ||
| 94 | for (std::size_t i = 0; i < is_slot_used.size(); i++) { | ||
| 95 | if (is_slot_used[i]) { | ||
| 96 | continue; | ||
| 97 | } | ||
| 98 | |||
| 99 | is_slot_used[i] = true; | ||
| 100 | return base_address + (i * Core::Memory::TLS_ENTRY_SIZE); | ||
| 101 | } | ||
| 102 | |||
| 103 | return std::nullopt; | ||
| 104 | } | ||
| 105 | |||
| 106 | void ReleaseSlot(VAddr address) { | ||
| 107 | // Ensure that all given addresses are consistent with how TLS pages | ||
| 108 | // are intended to be used when releasing slots. | ||
| 109 | ASSERT(IsWithinPage(address)); | ||
| 110 | ASSERT((address % Core::Memory::TLS_ENTRY_SIZE) == 0); | ||
| 111 | |||
| 112 | const std::size_t index = (address - base_address) / Core::Memory::TLS_ENTRY_SIZE; | ||
| 113 | is_slot_used[index] = false; | ||
| 114 | } | ||
| 115 | |||
| 116 | private: | ||
| 117 | bool IsWithinPage(VAddr address) const { | ||
| 118 | return base_address <= address && address < base_address + Core::Memory::PAGE_SIZE; | ||
| 119 | } | ||
| 120 | |||
| 121 | VAddr base_address; | ||
| 122 | std::bitset<num_slot_entries> is_slot_used; | ||
| 123 | }; | ||
| 124 | |||
| 125 | ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name, | 73 | ResultCode KProcess::Initialize(KProcess* process, Core::System& system, std::string process_name, |
| 126 | ProcessType type, KResourceLimit* res_limit) { | 74 | ProcessType type, KResourceLimit* res_limit) { |
| 127 | auto& kernel = system.Kernel(); | 75 | auto& kernel = system.Kernel(); |
| @@ -404,7 +352,7 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, | |||
| 404 | } | 352 | } |
| 405 | 353 | ||
| 406 | // Create TLS region | 354 | // Create TLS region |
| 407 | tls_region_address = CreateTLSRegion(); | 355 | R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address))); |
| 408 | memory_reservation.Commit(); | 356 | memory_reservation.Commit(); |
| 409 | 357 | ||
| 410 | return handle_table.Initialize(capabilities.GetHandleTableSize()); | 358 | return handle_table.Initialize(capabilities.GetHandleTableSize()); |
| @@ -444,7 +392,7 @@ void KProcess::PrepareForTermination() { | |||
| 444 | 392 | ||
| 445 | stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList()); | 393 | stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList()); |
| 446 | 394 | ||
| 447 | FreeTLSRegion(tls_region_address); | 395 | this->DeleteThreadLocalRegion(tls_region_address); |
| 448 | tls_region_address = 0; | 396 | tls_region_address = 0; |
| 449 | 397 | ||
| 450 | if (resource_limit) { | 398 | if (resource_limit) { |
| @@ -456,9 +404,6 @@ void KProcess::PrepareForTermination() { | |||
| 456 | } | 404 | } |
| 457 | 405 | ||
| 458 | void KProcess::Finalize() { | 406 | void KProcess::Finalize() { |
| 459 | // Finalize the handle table and close any open handles. | ||
| 460 | handle_table.Finalize(); | ||
| 461 | |||
| 462 | // Free all shared memory infos. | 407 | // Free all shared memory infos. |
| 463 | { | 408 | { |
| 464 | auto it = shared_memory_list.begin(); | 409 | auto it = shared_memory_list.begin(); |
| @@ -483,67 +428,110 @@ void KProcess::Finalize() { | |||
| 483 | resource_limit = nullptr; | 428 | resource_limit = nullptr; |
| 484 | } | 429 | } |
| 485 | 430 | ||
| 431 | // Finalize the page table. | ||
| 432 | page_table.reset(); | ||
| 433 | |||
| 486 | // Perform inherited finalization. | 434 | // Perform inherited finalization. |
| 487 | KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize(); | 435 | KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize(); |
| 488 | } | 436 | } |
| 489 | 437 | ||
| 490 | /** | 438 | ResultCode KProcess::CreateThreadLocalRegion(VAddr* out) { |
| 491 | * Attempts to find a TLS page that contains a free slot for | 439 | KThreadLocalPage* tlp = nullptr; |
| 492 | * use by a thread. | 440 | VAddr tlr = 0; |
| 493 | * | ||
| 494 | * @returns If a page with an available slot is found, then an iterator | ||
| 495 | * pointing to the page is returned. Otherwise the end iterator | ||
| 496 | * is returned instead. | ||
| 497 | */ | ||
| 498 | static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) { | ||
| 499 | return std::find_if(tls_pages.begin(), tls_pages.end(), | ||
| 500 | [](const auto& page) { return page.HasAvailableSlots(); }); | ||
| 501 | } | ||
| 502 | 441 | ||
| 503 | VAddr KProcess::CreateTLSRegion() { | 442 | // See if we can get a region from a partially used TLP. |
| 504 | KScopedSchedulerLock lock(kernel); | 443 | { |
| 505 | if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)}; | 444 | KScopedSchedulerLock sl{kernel}; |
| 506 | tls_page_iter != tls_pages.cend()) { | ||
| 507 | return *tls_page_iter->ReserveSlot(); | ||
| 508 | } | ||
| 509 | 445 | ||
| 510 | Page* const tls_page_ptr{kernel.GetUserSlabHeapPages().Allocate()}; | 446 | if (auto it = partially_used_tlp_tree.begin(); it != partially_used_tlp_tree.end()) { |
| 511 | ASSERT(tls_page_ptr); | 447 | tlr = it->Reserve(); |
| 448 | ASSERT(tlr != 0); | ||
| 512 | 449 | ||
| 513 | const VAddr start{page_table->GetKernelMapRegionStart()}; | 450 | if (it->IsAllUsed()) { |
| 514 | const VAddr size{page_table->GetKernelMapRegionEnd() - start}; | 451 | tlp = std::addressof(*it); |
| 515 | const PAddr tls_map_addr{kernel.System().DeviceMemory().GetPhysicalAddr(tls_page_ptr)}; | 452 | partially_used_tlp_tree.erase(it); |
| 516 | const VAddr tls_page_addr{page_table | 453 | fully_used_tlp_tree.insert(*tlp); |
| 517 | ->AllocateAndMapMemory(1, PageSize, true, start, size / PageSize, | 454 | } |
| 518 | KMemoryState::ThreadLocal, | ||
| 519 | KMemoryPermission::UserReadWrite, | ||
| 520 | tls_map_addr) | ||
| 521 | .ValueOr(0)}; | ||
| 522 | 455 | ||
| 523 | ASSERT(tls_page_addr); | 456 | *out = tlr; |
| 457 | return ResultSuccess; | ||
| 458 | } | ||
| 459 | } | ||
| 524 | 460 | ||
| 525 | std::memset(tls_page_ptr, 0, PageSize); | 461 | // Allocate a new page. |
| 526 | tls_pages.emplace_back(tls_page_addr); | 462 | tlp = KThreadLocalPage::Allocate(kernel); |
| 463 | R_UNLESS(tlp != nullptr, ResultOutOfMemory); | ||
| 464 | auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(kernel, tlp); }); | ||
| 527 | 465 | ||
| 528 | const auto reserve_result{tls_pages.back().ReserveSlot()}; | 466 | // Initialize the new page. |
| 529 | ASSERT(reserve_result.has_value()); | 467 | R_TRY(tlp->Initialize(kernel, this)); |
| 468 | |||
| 469 | // Reserve a TLR. | ||
| 470 | tlr = tlp->Reserve(); | ||
| 471 | ASSERT(tlr != 0); | ||
| 472 | |||
| 473 | // Insert into our tree. | ||
| 474 | { | ||
| 475 | KScopedSchedulerLock sl{kernel}; | ||
| 476 | if (tlp->IsAllUsed()) { | ||
| 477 | fully_used_tlp_tree.insert(*tlp); | ||
| 478 | } else { | ||
| 479 | partially_used_tlp_tree.insert(*tlp); | ||
| 480 | } | ||
| 481 | } | ||
| 530 | 482 | ||
| 531 | return *reserve_result; | 483 | // We succeeded! |
| 484 | tlp_guard.Cancel(); | ||
| 485 | *out = tlr; | ||
| 486 | return ResultSuccess; | ||
| 532 | } | 487 | } |
| 533 | 488 | ||
| 534 | void KProcess::FreeTLSRegion(VAddr tls_address) { | 489 | ResultCode KProcess::DeleteThreadLocalRegion(VAddr addr) { |
| 535 | KScopedSchedulerLock lock(kernel); | 490 | KThreadLocalPage* page_to_free = nullptr; |
| 536 | const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE); | 491 | |
| 537 | auto iter = | 492 | // Release the region. |
| 538 | std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) { | 493 | { |
| 539 | return page.GetBaseAddress() == aligned_address; | 494 | KScopedSchedulerLock sl{kernel}; |
| 540 | }); | 495 | |
| 496 | // Try to find the page in the partially used list. | ||
| 497 | auto it = partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize)); | ||
| 498 | if (it == partially_used_tlp_tree.end()) { | ||
| 499 | // If we don't find it, it has to be in the fully used list. | ||
| 500 | it = fully_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize)); | ||
| 501 | R_UNLESS(it != fully_used_tlp_tree.end(), ResultInvalidAddress); | ||
| 502 | |||
| 503 | // Release the region. | ||
| 504 | it->Release(addr); | ||
| 505 | |||
| 506 | // Move the page out of the fully used list. | ||
| 507 | KThreadLocalPage* tlp = std::addressof(*it); | ||
| 508 | fully_used_tlp_tree.erase(it); | ||
| 509 | if (tlp->IsAllFree()) { | ||
| 510 | page_to_free = tlp; | ||
| 511 | } else { | ||
| 512 | partially_used_tlp_tree.insert(*tlp); | ||
| 513 | } | ||
| 514 | } else { | ||
| 515 | // Release the region. | ||
| 516 | it->Release(addr); | ||
| 517 | |||
| 518 | // Handle the all-free case. | ||
| 519 | KThreadLocalPage* tlp = std::addressof(*it); | ||
| 520 | if (tlp->IsAllFree()) { | ||
| 521 | partially_used_tlp_tree.erase(it); | ||
| 522 | page_to_free = tlp; | ||
| 523 | } | ||
| 524 | } | ||
| 525 | } | ||
| 526 | |||
| 527 | // If we should free the page it was in, do so. | ||
| 528 | if (page_to_free != nullptr) { | ||
| 529 | page_to_free->Finalize(); | ||
| 541 | 530 | ||
| 542 | // Something has gone very wrong if we're freeing a region | 531 | KThreadLocalPage::Free(kernel, page_to_free); |
| 543 | // with no actual page available. | 532 | } |
| 544 | ASSERT(iter != tls_pages.cend()); | ||
| 545 | 533 | ||
| 546 | iter->ReleaseSlot(tls_address); | 534 | return ResultSuccess; |
| 547 | } | 535 | } |
| 548 | 536 | ||
| 549 | void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) { | 537 | void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) { |
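`CreateThreadLocalRegion` and `DeleteThreadLocalRegion` replace the old linear `TLSPage` scan with two trees keyed by page address: reserve from a partially used page when one exists, otherwise allocate a fresh page, and migrate pages between the trees as slots fill and empty. A simplified model of that bookkeeping (`std::set` of pointers in place of the intrusive trees, no locking or error paths):

```cpp
#include <bitset>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <set>
#include <vector>

constexpr std::size_t PageSize = 0x1000;
constexpr std::size_t TlsEntrySize = 0x200;
constexpr std::size_t SlotsPerPage = PageSize / TlsEntrySize;  // 8 slots

struct ThreadLocalPage {
    std::uint64_t base;
    std::bitset<SlotsPerPage> used;

    std::uint64_t Reserve() {
        for (std::size_t i = 0; i < SlotsPerPage; i++) {
            if (!used[i]) {
                used[i] = true;
                return base + i * TlsEntrySize;
            }
        }
        return 0;  // no free slot
    }
    bool IsAllUsed() const { return used.all(); }
};

std::uint64_t CreateThreadLocalRegion(
    std::set<ThreadLocalPage*>& partially_used, std::set<ThreadLocalPage*>& fully_used,
    std::vector<std::unique_ptr<ThreadLocalPage>>& pages, std::uint64_t next_base) {
    // Prefer a partially used page, moving it to the fully used set if it fills.
    if (!partially_used.empty()) {
        ThreadLocalPage* tlp = *partially_used.begin();
        const std::uint64_t tlr = tlp->Reserve();
        if (tlp->IsAllUsed()) {
            partially_used.erase(tlp);
            fully_used.insert(tlp);
        }
        return tlr;
    }
    // Otherwise allocate a new page and insert it into the appropriate set.
    pages.push_back(std::make_unique<ThreadLocalPage>(ThreadLocalPage{next_base, {}}));
    ThreadLocalPage* tlp = pages.back().get();
    const std::uint64_t tlr = tlp->Reserve();
    (tlp->IsAllUsed() ? fully_used : partially_used).insert(tlp);
    return tlr;
}
```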
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index cf1b67428..5ed0f2d83 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
| @@ -15,6 +15,7 @@ | |||
| 15 | #include "core/hle/kernel/k_condition_variable.h" | 15 | #include "core/hle/kernel/k_condition_variable.h" |
| 16 | #include "core/hle/kernel/k_handle_table.h" | 16 | #include "core/hle/kernel/k_handle_table.h" |
| 17 | #include "core/hle/kernel/k_synchronization_object.h" | 17 | #include "core/hle/kernel/k_synchronization_object.h" |
| 18 | #include "core/hle/kernel/k_thread_local_page.h" | ||
| 18 | #include "core/hle/kernel/k_worker_task.h" | 19 | #include "core/hle/kernel/k_worker_task.h" |
| 19 | #include "core/hle/kernel/process_capability.h" | 20 | #include "core/hle/kernel/process_capability.h" |
| 20 | #include "core/hle/kernel/slab_helpers.h" | 21 | #include "core/hle/kernel/slab_helpers.h" |
| @@ -362,10 +363,10 @@ public: | |||
| 362 | // Thread-local storage management | 363 | // Thread-local storage management |
| 363 | 364 | ||
| 364 | // Marks the next available region as used and returns the address of the slot. | 365 | // Marks the next available region as used and returns the address of the slot. |
| 365 | [[nodiscard]] VAddr CreateTLSRegion(); | 366 | [[nodiscard]] ResultCode CreateThreadLocalRegion(VAddr* out); |
| 366 | 367 | ||
| 367 | // Frees a used TLS slot identified by the given address | 368 | // Frees a used TLS slot identified by the given address |
| 368 | void FreeTLSRegion(VAddr tls_address); | 369 | ResultCode DeleteThreadLocalRegion(VAddr addr); |
| 369 | 370 | ||
| 370 | private: | 371 | private: |
| 371 | void PinThread(s32 core_id, KThread* thread) { | 372 | void PinThread(s32 core_id, KThread* thread) { |
| @@ -413,13 +414,6 @@ private: | |||
| 413 | /// The ideal CPU core for this process, threads are scheduled on this core by default. | 414 | /// The ideal CPU core for this process, threads are scheduled on this core by default. |
| 414 | u8 ideal_core = 0; | 415 | u8 ideal_core = 0; |
| 415 | 416 | ||
| 416 | /// The Thread Local Storage area is allocated as processes create threads, | ||
| 417 | /// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part | ||
| 418 | /// holds the TLS for a specific thread. This vector contains which parts are in use for each | ||
| 419 | /// page as a bitmask. | ||
| 420 | /// This vector will grow as more pages are allocated for new threads. | ||
| 421 | std::vector<TLSPage> tls_pages; | ||
| 422 | |||
| 423 | /// Contains the parsed process capability descriptors. | 417 | /// Contains the parsed process capability descriptors. |
| 424 | ProcessCapabilities capabilities; | 418 | ProcessCapabilities capabilities; |
| 425 | 419 | ||
| @@ -482,6 +476,12 @@ private: | |||
| 482 | KThread* exception_thread{}; | 476 | KThread* exception_thread{}; |
| 483 | 477 | ||
| 484 | KLightLock state_lock; | 478 | KLightLock state_lock; |
| 479 | |||
| 480 | using TLPTree = | ||
| 481 | Common::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>; | ||
| 482 | using TLPIterator = TLPTree::iterator; | ||
| 483 | TLPTree fully_used_tlp_tree; | ||
| 484 | TLPTree partially_used_tlp_tree; | ||
| 485 | }; | 485 | }; |
| 486 | 486 | ||
| 487 | } // namespace Kernel | 487 | } // namespace Kernel |
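`TLPTree` is built on yuzu's `Common::IntrusiveRedBlackTreeBaseTraits`, where each `KThreadLocalPage` embeds its own tree hook, so insertion and removal never allocate. A rough equivalent using Boost.Intrusive (an illustrative assumption; yuzu's traits differ in detail):

```cpp
#include <boost/intrusive/set.hpp>
#include <cstdint>

namespace bi = boost::intrusive;

// The hook lives inside the node, so tree operations need no heap memory,
// which matters here because the trees are touched under the scheduler lock.
struct ThreadLocalPage : bi::set_base_hook<> {
    std::uint64_t address{};
    explicit ThreadLocalPage(std::uint64_t addr) : address(addr) {}
    friend bool operator<(const ThreadLocalPage& a, const ThreadLocalPage& b) {
        return a.address < b.address;
    }
};

using TLPTree = bi::set<ThreadLocalPage>;

int main() {
    ThreadLocalPage a{0x1000}, b{0x2000};
    TLPTree partially_used;
    partially_used.insert(a);
    partially_used.insert(b);
    partially_used.clear();  // nodes must be unlinked before destruction
}
```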
diff --git a/src/core/hle/kernel/k_server_port.h b/src/core/hle/kernel/k_server_port.h
index 6302d5e61..2185736be 100644
--- a/src/core/hle/kernel/k_server_port.h
+++ b/src/core/hle/kernel/k_server_port.h
| @@ -30,11 +30,11 @@ public: | |||
| 30 | 30 | ||
| 31 | /// Whether or not this server port has an HLE handler available. | 31 | /// Whether or not this server port has an HLE handler available. |
| 32 | bool HasSessionRequestHandler() const { | 32 | bool HasSessionRequestHandler() const { |
| 33 | return session_handler != nullptr; | 33 | return !session_handler.expired(); |
| 34 | } | 34 | } |
| 35 | 35 | ||
| 36 | /// Gets the HLE handler for this port. | 36 | /// Gets the HLE handler for this port. |
| 37 | SessionRequestHandlerPtr GetSessionRequestHandler() const { | 37 | SessionRequestHandlerWeakPtr GetSessionRequestHandler() const { |
| 38 | return session_handler; | 38 | return session_handler; |
| 39 | } | 39 | } |
| 40 | 40 | ||
| @@ -42,7 +42,7 @@ public: | |||
| 42 | * Sets the HLE handler template for the port. ServerSessions created by connecting to this port | 42 | * Sets the HLE handler template for the port. ServerSessions created by connecting to this port |
| 43 | * will inherit a reference to this handler. | 43 | * will inherit a reference to this handler. |
| 44 | */ | 44 | */ |
| 45 | void SetSessionHandler(SessionRequestHandlerPtr&& handler) { | 45 | void SetSessionHandler(SessionRequestHandlerWeakPtr&& handler) { |
| 46 | session_handler = std::move(handler); | 46 | session_handler = std::move(handler); |
| 47 | } | 47 | } |
| 48 | 48 | ||
| @@ -66,7 +66,7 @@ private: | |||
| 66 | void CleanupSessions(); | 66 | void CleanupSessions(); |
| 67 | 67 | ||
| 68 | SessionList session_list; | 68 | SessionList session_list; |
| 69 | SessionRequestHandlerPtr session_handler; | 69 | SessionRequestHandlerWeakPtr session_handler; |
| 70 | KPort* parent{}; | 70 | KPort* parent{}; |
| 71 | }; | 71 | }; |
| 72 | 72 | ||
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index 4d94eb9cf..30c56ff29 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
| @@ -27,10 +27,7 @@ namespace Kernel { | |||
| 27 | 27 | ||
| 28 | KServerSession::KServerSession(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} | 28 | KServerSession::KServerSession(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} |
| 29 | 29 | ||
| 30 | KServerSession::~KServerSession() { | 30 | KServerSession::~KServerSession() = default; |
| 31 | // Ensure that the global list tracking server sessions does not hold on to a reference. | ||
| 32 | kernel.UnregisterServerSession(this); | ||
| 33 | } | ||
| 34 | 31 | ||
| 35 | void KServerSession::Initialize(KSession* parent_session_, std::string&& name_, | 32 | void KServerSession::Initialize(KSession* parent_session_, std::string&& name_, |
| 36 | std::shared_ptr<SessionRequestManager> manager_) { | 33 | std::shared_ptr<SessionRequestManager> manager_) { |
| @@ -49,6 +46,9 @@ void KServerSession::Destroy() { | |||
| 49 | parent->OnServerClosed(); | 46 | parent->OnServerClosed(); |
| 50 | 47 | ||
| 51 | parent->Close(); | 48 | parent->Close(); |
| 49 | |||
| 50 | // Release host emulation members. | ||
| 51 | manager.reset(); | ||
| 52 | } | 52 | } |
| 53 | 53 | ||
| 54 | void KServerSession::OnClientClosed() { | 54 | void KServerSession::OnClientClosed() { |
| @@ -98,7 +98,12 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co | |||
| 98 | UNREACHABLE(); | 98 | UNREACHABLE(); |
| 99 | return ResultSuccess; // Ignore error if asserts are off | 99 | return ResultSuccess; // Ignore error if asserts are off |
| 100 | } | 100 | } |
| 101 | return manager->DomainHandler(object_id - 1)->HandleSyncRequest(*this, context); | 101 | if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) { |
| 102 | return strong_ptr->HandleSyncRequest(*this, context); | ||
| 103 | } else { | ||
| 104 | UNREACHABLE(); | ||
| 105 | return ResultSuccess; | ||
| 106 | } | ||
| 102 | 107 | ||
| 103 | case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: { | 108 | case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: { |
| 104 | LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id); | 109 | LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id); |
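The `Destroy()` addition releases the session's `SessionRequestManager` as soon as the kernel object is torn down, rather than leaving host-side state alive until emulator shutdown; together with the `.lock()` guard in `HandleDomainSyncRequest`, this is what makes the global server-session registry removed further below unnecessary. A reduced sketch of the ownership pattern (stand-in types, not the real `KServerSession`):

```cpp
#include <memory>
#include <string>

// Simplified stand-ins for SessionRequestManager / KServerSession.
struct SessionRequestManager {
    std::string state{"host-side handler state"};
};

struct ServerSession {
    std::shared_ptr<SessionRequestManager> manager =
        std::make_shared<SessionRequestManager>();

    void Destroy() {
        // ...guest-visible teardown happens first (OnServerClosed, Close)...

        // Then host emulation members are released eagerly, so the manager
        // (and anything it owns) does not outlive the session object.
        manager.reset();
    }
};
```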
diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h index 05c0bec9c..5690cc757 100644 --- a/src/core/hle/kernel/k_slab_heap.h +++ b/src/core/hle/kernel/k_slab_heap.h | |||
| @@ -16,39 +16,34 @@ class KernelCore; | |||
| 16 | 16 | ||
| 17 | namespace impl { | 17 | namespace impl { |
| 18 | 18 | ||
| 19 | class KSlabHeapImpl final { | 19 | class KSlabHeapImpl { |
| 20 | public: | ||
| 21 | YUZU_NON_COPYABLE(KSlabHeapImpl); | 20 | YUZU_NON_COPYABLE(KSlabHeapImpl); |
| 22 | YUZU_NON_MOVEABLE(KSlabHeapImpl); | 21 | YUZU_NON_MOVEABLE(KSlabHeapImpl); |
| 23 | 22 | ||
| 23 | public: | ||
| 24 | struct Node { | 24 | struct Node { |
| 25 | Node* next{}; | 25 | Node* next{}; |
| 26 | }; | 26 | }; |
| 27 | 27 | ||
| 28 | public: | ||
| 28 | constexpr KSlabHeapImpl() = default; | 29 | constexpr KSlabHeapImpl() = default; |
| 29 | constexpr ~KSlabHeapImpl() = default; | ||
| 30 | 30 | ||
| 31 | void Initialize(std::size_t size) { | 31 | void Initialize() { |
| 32 | ASSERT(head == nullptr); | 32 | ASSERT(m_head == nullptr); |
| 33 | obj_size = size; | ||
| 34 | } | ||
| 35 | |||
| 36 | constexpr std::size_t GetObjectSize() const { | ||
| 37 | return obj_size; | ||
| 38 | } | 33 | } |
| 39 | 34 | ||
| 40 | Node* GetHead() const { | 35 | Node* GetHead() const { |
| 41 | return head; | 36 | return m_head; |
| 42 | } | 37 | } |
| 43 | 38 | ||
| 44 | void* Allocate() { | 39 | void* Allocate() { |
| 45 | Node* ret = head.load(); | 40 | Node* ret = m_head.load(); |
| 46 | 41 | ||
| 47 | do { | 42 | do { |
| 48 | if (ret == nullptr) { | 43 | if (ret == nullptr) { |
| 49 | break; | 44 | break; |
| 50 | } | 45 | } |
| 51 | } while (!head.compare_exchange_weak(ret, ret->next)); | 46 | } while (!m_head.compare_exchange_weak(ret, ret->next)); |
| 52 | 47 | ||
| 53 | return ret; | 48 | return ret; |
| 54 | } | 49 | } |
| @@ -56,170 +51,157 @@ public: | |||
| 56 | void Free(void* obj) { | 51 | void Free(void* obj) { |
| 57 | Node* node = static_cast<Node*>(obj); | 52 | Node* node = static_cast<Node*>(obj); |
| 58 | 53 | ||
| 59 | Node* cur_head = head.load(); | 54 | Node* cur_head = m_head.load(); |
| 60 | do { | 55 | do { |
| 61 | node->next = cur_head; | 56 | node->next = cur_head; |
| 62 | } while (!head.compare_exchange_weak(cur_head, node)); | 57 | } while (!m_head.compare_exchange_weak(cur_head, node)); |
| 63 | } | 58 | } |
| 64 | 59 | ||
| 65 | private: | 60 | private: |
| 66 | std::atomic<Node*> head{}; | 61 | std::atomic<Node*> m_head{}; |
| 67 | std::size_t obj_size{}; | ||
| 68 | }; | 62 | }; |
| 69 | 63 | ||
| 70 | } // namespace impl | 64 | } // namespace impl |
| 71 | 65 | ||
| 72 | class KSlabHeapBase { | 66 | template <bool SupportDynamicExpansion> |
| 73 | public: | 67 | class KSlabHeapBase : protected impl::KSlabHeapImpl { |
| 74 | YUZU_NON_COPYABLE(KSlabHeapBase); | 68 | YUZU_NON_COPYABLE(KSlabHeapBase); |
| 75 | YUZU_NON_MOVEABLE(KSlabHeapBase); | 69 | YUZU_NON_MOVEABLE(KSlabHeapBase); |
| 76 | 70 | ||
| 77 | constexpr KSlabHeapBase() = default; | 71 | private: |
| 78 | constexpr ~KSlabHeapBase() = default; | 72 | size_t m_obj_size{}; |
| 73 | uintptr_t m_peak{}; | ||
| 74 | uintptr_t m_start{}; | ||
| 75 | uintptr_t m_end{}; | ||
| 79 | 76 | ||
| 80 | constexpr bool Contains(uintptr_t addr) const { | 77 | private: |
| 81 | return start <= addr && addr < end; | 78 | void UpdatePeakImpl(uintptr_t obj) { |
| 82 | } | 79 | static_assert(std::atomic_ref<uintptr_t>::is_always_lock_free); |
| 80 | std::atomic_ref<uintptr_t> peak_ref(m_peak); | ||
| 83 | 81 | ||
| 84 | constexpr std::size_t GetSlabHeapSize() const { | 82 | const uintptr_t alloc_peak = obj + this->GetObjectSize(); |
| 85 | return (end - start) / GetObjectSize(); | 83 | uintptr_t cur_peak = m_peak; |
| 84 | do { | ||
| 85 | if (alloc_peak <= cur_peak) { | ||
| 86 | break; | ||
| 87 | } | ||
| 88 | } while (!peak_ref.compare_exchange_strong(cur_peak, alloc_peak)); | ||
| 86 | } | 89 | } |
| 87 | 90 | ||
| 88 | constexpr std::size_t GetObjectSize() const { | 91 | public: |
| 89 | return impl.GetObjectSize(); | 92 | constexpr KSlabHeapBase() = default; |
| 90 | } | ||
| 91 | 93 | ||
| 92 | constexpr uintptr_t GetSlabHeapAddress() const { | 94 | bool Contains(uintptr_t address) const { |
| 93 | return start; | 95 | return m_start <= address && address < m_end; |
| 94 | } | 96 | } |
| 95 | 97 | ||
| 96 | std::size_t GetObjectIndexImpl(const void* obj) const { | 98 | void Initialize(size_t obj_size, void* memory, size_t memory_size) { |
| 97 | return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize(); | 99 | // Ensure we don't initialize a slab using null memory. |
| 100 | ASSERT(memory != nullptr); | ||
| 101 | |||
| 102 | // Set our object size. | ||
| 103 | m_obj_size = obj_size; | ||
| 104 | |||
| 105 | // Initialize the base allocator. | ||
| 106 | KSlabHeapImpl::Initialize(); | ||
| 107 | |||
| 108 | // Set our tracking variables. | ||
| 109 | const size_t num_obj = (memory_size / obj_size); | ||
| 110 | m_start = reinterpret_cast<uintptr_t>(memory); | ||
| 111 | m_end = m_start + num_obj * obj_size; | ||
| 112 | m_peak = m_start; | ||
| 113 | |||
| 114 | // Free the objects. | ||
| 115 | u8* cur = reinterpret_cast<u8*>(m_end); | ||
| 116 | |||
| 117 | for (size_t i = 0; i < num_obj; i++) { | ||
| 118 | cur -= obj_size; | ||
| 119 | KSlabHeapImpl::Free(cur); | ||
| 120 | } | ||
| 98 | } | 121 | } |
| 99 | 122 | ||
| 100 | std::size_t GetPeakIndex() const { | 123 | size_t GetSlabHeapSize() const { |
| 101 | return GetObjectIndexImpl(reinterpret_cast<const void*>(peak)); | 124 | return (m_end - m_start) / this->GetObjectSize(); |
| 102 | } | 125 | } |
| 103 | 126 | ||
| 104 | void* AllocateImpl() { | 127 | size_t GetObjectSize() const { |
| 105 | return impl.Allocate(); | 128 | return m_obj_size; |
| 106 | } | 129 | } |
| 107 | 130 | ||
| 108 | void FreeImpl(void* obj) { | 131 | void* Allocate() { |
| 109 | // Don't allow freeing an object that wasn't allocated from this heap | 132 | void* obj = KSlabHeapImpl::Allocate(); |
| 110 | ASSERT(Contains(reinterpret_cast<uintptr_t>(obj))); | ||
| 111 | 133 | ||
| 112 | impl.Free(obj); | 134 | return obj; |
| 113 | } | 135 | } |
| 114 | 136 | ||
| 115 | void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) { | 137 | void Free(void* obj) { |
| 116 | // Ensure we don't initialize a slab using null memory | 138 | // Don't allow freeing an object that wasn't allocated from this heap. |
| 117 | ASSERT(memory != nullptr); | 139 | const bool contained = this->Contains(reinterpret_cast<uintptr_t>(obj)); |
| 118 | 140 | ASSERT(contained); | |
| 119 | // Initialize the base allocator | 141 | KSlabHeapImpl::Free(obj); |
| 120 | impl.Initialize(obj_size); | 142 | } |
| 121 | 143 | ||
| 122 | // Set our tracking variables | 144 | size_t GetObjectIndex(const void* obj) const { |
| 123 | const std::size_t num_obj = (memory_size / obj_size); | 145 | if constexpr (SupportDynamicExpansion) { |
| 124 | start = reinterpret_cast<uintptr_t>(memory); | 146 | if (!this->Contains(reinterpret_cast<uintptr_t>(obj))) { |
| 125 | end = start + num_obj * obj_size; | 147 | return std::numeric_limits<size_t>::max(); |
| 126 | peak = start; | 148 | } |
| 149 | } | ||
| 127 | 150 | ||
| 128 | // Free the objects | 151 | return (reinterpret_cast<uintptr_t>(obj) - m_start) / this->GetObjectSize(); |
| 129 | u8* cur = reinterpret_cast<u8*>(end); | 152 | } |
| 130 | 153 | ||
| 131 | for (std::size_t i{}; i < num_obj; i++) { | 154 | size_t GetPeakIndex() const { |
| 132 | cur -= obj_size; | 155 | return this->GetObjectIndex(reinterpret_cast<const void*>(m_peak)); |
| 133 | impl.Free(cur); | ||
| 134 | } | ||
| 135 | } | 156 | } |
| 136 | 157 | ||
| 137 | private: | 158 | uintptr_t GetSlabHeapAddress() const { |
| 138 | using Impl = impl::KSlabHeapImpl; | 159 | return m_start; |
| 160 | } | ||
| 139 | 161 | ||
| 140 | Impl impl; | 162 | size_t GetNumRemaining() const { |
| 141 | uintptr_t peak{}; | 163 | // Only calculate the number of remaining objects under debug configuration. |
| 142 | uintptr_t start{}; | 164 | return 0; |
| 143 | uintptr_t end{}; | 165 | } |
| 144 | }; | 166 | }; |
| 145 | 167 | ||
| 146 | template <typename T> | 168 | template <typename T> |
| 147 | class KSlabHeap final : public KSlabHeapBase { | 169 | class KSlabHeap final : public KSlabHeapBase<false> { |
| 148 | public: | 170 | private: |
| 149 | enum class AllocationType { | 171 | using BaseHeap = KSlabHeapBase<false>; |
| 150 | Host, | ||
| 151 | Guest, | ||
| 152 | }; | ||
| 153 | 172 | ||
| 154 | explicit constexpr KSlabHeap(AllocationType allocation_type_ = AllocationType::Host) | 173 | public: |
| 155 | : KSlabHeapBase(), allocation_type{allocation_type_} {} | 174 | constexpr KSlabHeap() = default; |
| 156 | 175 | ||
| 157 | void Initialize(void* memory, std::size_t memory_size) { | 176 | void Initialize(void* memory, size_t memory_size) { |
| 158 | if (allocation_type == AllocationType::Guest) { | 177 | BaseHeap::Initialize(sizeof(T), memory, memory_size); |
| 159 | InitializeImpl(sizeof(T), memory, memory_size); | ||
| 160 | } | ||
| 161 | } | 178 | } |
| 162 | 179 | ||
| 163 | T* Allocate() { | 180 | T* Allocate() { |
| 164 | switch (allocation_type) { | 181 | T* obj = static_cast<T*>(BaseHeap::Allocate()); |
| 165 | case AllocationType::Host: | ||
| 166 | // Fallback for cases where we do not yet support allocating guest memory from the slab | ||
| 167 | // heap, such as for kernel memory regions. | ||
| 168 | return new T; | ||
| 169 | |||
| 170 | case AllocationType::Guest: | ||
| 171 | T* obj = static_cast<T*>(AllocateImpl()); | ||
| 172 | if (obj != nullptr) { | ||
| 173 | new (obj) T(); | ||
| 174 | } | ||
| 175 | return obj; | ||
| 176 | } | ||
| 177 | 182 | ||
| 178 | UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type); | 183 | if (obj != nullptr) [[likely]] { |
| 179 | return nullptr; | 184 | std::construct_at(obj); |
| 185 | } | ||
| 186 | return obj; | ||
| 180 | } | 187 | } |
| 181 | 188 | ||
| 182 | T* AllocateWithKernel(KernelCore& kernel) { | 189 | T* Allocate(KernelCore& kernel) { |
| 183 | switch (allocation_type) { | 190 | T* obj = static_cast<T*>(BaseHeap::Allocate()); |
| 184 | case AllocationType::Host: | ||
| 185 | // Fallback for cases where we do not yet support allocating guest memory from the slab | ||
| 186 | // heap, such as for kernel memory regions. | ||
| 187 | return new T(kernel); | ||
| 188 | 191 | ||
| 189 | case AllocationType::Guest: | 192 | if (obj != nullptr) [[likely]] { |
| 190 | T* obj = static_cast<T*>(AllocateImpl()); | 193 | std::construct_at(obj, kernel); |
| 191 | if (obj != nullptr) { | ||
| 192 | new (obj) T(kernel); | ||
| 193 | } | ||
| 194 | return obj; | ||
| 195 | } | 194 | } |
| 196 | 195 | return obj; | |
| 197 | UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type); | ||
| 198 | return nullptr; | ||
| 199 | } | 196 | } |
| 200 | 197 | ||
| 201 | void Free(T* obj) { | 198 | void Free(T* obj) { |
| 202 | switch (allocation_type) { | 199 | BaseHeap::Free(obj); |
| 203 | case AllocationType::Host: | ||
| 204 | // Fallback for cases where we do not yet support allocating guest memory from the slab | ||
| 205 | // heap, such as for kernel memory regions. | ||
| 206 | delete obj; | ||
| 207 | return; | ||
| 208 | |||
| 209 | case AllocationType::Guest: | ||
| 210 | FreeImpl(obj); | ||
| 211 | return; | ||
| 212 | } | ||
| 213 | |||
| 214 | UNREACHABLE_MSG("Invalid AllocationType {}", allocation_type); | ||
| 215 | } | 200 | } |
| 216 | 201 | ||
| 217 | constexpr std::size_t GetObjectIndex(const T* obj) const { | 202 | size_t GetObjectIndex(const T* obj) const { |
| 218 | return GetObjectIndexImpl(obj); | 203 | return BaseHeap::GetObjectIndex(obj); |
| 219 | } | 204 | } |
| 220 | |||
| 221 | private: | ||
| 222 | const AllocationType allocation_type; | ||
| 223 | }; | 205 | }; |
| 224 | 206 | ||
| 225 | } // namespace Kernel | 207 | } // namespace Kernel |
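The reworked `KSlabHeapImpl` above keeps freed objects on an intrusive singly linked list and pushes and pops with `compare_exchange_weak` retry loops, the classic lock-free (Treiber) stack. A self-contained version of just that pattern, mirroring the diff:

```cpp
#include <atomic>

// Minimal analog of KSlabHeapImpl's intrusive lock-free free list. Freed
// objects are reinterpreted as Nodes, so the list needs no extra storage.
struct Node {
    Node* next{};
};

std::atomic<Node*> head{};

// Pop (Allocate): read the head, try to swing it to head->next.
// compare_exchange_weak reloads `ret` on failure, so the loop retries
// against the latest head until it wins or the list is empty.
void* Allocate() {
    Node* ret = head.load();
    do {
        if (ret == nullptr) {
            break;  // slab exhausted
        }
    } while (!head.compare_exchange_weak(ret, ret->next));
    return ret;
}

// Push (Free): link the node in front of the current head, then try to
// publish it as the new head.
void Free(void* obj) {
    Node* node = static_cast<Node*>(obj);
    Node* cur_head = head.load();
    do {
        node->next = cur_head;
    } while (!head.compare_exchange_weak(cur_head, node));
}
```

`UpdatePeakImpl` applies the same retry shape through `std::atomic_ref<uintptr_t>`, which lets `m_peak` stay a plain member while still being updated atomically by concurrent allocators.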
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index de3ffe0c7..ba7f72c6b 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp | |||
| @@ -210,7 +210,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s | |||
| 210 | if (owner != nullptr) { | 210 | if (owner != nullptr) { |
| 211 | // Setup the TLS, if needed. | 211 | // Setup the TLS, if needed. |
| 212 | if (type == ThreadType::User) { | 212 | if (type == ThreadType::User) { |
| 213 | tls_address = owner->CreateTLSRegion(); | 213 | R_TRY(owner->CreateThreadLocalRegion(std::addressof(tls_address))); |
| 214 | } | 214 | } |
| 215 | 215 | ||
| 216 | parent = owner; | 216 | parent = owner; |
| @@ -305,7 +305,7 @@ void KThread::Finalize() { | |||
| 305 | 305 | ||
| 306 | // If the thread has a local region, delete it. | 306 | // If the thread has a local region, delete it. |
| 307 | if (tls_address != 0) { | 307 | if (tls_address != 0) { |
| 308 | parent->FreeTLSRegion(tls_address); | 308 | ASSERT(parent->DeleteThreadLocalRegion(tls_address).IsSuccess()); |
| 309 | } | 309 | } |
| 310 | 310 | ||
| 311 | // Release any waiters. | 311 | // Release any waiters. |
| @@ -326,6 +326,9 @@ void KThread::Finalize() { | |||
| 326 | } | 326 | } |
| 327 | } | 327 | } |
| 328 | 328 | ||
| 329 | // Release host emulation members. | ||
| 330 | host_context.reset(); | ||
| 331 | |||
| 329 | // Perform inherited finalization. | 332 | // Perform inherited finalization. |
| 330 | KSynchronizationObject::Finalize(); | 333 | KSynchronizationObject::Finalize(); |
| 331 | } | 334 | } |
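`CreateTLSRegion` used to return an address directly; `CreateThreadLocalRegion` returns a `ResultCode` and writes the address through an out pointer, so allocation failure propagates via `R_TRY` instead of yielding a bogus address. A simplified analog of that control flow (the macro body here is illustrative, not yuzu's exact definition):

```cpp
#include <cstdint>

// Simplified stand-in for the kernel's ResultCode.
struct ResultCode {
    std::uint32_t raw{};
    constexpr bool IsSuccess() const { return raw == 0; }
    constexpr bool IsError() const { return raw != 0; }
};
constexpr ResultCode ResultSuccess{0};

// Illustrative R_TRY: evaluate the expression once and early-return on error.
#define R_TRY(expr)                       \
    do {                                  \
        const ResultCode _rc = (expr);    \
        if (_rc.IsError()) {              \
            return _rc;                   \
        }                                 \
    } while (false)

using VAddr = std::uint64_t;

ResultCode CreateThreadLocalRegion(VAddr* out) {
    *out = 0x2000;  // placeholder; the real code reserves a TLP region
    return ResultSuccess;
}

ResultCode InitializeThread(VAddr* tls_address) {
    // An out-of-memory failure now surfaces to the caller of Initialize.
    R_TRY(CreateThreadLocalRegion(tls_address));
    return ResultSuccess;
}
```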
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index d058db62c..f46db7298 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h | |||
| @@ -656,7 +656,7 @@ private: | |||
| 656 | static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles)); | 656 | static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles)); |
| 657 | 657 | ||
| 658 | struct ConditionVariableComparator { | 658 | struct ConditionVariableComparator { |
| 659 | struct LightCompareType { | 659 | struct RedBlackKeyType { |
| 660 | u64 cv_key{}; | 660 | u64 cv_key{}; |
| 661 | s32 priority{}; | 661 | s32 priority{}; |
| 662 | 662 | ||
| @@ -672,8 +672,8 @@ private: | |||
| 672 | template <typename T> | 672 | template <typename T> |
| 673 | requires( | 673 | requires( |
| 674 | std::same_as<T, KThread> || | 674 | std::same_as<T, KThread> || |
| 675 | std::same_as<T, LightCompareType>) static constexpr int Compare(const T& lhs, | 675 | std::same_as<T, RedBlackKeyType>) static constexpr int Compare(const T& lhs, |
| 676 | const KThread& rhs) { | 676 | const KThread& rhs) { |
| 677 | const u64 l_key = lhs.GetConditionVariableKey(); | 677 | const u64 l_key = lhs.GetConditionVariableKey(); |
| 678 | const u64 r_key = rhs.GetConditionVariableKey(); | 678 | const u64 r_key = rhs.GetConditionVariableKey(); |
| 679 | 679 | ||
diff --git a/src/core/hle/kernel/k_thread_local_page.cpp b/src/core/hle/kernel/k_thread_local_page.cpp new file mode 100644 index 000000000..4653c29f6 --- /dev/null +++ b/src/core/hle/kernel/k_thread_local_page.cpp | |||
| @@ -0,0 +1,65 @@ | |||
| 1 | // Copyright 2022 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include "common/scope_exit.h" | ||
| 6 | #include "core/hle/kernel/k_memory_block.h" | ||
| 7 | #include "core/hle/kernel/k_page_table.h" | ||
| 8 | #include "core/hle/kernel/k_process.h" | ||
| 9 | #include "core/hle/kernel/k_thread_local_page.h" | ||
| 10 | #include "core/hle/kernel/kernel.h" | ||
| 11 | |||
| 12 | namespace Kernel { | ||
| 13 | |||
| 14 | ResultCode KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) { | ||
| 15 | // Set that this process owns us. | ||
| 16 | m_owner = process; | ||
| 17 | m_kernel = &kernel; | ||
| 18 | |||
| 19 | // Allocate a new page. | ||
| 20 | KPageBuffer* page_buf = KPageBuffer::Allocate(kernel); | ||
| 21 | R_UNLESS(page_buf != nullptr, ResultOutOfMemory); | ||
| 22 | auto page_buf_guard = SCOPE_GUARD({ KPageBuffer::Free(kernel, page_buf); }); | ||
| 23 | |||
| 24 | // Map the address in. | ||
| 25 | const auto phys_addr = kernel.System().DeviceMemory().GetPhysicalAddr(page_buf); | ||
| 26 | R_TRY(m_owner->PageTable().MapPages(std::addressof(m_virt_addr), 1, PageSize, phys_addr, | ||
| 27 | KMemoryState::ThreadLocal, | ||
| 28 | KMemoryPermission::UserReadWrite)); | ||
| 29 | |||
| 30 | // We succeeded. | ||
| 31 | page_buf_guard.Cancel(); | ||
| 32 | |||
| 33 | return ResultSuccess; | ||
| 34 | } | ||
| 35 | |||
| 36 | ResultCode KThreadLocalPage::Finalize() { | ||
| 37 | // Get the physical address of the page. | ||
| 38 | const PAddr phys_addr = m_owner->PageTable().GetPhysicalAddr(m_virt_addr); | ||
| 39 | ASSERT(phys_addr); | ||
| 40 | |||
| 41 | // Unmap the page. | ||
| 42 | R_TRY(m_owner->PageTable().UnmapPages(this->GetAddress(), 1, KMemoryState::ThreadLocal)); | ||
| 43 | |||
| 44 | // Free the page. | ||
| 45 | KPageBuffer::Free(*m_kernel, KPageBuffer::FromPhysicalAddress(m_kernel->System(), phys_addr)); | ||
| 46 | |||
| 47 | return ResultSuccess; | ||
| 48 | } | ||
| 49 | |||
| 50 | VAddr KThreadLocalPage::Reserve() { | ||
| 51 | for (size_t i = 0; i < m_is_region_free.size(); i++) { | ||
| 52 | if (m_is_region_free[i]) { | ||
| 53 | m_is_region_free[i] = false; | ||
| 54 | return this->GetRegionAddress(i); | ||
| 55 | } | ||
| 56 | } | ||
| 57 | |||
| 58 | return 0; | ||
| 59 | } | ||
| 60 | |||
| 61 | void KThreadLocalPage::Release(VAddr addr) { | ||
| 62 | m_is_region_free[this->GetRegionIndex(addr)] = true; | ||
| 63 | } | ||
| 64 | |||
| 65 | } // namespace Kernel | ||
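`Initialize` above uses a scope guard to keep the error path leak-free: the guard frees the freshly allocated `KPageBuffer` if mapping fails, and `Cancel()` disarms it once the page is successfully mapped. A minimal analog of that guard idiom (`SCOPE_GUARD` itself lives in `common/scope_exit.h`; this is a simplified reimplementation, not yuzu's):

```cpp
#include <utility>

// Runs the callback on scope exit unless Cancel() was called first.
template <typename F>
class ScopeGuard {
public:
    explicit ScopeGuard(F&& f) : func{std::forward<F>(f)} {}
    ~ScopeGuard() {
        if (active) {
            func();
        }
    }
    void Cancel() {
        active = false;
    }

    ScopeGuard(const ScopeGuard&) = delete;
    ScopeGuard& operator=(const ScopeGuard&) = delete;

private:
    F func;
    bool active{true};
};

bool MapPage(void*) { return true; }  // stub: pretend mapping succeeds
void FreePage(void*) {}               // stub

bool InitializePage(void* page) {
    ScopeGuard guard{[&] { FreePage(page); }};  // undo allocation on failure
    if (!MapPage(page)) {
        return false;  // guard frees the page on the way out
    }
    guard.Cancel();  // success: the page is owned by the mapping now
    return true;
}
```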
diff --git a/src/core/hle/kernel/k_thread_local_page.h b/src/core/hle/kernel/k_thread_local_page.h new file mode 100644 index 000000000..658c67e94 --- /dev/null +++ b/src/core/hle/kernel/k_thread_local_page.h | |||
| @@ -0,0 +1,112 @@ | |||
| 1 | // Copyright 2022 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <algorithm> | ||
| 8 | #include <array> | ||
| 9 | |||
| 10 | #include "common/alignment.h" | ||
| 11 | #include "common/assert.h" | ||
| 12 | #include "common/common_types.h" | ||
| 13 | #include "common/intrusive_red_black_tree.h" | ||
| 14 | #include "core/hle/kernel/k_page_buffer.h" | ||
| 15 | #include "core/hle/kernel/memory_types.h" | ||
| 16 | #include "core/hle/kernel/slab_helpers.h" | ||
| 17 | #include "core/hle/result.h" | ||
| 18 | |||
| 19 | namespace Kernel { | ||
| 20 | |||
| 21 | class KernelCore; | ||
| 22 | class KProcess; | ||
| 23 | |||
| 24 | class KThreadLocalPage final : public Common::IntrusiveRedBlackTreeBaseNode<KThreadLocalPage>, | ||
| 25 | public KSlabAllocated<KThreadLocalPage> { | ||
| 26 | public: | ||
| 27 | static constexpr size_t RegionsPerPage = PageSize / Svc::ThreadLocalRegionSize; | ||
| 28 | static_assert(RegionsPerPage > 0); | ||
| 29 | |||
| 30 | public: | ||
| 31 | constexpr explicit KThreadLocalPage(VAddr addr = {}) : m_virt_addr(addr) { | ||
| 32 | m_is_region_free.fill(true); | ||
| 33 | } | ||
| 34 | |||
| 35 | constexpr VAddr GetAddress() const { | ||
| 36 | return m_virt_addr; | ||
| 37 | } | ||
| 38 | |||
| 39 | ResultCode Initialize(KernelCore& kernel, KProcess* process); | ||
| 40 | ResultCode Finalize(); | ||
| 41 | |||
| 42 | VAddr Reserve(); | ||
| 43 | void Release(VAddr addr); | ||
| 44 | |||
| 45 | bool IsAllUsed() const { | ||
| 46 | return std::ranges::all_of(m_is_region_free.begin(), m_is_region_free.end(), | ||
| 47 | [](bool is_free) { return !is_free; }); | ||
| 48 | } | ||
| 49 | |||
| 50 | bool IsAllFree() const { | ||
| 51 | return std::ranges::all_of(m_is_region_free.begin(), m_is_region_free.end(), | ||
| 52 | [](bool is_free) { return is_free; }); | ||
| 53 | } | ||
| 54 | |||
| 55 | bool IsAnyUsed() const { | ||
| 56 | return !this->IsAllFree(); | ||
| 57 | } | ||
| 58 | |||
| 59 | bool IsAnyFree() const { | ||
| 60 | return !this->IsAllUsed(); | ||
| 61 | } | ||
| 62 | |||
| 63 | public: | ||
| 64 | using RedBlackKeyType = VAddr; | ||
| 65 | |||
| 66 | static constexpr RedBlackKeyType GetRedBlackKey(const RedBlackKeyType& v) { | ||
| 67 | return v; | ||
| 68 | } | ||
| 69 | static constexpr RedBlackKeyType GetRedBlackKey(const KThreadLocalPage& v) { | ||
| 70 | return v.GetAddress(); | ||
| 71 | } | ||
| 72 | |||
| 73 | template <typename T> | ||
| 74 | requires(std::same_as<T, KThreadLocalPage> || | ||
| 75 | std::same_as<T, RedBlackKeyType>) static constexpr int Compare(const T& lhs, | ||
| 76 | const KThreadLocalPage& | ||
| 77 | rhs) { | ||
| 78 | const VAddr lval = GetRedBlackKey(lhs); | ||
| 79 | const VAddr rval = GetRedBlackKey(rhs); | ||
| 80 | |||
| 81 | if (lval < rval) { | ||
| 82 | return -1; | ||
| 83 | } else if (lval == rval) { | ||
| 84 | return 0; | ||
| 85 | } else { | ||
| 86 | return 1; | ||
| 87 | } | ||
| 88 | } | ||
| 89 | |||
| 90 | private: | ||
| 91 | constexpr VAddr GetRegionAddress(size_t i) const { | ||
| 92 | return this->GetAddress() + i * Svc::ThreadLocalRegionSize; | ||
| 93 | } | ||
| 94 | |||
| 95 | constexpr bool Contains(VAddr addr) const { | ||
| 96 | return this->GetAddress() <= addr && addr < this->GetAddress() + PageSize; | ||
| 97 | } | ||
| 98 | |||
| 99 | constexpr size_t GetRegionIndex(VAddr addr) const { | ||
| 100 | ASSERT(Common::IsAligned(addr, Svc::ThreadLocalRegionSize)); | ||
| 101 | ASSERT(this->Contains(addr)); | ||
| 102 | return (addr - this->GetAddress()) / Svc::ThreadLocalRegionSize; | ||
| 103 | } | ||
| 104 | |||
| 105 | private: | ||
| 106 | VAddr m_virt_addr{}; | ||
| 107 | KProcess* m_owner{}; | ||
| 108 | KernelCore* m_kernel{}; | ||
| 109 | std::array<bool, RegionsPerPage> m_is_region_free{}; | ||
| 110 | }; | ||
| 111 | |||
| 112 | } // namespace Kernel | ||
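The `RedBlackKeyType`/`GetRedBlackKey`/`Compare` trio lets the tree be searched by a bare `VAddr` as well as by node, so freeing a TLS region only needs the region address: align it down to its page and look the page up by key. The same idea expressed with a transparent `std::set` comparator as a stand-in for the intrusive tree:

```cpp
#include <cstdint>
#include <set>

using VAddr = std::uint64_t;
constexpr std::size_t PageSize = 0x1000;

struct ThreadLocalPage {
    VAddr address{};
};

// Transparent comparator: the std::set analog of the tree's two
// GetRedBlackKey overloads, allowing lookups by a bare VAddr.
struct ByAddress {
    using is_transparent = void;
    static VAddr Key(const ThreadLocalPage& p) { return p.address; }
    static VAddr Key(VAddr v) { return v; }

    template <typename L, typename R>
    bool operator()(const L& lhs, const R& rhs) const {
        return Key(lhs) < Key(rhs);
    }
};

std::set<ThreadLocalPage, ByAddress> pages;

const ThreadLocalPage* FindPageFor(VAddr region_addr) {
    // A region's owning page starts at the address aligned down to PageSize.
    const VAddr page_addr = region_addr & ~static_cast<VAddr>(PageSize - 1);
    const auto it = pages.find(page_addr);  // heterogeneous lookup by key
    return it != pages.end() ? &*it : nullptr;
}
```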
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 71bd466cf..f9828bc43 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -52,7 +52,7 @@ namespace Kernel { | |||
| 52 | 52 | ||
| 53 | struct KernelCore::Impl { | 53 | struct KernelCore::Impl { |
| 54 | explicit Impl(Core::System& system_, KernelCore& kernel_) | 54 | explicit Impl(Core::System& system_, KernelCore& kernel_) |
| 55 | : time_manager{system_}, object_list_container{kernel_}, | 55 | : time_manager{system_}, |
| 56 | service_threads_manager{1, "yuzu:ServiceThreadsManager"}, system{system_} {} | 56 | service_threads_manager{1, "yuzu:ServiceThreadsManager"}, system{system_} {} |
| 57 | 57 | ||
| 58 | void SetMulticore(bool is_multi) { | 58 | void SetMulticore(bool is_multi) { |
| @@ -60,6 +60,7 @@ struct KernelCore::Impl { | |||
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | void Initialize(KernelCore& kernel) { | 62 | void Initialize(KernelCore& kernel) { |
| 63 | global_object_list_container = std::make_unique<KAutoObjectWithListContainer>(kernel); | ||
| 63 | global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel); | 64 | global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel); |
| 64 | global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel); | 65 | global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel); |
| 65 | global_handle_table->Initialize(KHandleTable::MaxTableSize); | 66 | global_handle_table->Initialize(KHandleTable::MaxTableSize); |
| @@ -76,7 +77,7 @@ struct KernelCore::Impl { | |||
| 76 | // Initialize kernel memory and resources. | 77 | // Initialize kernel memory and resources. |
| 77 | InitializeSystemResourceLimit(kernel, system.CoreTiming()); | 78 | InitializeSystemResourceLimit(kernel, system.CoreTiming()); |
| 78 | InitializeMemoryLayout(); | 79 | InitializeMemoryLayout(); |
| 79 | InitializePageSlab(); | 80 | Init::InitializeKPageBufferSlabHeap(system); |
| 80 | InitializeSchedulers(); | 81 | InitializeSchedulers(); |
| 81 | InitializeSuspendThreads(); | 82 | InitializeSuspendThreads(); |
| 82 | InitializePreemption(kernel); | 83 | InitializePreemption(kernel); |
| @@ -107,19 +108,6 @@ struct KernelCore::Impl { | |||
| 107 | for (auto* server_port : server_ports_) { | 108 | for (auto* server_port : server_ports_) { |
| 108 | server_port->Close(); | 109 | server_port->Close(); |
| 109 | } | 110 | } |
| 110 | // Close all open server sessions. | ||
| 111 | std::unordered_set<KServerSession*> server_sessions_; | ||
| 112 | { | ||
| 113 | std::lock_guard lk(server_sessions_lock); | ||
| 114 | server_sessions_ = server_sessions; | ||
| 115 | server_sessions.clear(); | ||
| 116 | } | ||
| 117 | for (auto* server_session : server_sessions_) { | ||
| 118 | server_session->Close(); | ||
| 119 | } | ||
| 120 | |||
| 121 | // Ensure that the object list container is finalized and properly shutdown. | ||
| 122 | object_list_container.Finalize(); | ||
| 123 | 111 | ||
| 124 | // Ensures all service threads gracefully shutdown. | 112 | // Ensures all service threads gracefully shutdown. |
| 125 | ClearServiceThreads(); | 113 | ClearServiceThreads(); |
| @@ -194,11 +182,15 @@ struct KernelCore::Impl { | |||
| 194 | { | 182 | { |
| 195 | std::lock_guard lk(registered_objects_lock); | 183 | std::lock_guard lk(registered_objects_lock); |
| 196 | if (registered_objects.size()) { | 184 | if (registered_objects.size()) { |
| 197 | LOG_WARNING(Kernel, "{} kernel objects were dangling on shutdown!", | 185 | LOG_DEBUG(Kernel, "{} kernel objects were dangling on shutdown!", |
| 198 | registered_objects.size()); | 186 | registered_objects.size()); |
| 199 | registered_objects.clear(); | 187 | registered_objects.clear(); |
| 200 | } | 188 | } |
| 201 | } | 189 | } |
| 190 | |||
| 191 | // Ensure that the object list container is finalized and properly shutdown. | ||
| 192 | global_object_list_container->Finalize(); | ||
| 193 | global_object_list_container.reset(); | ||
| 202 | } | 194 | } |
| 203 | 195 | ||
| 204 | void InitializePhysicalCores() { | 196 | void InitializePhysicalCores() { |
| @@ -291,15 +283,16 @@ struct KernelCore::Impl { | |||
| 291 | 283 | ||
| 292 | // Gets the dummy KThread for the caller, allocating a new one if this is the first time | 284 | // Gets the dummy KThread for the caller, allocating a new one if this is the first time |
| 293 | KThread* GetHostDummyThread() { | 285 | KThread* GetHostDummyThread() { |
| 294 | auto make_thread = [this]() { | 286 | auto initialize = [this](KThread* thread) { |
| 295 | KThread* thread = KThread::Create(system.Kernel()); | ||
| 296 | ASSERT(KThread::InitializeDummyThread(thread).IsSuccess()); | 287 | ASSERT(KThread::InitializeDummyThread(thread).IsSuccess()); |
| 297 | thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId())); | 288 | thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId())); |
| 298 | return thread; | 289 | return thread; |
| 299 | }; | 290 | }; |
| 300 | 291 | ||
| 301 | thread_local KThread* saved_thread = make_thread(); | 292 | thread_local auto raw_thread = KThread(system.Kernel()); |
| 302 | return saved_thread; | 293 | thread_local auto thread = initialize(&raw_thread); |
| 294 | |||
| 295 | return thread; | ||
| 303 | } | 296 | } |
| 304 | 297 | ||
| 305 | /// Registers a CPU core thread by allocating a host thread ID for it | 298 | /// Registers a CPU core thread by allocating a host thread ID for it |
| @@ -660,22 +653,6 @@ struct KernelCore::Impl { | |||
| 660 | time_phys_addr, time_size, "Time:SharedMemory"); | 653 | time_phys_addr, time_size, "Time:SharedMemory"); |
| 661 | } | 654 | } |
| 662 | 655 | ||
| 663 | void InitializePageSlab() { | ||
| 664 | // Allocate slab heaps | ||
| 665 | user_slab_heap_pages = | ||
| 666 | std::make_unique<KSlabHeap<Page>>(KSlabHeap<Page>::AllocationType::Guest); | ||
| 667 | |||
| 668 | // TODO(ameerj): This should be derived, not hardcoded within the kernel | ||
| 669 | constexpr u64 user_slab_heap_size{0x3de000}; | ||
| 670 | // Reserve slab heaps | ||
| 671 | ASSERT( | ||
| 672 | system_resource_limit->Reserve(LimitableResource::PhysicalMemory, user_slab_heap_size)); | ||
| 673 | // Initialize slab heap | ||
| 674 | user_slab_heap_pages->Initialize( | ||
| 675 | system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase), | ||
| 676 | user_slab_heap_size); | ||
| 677 | } | ||
| 678 | |||
| 679 | KClientPort* CreateNamedServicePort(std::string name) { | 656 | KClientPort* CreateNamedServicePort(std::string name) { |
| 680 | auto search = service_interface_factory.find(name); | 657 | auto search = service_interface_factory.find(name); |
| 681 | if (search == service_interface_factory.end()) { | 658 | if (search == service_interface_factory.end()) { |
| @@ -713,7 +690,6 @@ struct KernelCore::Impl { | |||
| 713 | } | 690 | } |
| 714 | 691 | ||
| 715 | std::mutex server_ports_lock; | 692 | std::mutex server_ports_lock; |
| 716 | std::mutex server_sessions_lock; | ||
| 717 | std::mutex registered_objects_lock; | 693 | std::mutex registered_objects_lock; |
| 718 | std::mutex registered_in_use_objects_lock; | 694 | std::mutex registered_in_use_objects_lock; |
| 719 | 695 | ||
| @@ -737,14 +713,13 @@ struct KernelCore::Impl { | |||
| 737 | // stores all the objects in place. | 713 | // stores all the objects in place. |
| 738 | std::unique_ptr<KHandleTable> global_handle_table; | 714 | std::unique_ptr<KHandleTable> global_handle_table; |
| 739 | 715 | ||
| 740 | KAutoObjectWithListContainer object_list_container; | 716 | std::unique_ptr<KAutoObjectWithListContainer> global_object_list_container; |
| 741 | 717 | ||
| 742 | /// Map of named ports managed by the kernel, which can be retrieved using | 718 | /// Map of named ports managed by the kernel, which can be retrieved using |
| 743 | /// the ConnectToPort SVC. | 719 | /// the ConnectToPort SVC. |
| 744 | std::unordered_map<std::string, ServiceInterfaceFactory> service_interface_factory; | 720 | std::unordered_map<std::string, ServiceInterfaceFactory> service_interface_factory; |
| 745 | NamedPortTable named_ports; | 721 | NamedPortTable named_ports; |
| 746 | std::unordered_set<KServerPort*> server_ports; | 722 | std::unordered_set<KServerPort*> server_ports; |
| 747 | std::unordered_set<KServerSession*> server_sessions; | ||
| 748 | std::unordered_set<KAutoObject*> registered_objects; | 723 | std::unordered_set<KAutoObject*> registered_objects; |
| 749 | std::unordered_set<KAutoObject*> registered_in_use_objects; | 724 | std::unordered_set<KAutoObject*> registered_in_use_objects; |
| 750 | 725 | ||
| @@ -756,7 +731,6 @@ struct KernelCore::Impl { | |||
| 756 | 731 | ||
| 757 | // Kernel memory management | 732 | // Kernel memory management |
| 758 | std::unique_ptr<KMemoryManager> memory_manager; | 733 | std::unique_ptr<KMemoryManager> memory_manager; |
| 759 | std::unique_ptr<KSlabHeap<Page>> user_slab_heap_pages; | ||
| 760 | 734 | ||
| 761 | // Shared memory for services | 735 | // Shared memory for services |
| 762 | Kernel::KSharedMemory* hid_shared_mem{}; | 736 | Kernel::KSharedMemory* hid_shared_mem{}; |
| @@ -915,11 +889,11 @@ const Core::ExclusiveMonitor& KernelCore::GetExclusiveMonitor() const { | |||
| 915 | } | 889 | } |
| 916 | 890 | ||
| 917 | KAutoObjectWithListContainer& KernelCore::ObjectListContainer() { | 891 | KAutoObjectWithListContainer& KernelCore::ObjectListContainer() { |
| 918 | return impl->object_list_container; | 892 | return *impl->global_object_list_container; |
| 919 | } | 893 | } |
| 920 | 894 | ||
| 921 | const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const { | 895 | const KAutoObjectWithListContainer& KernelCore::ObjectListContainer() const { |
| 922 | return impl->object_list_container; | 896 | return *impl->global_object_list_container; |
| 923 | } | 897 | } |
| 924 | 898 | ||
| 925 | void KernelCore::InvalidateAllInstructionCaches() { | 899 | void KernelCore::InvalidateAllInstructionCaches() { |
| @@ -949,16 +923,6 @@ KClientPort* KernelCore::CreateNamedServicePort(std::string name) { | |||
| 949 | return impl->CreateNamedServicePort(std::move(name)); | 923 | return impl->CreateNamedServicePort(std::move(name)); |
| 950 | } | 924 | } |
| 951 | 925 | ||
| 952 | void KernelCore::RegisterServerSession(KServerSession* server_session) { | ||
| 953 | std::lock_guard lk(impl->server_sessions_lock); | ||
| 954 | impl->server_sessions.insert(server_session); | ||
| 955 | } | ||
| 956 | |||
| 957 | void KernelCore::UnregisterServerSession(KServerSession* server_session) { | ||
| 958 | std::lock_guard lk(impl->server_sessions_lock); | ||
| 959 | impl->server_sessions.erase(server_session); | ||
| 960 | } | ||
| 961 | |||
| 962 | void KernelCore::RegisterKernelObject(KAutoObject* object) { | 926 | void KernelCore::RegisterKernelObject(KAutoObject* object) { |
| 963 | std::lock_guard lk(impl->registered_objects_lock); | 927 | std::lock_guard lk(impl->registered_objects_lock); |
| 964 | impl->registered_objects.insert(object); | 928 | impl->registered_objects.insert(object); |
| @@ -1031,14 +995,6 @@ const KMemoryManager& KernelCore::MemoryManager() const { | |||
| 1031 | return *impl->memory_manager; | 995 | return *impl->memory_manager; |
| 1032 | } | 996 | } |
| 1033 | 997 | ||
| 1034 | KSlabHeap<Page>& KernelCore::GetUserSlabHeapPages() { | ||
| 1035 | return *impl->user_slab_heap_pages; | ||
| 1036 | } | ||
| 1037 | |||
| 1038 | const KSlabHeap<Page>& KernelCore::GetUserSlabHeapPages() const { | ||
| 1039 | return *impl->user_slab_heap_pages; | ||
| 1040 | } | ||
| 1041 | |||
| 1042 | Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { | 998 | Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { |
| 1043 | return *impl->hid_shared_mem; | 999 | return *impl->hid_shared_mem; |
| 1044 | } | 1000 | } |
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index c1254b18d..7087bbda6 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h | |||
| @@ -43,6 +43,7 @@ class KHandleTable; | |||
| 43 | class KLinkedListNode; | 43 | class KLinkedListNode; |
| 44 | class KMemoryLayout; | 44 | class KMemoryLayout; |
| 45 | class KMemoryManager; | 45 | class KMemoryManager; |
| 46 | class KPageBuffer; | ||
| 46 | class KPort; | 47 | class KPort; |
| 47 | class KProcess; | 48 | class KProcess; |
| 48 | class KResourceLimit; | 49 | class KResourceLimit; |
| @@ -52,6 +53,7 @@ class KSession; | |||
| 52 | class KSharedMemory; | 53 | class KSharedMemory; |
| 53 | class KSharedMemoryInfo; | 54 | class KSharedMemoryInfo; |
| 54 | class KThread; | 55 | class KThread; |
| 56 | class KThreadLocalPage; | ||
| 55 | class KTransferMemory; | 57 | class KTransferMemory; |
| 56 | class KWorkerTaskManager; | 58 | class KWorkerTaskManager; |
| 57 | class KWritableEvent; | 59 | class KWritableEvent; |
| @@ -194,14 +196,6 @@ public: | |||
| 194 | /// Opens a port to a service previously registered with RegisterNamedService. | 196 | /// Opens a port to a service previously registered with RegisterNamedService. |
| 195 | KClientPort* CreateNamedServicePort(std::string name); | 197 | KClientPort* CreateNamedServicePort(std::string name); |
| 196 | 198 | ||
| 197 | /// Registers a server session with the global emulation state, to be freed on shutdown. This is | ||
| 198 | /// necessary because we do not emulate processes for HLE sessions. | ||
| 199 | void RegisterServerSession(KServerSession* server_session); | ||
| 200 | |||
| 201 | /// Unregisters a server session previously registered with RegisterServerSession when it was | ||
| 202 | /// destroyed during the current emulation session. | ||
| 203 | void UnregisterServerSession(KServerSession* server_session); | ||
| 204 | |||
| 205 | /// Registers all kernel objects with the global emulation state, this is purely for tracking | 199 | /// Registers all kernel objects with the global emulation state, this is purely for tracking |
| 206 | /// leaks after emulation has been shutdown. | 200 | /// leaks after emulation has been shutdown. |
| 207 | void RegisterKernelObject(KAutoObject* object); | 201 | void RegisterKernelObject(KAutoObject* object); |
| @@ -239,12 +233,6 @@ public: | |||
| 239 | /// Gets the virtual memory manager for the kernel. | 233 | /// Gets the virtual memory manager for the kernel. |
| 240 | const KMemoryManager& MemoryManager() const; | 234 | const KMemoryManager& MemoryManager() const; |
| 241 | 235 | ||
| 242 | /// Gets the slab heap allocated for user space pages. | ||
| 243 | KSlabHeap<Page>& GetUserSlabHeapPages(); | ||
| 244 | |||
| 245 | /// Gets the slab heap allocated for user space pages. | ||
| 246 | const KSlabHeap<Page>& GetUserSlabHeapPages() const; | ||
| 247 | |||
| 248 | /// Gets the shared memory object for HID services. | 236 | /// Gets the shared memory object for HID services. |
| 249 | Kernel::KSharedMemory& GetHidSharedMem(); | 237 | Kernel::KSharedMemory& GetHidSharedMem(); |
| 250 | 238 | ||
| @@ -336,6 +324,10 @@ public: | |||
| 336 | return slab_heap_container->writeable_event; | 324 | return slab_heap_container->writeable_event; |
| 337 | } else if constexpr (std::is_same_v<T, KCodeMemory>) { | 325 | } else if constexpr (std::is_same_v<T, KCodeMemory>) { |
| 338 | return slab_heap_container->code_memory; | 326 | return slab_heap_container->code_memory; |
| 327 | } else if constexpr (std::is_same_v<T, KPageBuffer>) { | ||
| 328 | return slab_heap_container->page_buffer; | ||
| 329 | } else if constexpr (std::is_same_v<T, KThreadLocalPage>) { | ||
| 330 | return slab_heap_container->thread_local_page; | ||
| 339 | } | 331 | } |
| 340 | } | 332 | } |
| 341 | 333 | ||
| @@ -397,6 +389,8 @@ private: | |||
| 397 | KSlabHeap<KTransferMemory> transfer_memory; | 389 | KSlabHeap<KTransferMemory> transfer_memory; |
| 398 | KSlabHeap<KWritableEvent> writeable_event; | 390 | KSlabHeap<KWritableEvent> writeable_event; |
| 399 | KSlabHeap<KCodeMemory> code_memory; | 391 | KSlabHeap<KCodeMemory> code_memory; |
| 392 | KSlabHeap<KPageBuffer> page_buffer; | ||
| 393 | KSlabHeap<KThreadLocalPage> thread_local_page; | ||
| 400 | }; | 394 | }; |
| 401 | 395 | ||
| 402 | std::unique_ptr<SlabHeapContainer> slab_heap_container; | 396 | std::unique_ptr<SlabHeapContainer> slab_heap_container; |
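The `SlabHeap<T>()` accessor grows two more `if constexpr` branches for the new `KPageBuffer` and `KThreadLocalPage` heaps. The dispatch technique, reduced to a standalone example (with an added `static_assert` fallback that the real accessor omits):

```cpp
#include <type_traits>

struct KPageBuffer {};
struct KThreadLocalPage {};

template <typename T>
struct SlabHeap { /* allocator for objects of type T */ };

struct Heaps {
    SlabHeap<KPageBuffer> page_buffer;
    SlabHeap<KThreadLocalPage> thread_local_page;
};

Heaps heaps;

// Compile-time dispatch, as in KernelCore::SlabHeap<T>(): each supported
// type maps to its dedicated heap member, and an unsupported T fails at
// compile time rather than at runtime.
template <typename T>
SlabHeap<T>& GetSlabHeap() {
    if constexpr (std::is_same_v<T, KPageBuffer>) {
        return heaps.page_buffer;
    } else if constexpr (std::is_same_v<T, KThreadLocalPage>) {
        return heaps.thread_local_page;
    } else {
        static_assert(!sizeof(T), "no slab heap registered for T");
    }
}
```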
diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp index 4eb3a5988..52d25b837 100644 --- a/src/core/hle/kernel/service_thread.cpp +++ b/src/core/hle/kernel/service_thread.cpp | |||
| @@ -49,12 +49,9 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std | |||
| 49 | return; | 49 | return; |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | // Allocate a dummy guest thread for this host thread. | ||
| 52 | kernel.RegisterHostThread(); | 53 | kernel.RegisterHostThread(); |
| 53 | 54 | ||
| 54 | // Ensure the dummy thread allocated for this host thread is closed on exit. | ||
| 55 | auto* dummy_thread = kernel.GetCurrentEmuThread(); | ||
| 56 | SCOPE_EXIT({ dummy_thread->Close(); }); | ||
| 57 | |||
| 58 | while (true) { | 55 | while (true) { |
| 59 | std::function<void()> task; | 56 | std::function<void()> task; |
| 60 | 57 | ||
diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h index f1c11256e..dc1e48fc9 100644 --- a/src/core/hle/kernel/slab_helpers.h +++ b/src/core/hle/kernel/slab_helpers.h | |||
| @@ -59,7 +59,7 @@ class KAutoObjectWithSlabHeapAndContainer : public Base { | |||
| 59 | 59 | ||
| 60 | private: | 60 | private: |
| 61 | static Derived* Allocate(KernelCore& kernel) { | 61 | static Derived* Allocate(KernelCore& kernel) { |
| 62 | return kernel.SlabHeap<Derived>().AllocateWithKernel(kernel); | 62 | return kernel.SlabHeap<Derived>().Allocate(kernel); |
| 63 | } | 63 | } |
| 64 | 64 | ||
| 65 | static void Free(KernelCore& kernel, Derived* obj) { | 65 | static void Free(KernelCore& kernel, Derived* obj) { |
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h index 365e22e4e..b2e9ec092 100644 --- a/src/core/hle/kernel/svc_types.h +++ b/src/core/hle/kernel/svc_types.h | |||
| @@ -96,4 +96,6 @@ constexpr inline s32 IdealCoreNoUpdate = -3; | |||
| 96 | constexpr inline s32 LowestThreadPriority = 63; | 96 | constexpr inline s32 LowestThreadPriority = 63; |
| 97 | constexpr inline s32 HighestThreadPriority = 0; | 97 | constexpr inline s32 HighestThreadPriority = 0; |
| 98 | 98 | ||
| 99 | constexpr inline size_t ThreadLocalRegionSize = 0x200; | ||
| 100 | |||
| 99 | } // namespace Kernel::Svc | 101 | } // namespace Kernel::Svc |
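The new `ThreadLocalRegionSize` constant pins down the layout that the rest of this change relies on, e.g. `KThreadLocalPage::RegionsPerPage`. The arithmetic, checked at compile time:

```cpp
#include <cstddef>

// With 0x200-byte TLS regions and 0x1000-byte pages, each KThreadLocalPage
// carves one page into exactly 8 thread-local regions.
constexpr std::size_t PageSize = 0x1000;
constexpr std::size_t ThreadLocalRegionSize = 0x200;

constexpr std::size_t RegionsPerPage = PageSize / ThreadLocalRegionSize;
static_assert(RegionsPerPage == 8);
```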