Diffstat (limited to 'src')

 src/core/CMakeLists.txt                  |   2 +
 src/core/hle/kernel/memory/page_heap.cpp | 116 ++++++++
 src/core/hle/kernel/memory/page_heap.h   | 365 ++++++++++++++++
 3 files changed, 483 insertions(+), 0 deletions(-)
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 3af325c8e..2a2239951 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -159,6 +159,8 @@ add_library(core STATIC
     hle/kernel/memory/memory_block.h
     hle/kernel/memory/memory_types.h
     hle/kernel/memory/page_linked_list.h
+    hle/kernel/memory/page_heap.cpp
+    hle/kernel/memory/page_heap.h
     hle/kernel/memory/slab_heap.h
     hle/kernel/memory/system_control.cpp
     hle/kernel/memory/system_control.h
diff --git a/src/core/hle/kernel/memory/page_heap.cpp b/src/core/hle/kernel/memory/page_heap.cpp
new file mode 100644
index 000000000..115084160
--- /dev/null
+++ b/src/core/hle/kernel/memory/page_heap.cpp
@@ -0,0 +1,116 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/core.h"
#include "core/hle/kernel/memory/page_heap.h"
#include "core/memory.h"

namespace Kernel::Memory {

void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) {
    // Check our assumptions
    ASSERT(Common::IsAligned(address, PageSize));
    ASSERT(Common::IsAligned(size, PageSize));

    // Set our members
    heap_address = address;
    heap_size = size;

    // Set up the bitmaps
    metadata.resize(metadata_size / sizeof(u64));
    u64* cur_bitmap_storage{metadata.data()};
    for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
        const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
        const std::size_t next_block_shift{
            (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
        cur_bitmap_storage = blocks[i].Initialize(heap_address, heap_size, cur_block_shift,
                                                  next_block_shift, cur_bitmap_storage);
    }
}

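// Example: a request for index 1 (64 KiB) when only the 2 MiB class has a
// free block pops the 2 MiB block, returns its base address, and frees the
// unused 1984 KiB tail (496 pages) back into the smaller classes.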
VAddr PageHeap::AllocateBlock(s32 index) {
    const std::size_t needed_size{blocks[index].GetSize()};

    for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
        if (const VAddr addr{blocks[i].PopBlock()}; addr) {
            if (const std::size_t allocated_size{blocks[i].GetSize()};
                allocated_size > needed_size) {
                Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
            }
            return addr;
        }
    }

    return 0;
}

void PageHeap::FreeBlock(VAddr block, s32 index) {
    do {
        block = blocks[index++].PushBlock(block);
    } while (block != 0);
}

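// Example (hypothetical numbers, using the 4 KiB and 64 KiB classes): freeing
// 40 pages at 0x11000 covers 0x11000..0x39000; the aligned middle
// 0x20000..0x30000 is freed as one 64 KiB block, the 15 pages before it and
// the 9 pages after it as 4 KiB blocks.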
void PageHeap::Free(VAddr addr, std::size_t num_pages) {
    // Freeing no pages is a no-op
    if (num_pages == 0) {
        return;
    }

    // Find the largest block size that we can free, and free as many as possible
    s32 big_index{static_cast<s32>(MemoryBlockPageShifts.size()) - 1};
    const VAddr start{addr};
    const VAddr end{(num_pages * PageSize) + addr};
    VAddr before_start{start};
    VAddr before_end{start};
    VAddr after_start{end};
    VAddr after_end{end};
    while (big_index >= 0) {
        const std::size_t block_size{blocks[big_index].GetSize()};
        const VAddr big_start{Common::AlignUp(start, block_size)};
        const VAddr big_end{Common::AlignDown(end, block_size)};
        if (big_start < big_end) {
            // Free as many big blocks as we can
            for (auto block{big_start}; block < big_end; block += block_size) {
                FreeBlock(block, big_index);
            }
            before_end = big_start;
            after_start = big_end;
            break;
        }
        big_index--;
    }
    ASSERT(big_index >= 0);

    // Free space before the big blocks
    for (s32 i{big_index - 1}; i >= 0; i--) {
        const std::size_t block_size{blocks[i].GetSize()};
        while (before_start + block_size <= before_end) {
            before_end -= block_size;
            FreeBlock(before_end, i);
        }
    }

    // Free space after the big blocks
    for (s32 i{big_index - 1}; i >= 0; i--) {
        const std::size_t block_size{blocks[i].GetSize()};
        while (after_start + block_size <= after_end) {
            FreeBlock(after_start, i);
            after_start += block_size;
        }
    }
}

std::size_t PageHeap::CalculateMetadataOverheadSize(std::size_t region_size) {
    std::size_t overhead_size = 0;
    for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
        const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
        const std::size_t next_block_shift{
            (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
        overhead_size += PageHeap::Block::CalculateMetadataOverheadSize(
            region_size, cur_block_shift, next_block_shift);
    }
    return Common::AlignUp(overhead_size, PageSize);
}

} // namespace Kernel::Memory
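
For context, a minimal sketch (hypothetical base address and size, not part of this change) of how a caller might bring a PageHeap up. Initialize() starts with every bitmap empty, so nothing is allocatable until the owner frees the managed region into the heap:

    using namespace Kernel::Memory;

    constexpr VAddr heap_base{0x80000000};             // hypothetical base
    constexpr std::size_t heap_size{64 * 1024 * 1024}; // hypothetical 64 MiB

    PageHeap heap;
    heap.Initialize(heap_base, heap_size,
                    PageHeap::CalculateMetadataOverheadSize(heap_size));
    heap.Free(heap_base, heap_size / PageSize); // make the whole region allocatable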
diff --git a/src/core/hle/kernel/memory/page_heap.h b/src/core/hle/kernel/memory/page_heap.h
new file mode 100644
index 000000000..9eb15a053
--- /dev/null
+++ b/src/core/hle/kernel/memory/page_heap.h
@@ -0,0 +1,365 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <algorithm>
#include <array>
#include <vector>

#include "common/alignment.h"
#include "common/assert.h"
#include "common/bit_util.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "core/hle/kernel/memory/memory_types.h"

namespace Kernel::Memory {

class PageHeap final : NonCopyable {
public:
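    // Size-class selection helpers. Example: for a 10-page request,
    // GetAlignedBlockIndex(10, 1) == 1 (smallest class that fits: 64 KiB ==
    // 16 pages), while GetBlockIndex(10) == 0 (largest class whose size the
    // request still covers: 4 KiB == 1 page).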
    static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
        const std::size_t target_pages{std::max(num_pages, align_pages)};
        for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
            if (target_pages <= (std::size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
                return static_cast<s32>(i);
            }
        }
        return -1;
    }

    static constexpr s32 GetBlockIndex(std::size_t num_pages) {
        for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
            if (num_pages >= (std::size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
                return i;
            }
        }
        return -1;
    }

    static constexpr std::size_t GetBlockSize(std::size_t index) {
        return std::size_t(1) << MemoryBlockPageShifts[index];
    }

    static constexpr std::size_t GetBlockNumPages(std::size_t index) {
        return GetBlockSize(index) / PageSize;
    }

private:
    static constexpr std::size_t NumMemoryBlockPageShifts{7};
    static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
        0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E};
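    // The shifts above decode to block sizes of 4 KiB, 64 KiB, 2 MiB, 4 MiB,
    // 32 MiB, 512 MiB and 1 GiB (1 << shift bytes each).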

    class Block final : NonCopyable {
    private:
        class Bitmap final : NonCopyable {
        public:
            static constexpr std::size_t MaxDepth{4};

        private:
            std::array<u64*, MaxDepth> bit_storages{};
            std::size_t num_bits{};
            std::size_t used_depths{};

        public:
            constexpr Bitmap() = default;

            constexpr std::size_t GetNumBits() const {
                return num_bits;
            }
            constexpr s32 GetHighestDepthIndex() const {
                return static_cast<s32>(used_depths) - 1;
            }

            constexpr u64* Initialize(u64* storage, std::size_t size) {
                // Initially, everything is unset
                num_bits = 0;

                // Calculate the needed bitmap depth
                used_depths = static_cast<std::size_t>(GetRequiredDepth(size));
                ASSERT(used_depths <= MaxDepth);

                // Set the bitmap pointers
                for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) {
                    bit_storages[depth] = storage;
                    size = Common::AlignUp(size, 64) / 64;
                    storage += size;
                }

                return storage;
            }

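            // Example: with used_depths == 2, if summary word 0 has bit 3 set
            // and detail word 3 has bit 5 as its lowest set bit, the walk
            // below returns offset 3 * 64 + 5 == 197.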
            s64 FindFreeBlock() const {
                uintptr_t offset{};
                s32 depth{};

                do {
                    const u64 v{bit_storages[depth][offset]};
                    if (v == 0) {
                        // A zero word can only occur at depth 0; deeper levels
                        // are only visited after a higher level advertised a
                        // free bit
                        ASSERT(depth == 0);
                        return -1;
                    }
                    offset = offset * 64 + Common::CountTrailingZeroes64(v);
                    ++depth;
                } while (depth < static_cast<s32>(used_depths));

                return static_cast<s64>(offset);
            }

            constexpr void SetBit(std::size_t offset) {
                SetBit(GetHighestDepthIndex(), offset);
                num_bits++;
            }

            constexpr void ClearBit(std::size_t offset) {
                ClearBit(GetHighestDepthIndex(), offset);
                num_bits--;
            }

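            // Example: ClearRange(64, 32) succeeds only if bits 64..95 are all
            // set; it clears them and, if word 1 becomes zero, clears the
            // matching summary bit one level up.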
            constexpr bool ClearRange(std::size_t offset, std::size_t count) {
                const s32 depth{GetHighestDepthIndex()};
                const std::size_t bit_ind{offset / 64};
                u64* bits{bit_storages[depth]};
                if (count < 64) {
                    const std::size_t shift{offset % 64};
                    ASSERT(shift + count <= 64);
                    // Check that all the bits are set
                    const u64 mask{((u64(1) << count) - 1) << shift};
                    u64 v{bits[bit_ind]};
                    if ((v & mask) != mask) {
                        return false;
                    }

                    // Clear the bits
                    v &= ~mask;
                    bits[bit_ind] = v;
                    if (v == 0) {
                        ClearBit(depth - 1, bit_ind);
                    }
                } else {
                    ASSERT(offset % 64 == 0);
                    ASSERT(count % 64 == 0);
                    // Check that all the bits are set
                    std::size_t remaining{count};
                    std::size_t i = 0;
                    do {
                        if (bits[bit_ind + i++] != ~u64(0)) {
                            return false;
                        }
                        remaining -= 64;
                    } while (remaining > 0);

                    // Clear the bits
                    remaining = count;
                    i = 0;
                    do {
                        bits[bit_ind + i] = 0;
                        ClearBit(depth - 1, bit_ind + i);
                        i++;
                        remaining -= 64;
                    } while (remaining > 0);
                }

                num_bits -= count;
                return true;
            }

        private:
            constexpr void SetBit(s32 depth, std::size_t offset) {
                while (depth >= 0) {
                    const std::size_t ind{offset / 64};
                    const std::size_t which{offset % 64};
                    const u64 mask{u64(1) << which};

                    u64* bit{std::addressof(bit_storages[depth][ind])};
                    const u64 v{*bit};
                    ASSERT((v & mask) == 0);
                    *bit = v | mask;
                    if (v) {
                        break;
                    }
                    offset = ind;
                    depth--;
                }
            }

            constexpr void ClearBit(s32 depth, std::size_t offset) {
                while (depth >= 0) {
                    const std::size_t ind{offset / 64};
                    const std::size_t which{offset % 64};
                    const u64 mask{u64(1) << which};

                    u64* bit{std::addressof(bit_storages[depth][ind])};
                    u64 v{*bit};
                    ASSERT((v & mask) != 0);
                    v &= ~mask;
                    *bit = v;
                    if (v) {
                        break;
                    }
                    offset = ind;
                    depth--;
                }
            }

        private:
            static constexpr s32 GetRequiredDepth(std::size_t region_size) {
                s32 depth = 0;
                while (true) {
                    region_size /= 64;
                    depth++;
                    if (region_size == 0) {
                        return depth;
                    }
                }
            }

        public:
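            // Example: a bitmap tracking 2048 blocks needs depth 2, and
            // 2048 / 64 + 1 == 33 words of storage, i.e. 264 bytes.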
            static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size) {
                std::size_t overhead_bits = 0;
                for (s32 depth{GetRequiredDepth(region_size) - 1}; depth >= 0; depth--) {
                    region_size = Common::AlignUp(region_size, 64) / 64;
                    overhead_bits += region_size;
                }
                return overhead_bits * sizeof(u64);
            }
        };

    private:
        Bitmap bitmap;
        VAddr heap_address{};
        uintptr_t end_offset{};
        std::size_t block_shift{};
        std::size_t next_block_shift{};

    public:
        constexpr Block() = default;

        constexpr std::size_t GetShift() const {
            return block_shift;
        }
        constexpr std::size_t GetNextShift() const {
            return next_block_shift;
        }
        constexpr std::size_t GetSize() const {
            return std::size_t(1) << GetShift();
        }
        constexpr std::size_t GetNumPages() const {
            return GetSize() / PageSize;
        }
        constexpr std::size_t GetNumFreeBlocks() const {
            return bitmap.GetNumBits();
        }
        constexpr std::size_t GetNumFreePages() const {
            return GetNumFreeBlocks() * GetNumPages();
        }

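        // Example: with block_shift 0x10 (64 KiB) and next_block_shift 0x15
        // (2 MiB), the managed range is widened outward to 2 MiB alignment and
        // end_offset counts it in 64 KiB slots.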
        constexpr u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs,
                                  u64* bit_storage) {
            // Set shifts
            block_shift = bs;
            next_block_shift = nbs;

            // Align the range outward to the coalescing granularity
            VAddr end{addr + size};
            const std::size_t align{(next_block_shift != 0) ? (u64(1) << next_block_shift)
                                                            : (u64(1) << block_shift)};
            addr = Common::AlignDown(addr, align);
            end = Common::AlignUp(end, align);

            heap_address = addr;
            end_offset = (end - addr) / (u64(1) << block_shift);
            return bitmap.Initialize(bit_storage, end_offset);
        }

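        // Example: for the 4 KiB class (shift 0xC, next shift 0x10), diff is
        // 1 << 4 == 16; pushing the last free block of an aligned 16-block
        // group clears all 16 bits at once and returns the 64 KiB-aligned base
        // so the caller can push it into the next class.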
        constexpr VAddr PushBlock(VAddr address) {
            // Set the bit for the free block
            std::size_t offset{(address - heap_address) >> GetShift()};
            bitmap.SetBit(offset);

            // If we have a next shift, try to clear the blocks below and return the address
            if (GetNextShift()) {
                const std::size_t diff{u64(1) << (GetNextShift() - GetShift())};
                offset = Common::AlignDown(offset, diff);
                if (bitmap.ClearRange(offset, diff)) {
                    return heap_address + (offset << GetShift());
                }
            }

            // We couldn't coalesce, or we're already as big as possible
            return 0;
        }

        VAddr PopBlock() {
            // Find a free block
            const s64 soffset{bitmap.FindFreeBlock()};
            if (soffset < 0) {
                return 0;
            }
            const std::size_t offset{static_cast<std::size_t>(soffset)};

            // Update our tracking and return it
            bitmap.ClearBit(offset);
            return heap_address + (offset << GetShift());
        }

    public:
        static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size,
                                                                   std::size_t cur_block_shift,
                                                                   std::size_t next_block_shift) {
            const std::size_t cur_block_size{(u64(1) << cur_block_shift)};
            const std::size_t next_block_size{(u64(1) << next_block_shift)};
            const std::size_t align{(next_block_shift != 0) ? next_block_size : cur_block_size};
            return Bitmap::CalculateMetadataOverheadSize(
                (align * 2 + Common::AlignUp(region_size, align)) / cur_block_size);
        }
    };

public:
    PageHeap() = default;

    constexpr VAddr GetAddress() const {
        return heap_address;
    }
    constexpr std::size_t GetSize() const {
        return heap_size;
    }
    constexpr VAddr GetEndAddress() const {
        return GetAddress() + GetSize();
    }
    constexpr std::size_t GetPageOffset(VAddr block) const {
        return (block - GetAddress()) / PageSize;
    }

    void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
    VAddr AllocateBlock(s32 index);
    void Free(VAddr addr, std::size_t num_pages);

    void UpdateUsedSize() {
        used_size = heap_size - (GetNumFreePages() * PageSize);
    }

    static std::size_t CalculateMetadataOverheadSize(std::size_t region_size);

private:
    constexpr std::size_t GetNumFreePages() const {
        std::size_t num_free{};

        for (const auto& block : blocks) {
            num_free += block.GetNumFreePages();
        }

        return num_free;
    }

    void FreeBlock(VAddr block, s32 index);

    VAddr heap_address{};
    std::size_t heap_size{};
    std::size_t used_size{};
    std::array<Block, NumMemoryBlockPageShifts> blocks{};
    std::vector<u64> metadata;
};

} // namespace Kernel::Memory
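
Continuing the hypothetical sketch from page_heap.cpp above, an allocate/free round trip through the public interface might look like this:

    // Smallest class that fits 10 pages at 1-page alignment: index 1 (64 KiB)
    const s32 index{PageHeap::GetAlignedBlockIndex(10, 1)};
    if (const VAddr block{heap.AllocateBlock(index)}; block != 0) {
        // Hand all 16 pages of the 64 KiB block back; PushBlock re-coalesces
        // neighbouring free blocks into larger classes where possible
        heap.Free(block, PageHeap::GetBlockNumPages(static_cast<std::size_t>(index)));
    }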