-rw-r--r--  src/core/hle/kernel/k_page_heap.cpp  86
-rw-r--r--  src/core/hle/kernel/k_page_heap.h    39
2 files changed, 108 insertions, 17 deletions
diff --git a/src/core/hle/kernel/k_page_heap.cpp b/src/core/hle/kernel/k_page_heap.cpp
index 5ede60168..7b02c7d8b 100644
--- a/src/core/hle/kernel/k_page_heap.cpp
+++ b/src/core/hle/kernel/k_page_heap.cpp
@@ -44,11 +44,11 @@ size_t KPageHeap::GetNumFreePages() const {
     return num_free;
 }
 
-PAddr KPageHeap::AllocateBlock(s32 index, bool random) {
+PAddr KPageHeap::AllocateByLinearSearch(s32 index) {
     const size_t needed_size = m_blocks[index].GetSize();
 
     for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
-        if (const PAddr addr = m_blocks[i].PopBlock(random); addr != 0) {
+        if (const PAddr addr = m_blocks[i].PopBlock(false); addr != 0) {
            if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
                this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
            }
@@ -59,6 +59,88 @@ PAddr KPageHeap::AllocateBlock(s32 index, bool random) {
     return 0;
 }
 
+PAddr KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) {
+    // Get the size and required alignment.
+    const size_t needed_size = num_pages * PageSize;
+    const size_t align_size = align_pages * PageSize;
+
+    // Determine meta-alignment of our desired alignment size.
+    const size_t align_shift = std::countr_zero(align_size);
+
+    // Decide on a block to allocate from.
+    constexpr size_t MinimumPossibleAlignmentsForRandomAllocation = 4;
+    {
+        // By default, we'll want to look at all blocks larger than our current one.
+        s32 max_blocks = static_cast<s32>(m_num_blocks);
+
+        // Determine the maximum block we should try to allocate from.
+        size_t possible_alignments = 0;
+        for (s32 i = index; i < max_blocks; ++i) {
+            // Add the possible alignments from blocks at the current size.
+            possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) *
+                                   m_blocks[i].GetNumFreeBlocks();
+
+            // If there are enough possible alignments, we don't need to look at larger blocks.
+            if (possible_alignments >= MinimumPossibleAlignmentsForRandomAllocation) {
+                max_blocks = i + 1;
+                break;
+            }
+        }
+
+        // If we have any possible alignments which require a larger block, we need to pick one.
+        if (possible_alignments > 0 && index + 1 < max_blocks) {
+            // Select a random alignment from the possibilities.
+            const size_t rnd = m_rng.GenerateRandom(possible_alignments);
+
+            // Determine which block corresponds to the random alignment we chose.
+            possible_alignments = 0;
+            for (s32 i = index; i < max_blocks; ++i) {
+                // Add the possible alignments from blocks at the current size.
+                possible_alignments +=
+                    (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) *
+                    m_blocks[i].GetNumFreeBlocks();
+
+                // If the current block gets us to our random choice, use the current block.
+                if (rnd < possible_alignments) {
+                    index = i;
+                    break;
+                }
+            }
+        }
+    }
+
+    // Pop a block from the index we selected.
+    if (PAddr addr = m_blocks[index].PopBlock(true); addr != 0) {
+        // Determine how much size we have left over.
+        if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size;
+            leftover_size > 0) {
+            // Determine how many valid alignments we can have.
+            const size_t possible_alignments = 1 + (leftover_size >> align_shift);
+
+            // Select a random valid alignment.
+            const size_t random_offset = m_rng.GenerateRandom(possible_alignments) << align_shift;
+
+            // Free memory before the random offset.
+            if (random_offset != 0) {
+                this->Free(addr, random_offset / PageSize);
+            }
+
+            // Advance our block by the random offset.
+            addr += random_offset;
+
+            // Free memory after our allocated block.
+            if (random_offset != leftover_size) {
+                this->Free(addr + needed_size, (leftover_size - random_offset) / PageSize);
+            }
+        }
+
+        // Return the block we allocated.
+        return addr;
+    }
+
+    return 0;
+}
+
 void KPageHeap::FreeBlock(PAddr block, s32 index) {
     do {
         block = m_blocks[index++].PushBlock(block);
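Aside on the AllocateByRandom path above (commentary, not part of the change): for each candidate block size, the number of distinct allocation positions is 1 + (block_size - needed_size) / align_size per free block, and the first loop stops widening the search once at least MinimumPossibleAlignmentsForRandomAllocation (4) positions have been counted; a second, weighted pass then maps the random draw back to a block index, and after the pop the leftover pages on either side of the randomly placed window are handed back with Free(). A minimal sketch of that counting arithmetic, where the 4 KiB PageSize value and the example free-list contents are assumptions made for illustration:

    #include <bit>
    #include <cstddef>
    #include <cstdio>

    int main() {
        constexpr std::size_t PageSize = 4096;             // assumed page size
        constexpr std::size_t needed_size = 4 * PageSize;  // want 4 pages
        constexpr std::size_t align_size = 4 * PageSize;   // aligned to 4 pages
        const std::size_t align_shift = std::countr_zero(align_size);

        // Hypothetical free lists: {block size in bytes, free blocks of that size}.
        struct Entry { std::size_t size; std::size_t free; };
        constexpr Entry blocks[] = {
            {4 * PageSize, 1},  // exact fit: one position per free block
            {32 * PageSize, 2}, // 28 pages of slack => 8 aligned positions each
        };

        std::size_t possible_alignments = 0;
        for (const Entry& b : blocks) {
            possible_alignments += (1 + ((b.size - needed_size) >> align_shift)) * b.free;
            std::printf("after %zu-byte blocks: %zu positions\n", b.size, possible_alignments);
        }
        // Prints 1, then 1 + 8 * 2 = 17; the real code would stop at the second
        // size because 17 >= MinimumPossibleAlignmentsForRandomAllocation (4).
        return 0;
    }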
diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h
index 0917a8bed..9021edcf7 100644
--- a/src/core/hle/kernel/k_page_heap.h
+++ b/src/core/hle/kernel/k_page_heap.h
@@ -14,13 +14,9 @@
 
 namespace Kernel {
 
-class KPageHeap final {
+class KPageHeap {
 public:
-    YUZU_NON_COPYABLE(KPageHeap);
-    YUZU_NON_MOVEABLE(KPageHeap);
-
     KPageHeap() = default;
-    ~KPageHeap() = default;
 
     constexpr PAddr GetAddress() const {
         return m_heap_address;
@@ -57,7 +53,20 @@
         m_initial_used_size = m_heap_size - free_size - reserved_size;
     }
 
-    PAddr AllocateBlock(s32 index, bool random);
+    PAddr AllocateBlock(s32 index, bool random) {
+        if (random) {
+            const size_t block_pages = m_blocks[index].GetNumPages();
+            return this->AllocateByRandom(index, block_pages, block_pages);
+        } else {
+            return this->AllocateByLinearSearch(index);
+        }
+    }
+
+    PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
+        // TODO: linear search support?
+        return this->AllocateByRandom(index, num_pages, align_pages);
+    }
+
     void Free(PAddr addr, size_t num_pages);
 
     static size_t CalculateManagementOverheadSize(size_t region_size) {
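For callers, the inline dispatchers in the hunk above keep AllocateBlock's existing signature, while AllocateAligned always takes the randomized path for now (per the TODO). A hypothetical call-site sketch follows; the helper names are illustrative rather than part of the change, it assumes an already-initialized KPageHeap, and both heap calls return 0 on failure:

    #include "core/hle/kernel/k_page_heap.h"

    namespace Kernel {

    // Hypothetical helper: exactly num_pages pages at a randomly placed,
    // align_pages-aligned offset; leftover pages in the popped block are
    // freed back to the heap by AllocateByRandom.
    PAddr ExampleAllocateAligned(KPageHeap& heap, size_t num_pages, size_t align_pages) {
        const s32 index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages);
        return heap.AllocateAligned(index, num_pages, align_pages);
    }

    // Hypothetical helper using the pre-existing interface: pop one whole block
    // at the given index; random=true now routes through AllocateByRandom too.
    PAddr ExampleAllocateBlock(KPageHeap& heap, size_t num_pages) {
        return heap.AllocateBlock(KPageHeap::GetBlockIndex(num_pages), /*random=*/true);
    }

    } // namespace Kernel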
@@ -68,7 +77,7 @@
     static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
         const size_t target_pages = std::max(num_pages, align_pages);
         for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
-            if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
+            if (target_pages <= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                 return static_cast<s32>(i);
             }
         }
@@ -77,7 +86,7 @@
 
     static constexpr s32 GetBlockIndex(size_t num_pages) {
         for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) {
-            if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
+            if (num_pages >= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
                 return i;
             }
         }
@@ -85,7 +94,7 @@
     }
 
     static constexpr size_t GetBlockSize(size_t index) {
-        return size_t(1) << MemoryBlockPageShifts[index];
+        return static_cast<size_t>(1) << MemoryBlockPageShifts[index];
     }
 
     static constexpr size_t GetBlockNumPages(size_t index) {
@@ -93,13 +102,9 @@
     }
 
 private:
-    class Block final {
+    class Block {
     public:
-        YUZU_NON_COPYABLE(Block);
-        YUZU_NON_MOVEABLE(Block);
-
         Block() = default;
-        ~Block() = default;
 
         constexpr size_t GetShift() const {
             return m_block_shift;
@@ -201,6 +206,9 @@
     };
 
 private:
+    PAddr AllocateByLinearSearch(s32 index);
+    PAddr AllocateByRandom(s32 index, size_t num_pages, size_t align_pages);
+
     static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
                                                   size_t num_block_shifts);
 
@@ -209,7 +217,8 @@
     size_t m_heap_size{};
     size_t m_initial_used_size{};
     size_t m_num_blocks{};
-    std::array<Block, NumMemoryBlockPageShifts> m_blocks{};
+    std::array<Block, NumMemoryBlockPageShifts> m_blocks;
+    KPageBitmap::RandomBitGenerator m_rng;
     std::vector<u64> m_management_data;
 };
 