 src/core/hle/kernel/k_dynamic_page_manager.h | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 50 insertions(+), 17 deletions(-)
diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h
index 9076c8fa3..ac80d60a1 100644
--- a/src/core/hle/kernel/k_dynamic_page_manager.h
+++ b/src/core/hle/kernel/k_dynamic_page_manager.h
@@ -3,6 +3,8 @@
 
 #pragma once
 
+#include <vector>
+
 #include "common/alignment.h"
 #include "common/common_types.h"
 #include "core/hle/kernel/k_page_bitmap.h"
@@ -33,28 +35,36 @@ public:
         return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address));
     }
 
-    Result Initialize(VAddr addr, size_t sz) {
+    Result Initialize(VAddr memory, size_t size, size_t align) {
         // We need to have positive size.
-        R_UNLESS(sz > 0, ResultOutOfMemory);
-        m_backing_memory.resize(sz);
+        R_UNLESS(size > 0, ResultOutOfMemory);
+        m_backing_memory.resize(size);
+
+        // Set addresses.
+        m_address = memory;
+        m_aligned_address = Common::AlignDown(memory, align);
 
-        // Calculate management overhead.
-        const size_t management_size =
-            KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer));
-        const size_t allocatable_size = sz - management_size;
+        // Calculate extents.
+        const size_t managed_size = m_address + size - m_aligned_address;
+        const size_t overhead_size = Common::AlignUp(
+            KPageBitmap::CalculateManagementOverheadSize(managed_size / sizeof(PageBuffer)),
+            sizeof(PageBuffer));
+        R_UNLESS(overhead_size < size, ResultOutOfMemory);
 
         // Set tracking fields.
-        m_address = addr;
-        m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer));
-        m_count = allocatable_size / sizeof(PageBuffer);
-        R_UNLESS(m_count > 0, ResultOutOfMemory);
+        m_size = Common::AlignDown(size - overhead_size, sizeof(PageBuffer));
+        m_count = m_size / sizeof(PageBuffer);
 
         // Clear the management region.
-        u64* management_ptr = GetPointer<u64>(m_address + allocatable_size);
-        std::memset(management_ptr, 0, management_size);
+        u64* management_ptr = GetPointer<u64>(m_address + size - overhead_size);
+        std::memset(management_ptr, 0, overhead_size);
 
         // Initialize the bitmap.
-        m_page_bitmap.Initialize(management_ptr, m_count);
+        const size_t allocatable_region_size =
+            (m_address + size - overhead_size) - m_aligned_address;
+        ASSERT(allocatable_region_size >= sizeof(PageBuffer));
+
+        m_page_bitmap.Initialize(management_ptr, allocatable_region_size / sizeof(PageBuffer));
 
         // Free the pages to the bitmap.
         for (size_t i = 0; i < m_count; i++) {
@@ -62,7 +72,8 @@ public:
             std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize);
 
             // Set the bit for the free page.
-            m_page_bitmap.SetBit(i);
+            m_page_bitmap.SetBit((m_address + (i * sizeof(PageBuffer)) - m_aligned_address) /
+                                 sizeof(PageBuffer));
         }
 
         R_SUCCEED();
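
Reviewer note on the indexing above: once the bitmap is based at the aligned-down address, page i of the managed region no longer corresponds to bit i, which is why SetBit now takes `(m_address + i * sizeof(PageBuffer) - m_aligned_address) / sizeof(PageBuffer)`. A minimal standalone sketch of that arithmetic, using hypothetical values (the 2 MiB alignment, the base address, and the 0x1000 page size are illustrative, not taken from this codebase):

```cpp
#include <cassert>
#include <cstdint>

int main() {
    constexpr std::uint64_t page = 0x1000;     // stand-in for sizeof(PageBuffer)
    constexpr std::uint64_t align = 0x200000;  // hypothetical 2 MiB alignment

    const std::uint64_t address = 0x80123000;                     // unaligned region base
    const std::uint64_t aligned_address = address & ~(align - 1); // AlignDown -> 0x80000000

    // Page 0 of the managed region lives at `address`, but its bit index is
    // computed relative to `aligned_address`, so it is bit 0x123, not bit 0.
    const std::uint64_t i = 0;
    const std::uint64_t bit = (address + i * page - aligned_address) / page;
    assert(bit == 0x123); // (0x80123000 - 0x80000000) / 0x1000
    return 0;
}
```
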
@@ -101,7 +112,28 @@ public:
         m_page_bitmap.ClearBit(offset);
         m_peak = std::max(m_peak, (++m_used));
 
-        return GetPointer<PageBuffer>(m_address) + offset;
+        return GetPointer<PageBuffer>(m_aligned_address) + offset;
+    }
+
+    PageBuffer* Allocate(size_t count) {
+        // Take the lock.
+        // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
+        KScopedSpinLock lk(m_lock);
+
+        // Find a random free block.
+        s64 soffset = m_page_bitmap.FindFreeRange(count);
+        if (soffset < 0) [[unlikely]] {
+            return nullptr;
+        }
+
+        const size_t offset = static_cast<size_t>(soffset);
+
+        // Update our tracking.
+        m_page_bitmap.ClearRange(offset, count);
+        m_used += count;
+        m_peak = std::max(m_peak, m_used);
+
+        return GetPointer<PageBuffer>(m_aligned_address) + offset;
     }
 
     void Free(PageBuffer* pb) {
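
The new Allocate(size_t count) overload is built on two KPageBitmap operations, FindFreeRange and ClearRange. As a rough mental model of their contract, here is a toy that collapses the (hierarchical, randomized) bitmap to a single 64-bit word with a first-fit search; it is not the real KPageBitmap, just the shape of the operations:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Toy single-word bitmap: a set bit means the page is free.
std::int64_t FindFreeRange(std::uint64_t bits, std::size_t count) {
    const std::uint64_t mask = (std::uint64_t{1} << count) - 1; // assumes count < 64
    for (std::size_t off = 0; off + count <= 64; ++off) {
        if (((bits >> off) & mask) == mask) {
            return static_cast<std::int64_t>(off); // found `count` consecutive free pages
        }
    }
    return -1; // no run is large enough
}

std::uint64_t ClearRange(std::uint64_t bits, std::size_t off, std::size_t count) {
    const std::uint64_t mask = ((std::uint64_t{1} << count) - 1) << off;
    return bits & ~mask; // mark the run as allocated
}

int main() {
    std::uint64_t bitmap = 0b1111'0110; // free pages: 1, 2, 4, 5, 6, 7

    const std::int64_t off = FindFreeRange(bitmap, 3);
    assert(off == 4); // the run at pages 1-2 is too short; pages 4-6 are the first fit

    bitmap = ClearRange(bitmap, static_cast<std::size_t>(off), 3);
    assert(bitmap == 0b1000'0110); // pages 4-6 now in use; 1, 2, 7 still free
    return 0;
}
```
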
@@ -113,7 +145,7 @@ public:
         KScopedSpinLock lk(m_lock);
 
         // Set the bit for the free page.
-        size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_address) / sizeof(PageBuffer);
+        size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_aligned_address) / sizeof(PageBuffer);
         m_page_bitmap.SetBit(offset);
 
         // Decrement our used count.
@@ -127,6 +159,7 @@ private:
     size_t m_peak{};
     size_t m_count{};
     VAddr m_address{};
+    VAddr m_aligned_address{};
    size_t m_size{};
 
     // TODO(bunnei): Back by host memory until we emulate kernel virtual address space.
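
Lastly, the extents arithmetic in the reworked Initialize is easier to check with concrete numbers. In the sketch below the region parameters are hypothetical and BitmapOverhead is a simplified stand-in for KPageBitmap::CalculateManagementOverheadSize (the real hierarchical bitmap needs somewhat more space), but the computation mirrors the new code: the bitmap tracks everything from the aligned-down base to the end of the region, and its storage is carved off the region's tail:

```cpp
#include <cassert>
#include <cstdint>

constexpr std::uint64_t AlignDown(std::uint64_t v, std::uint64_t a) { return v & ~(a - 1); }
constexpr std::uint64_t AlignUp(std::uint64_t v, std::uint64_t a) { return (v + a - 1) & ~(a - 1); }

// Simplified stand-in for KPageBitmap::CalculateManagementOverheadSize:
// one bit per page, rounded up to whole u64 words.
constexpr std::uint64_t BitmapOverhead(std::uint64_t page_count) {
    return ((page_count + 63) / 64) * 8;
}

int main() {
    constexpr std::uint64_t page = 0x1000; // stand-in for sizeof(PageBuffer)

    // Hypothetical region: 1 MiB at a base that is not 2 MiB aligned.
    const std::uint64_t address = 0x80123000;
    const std::uint64_t size = 0x100000;
    const std::uint64_t aligned_address = AlignDown(address, 0x200000); // 0x80000000

    // managed_size covers the aligned base through the end of the region, even
    // though the pages below `address` are never actually freed into the bitmap.
    const std::uint64_t managed_size = address + size - aligned_address;
    assert(managed_size / page == 0x223); // 547 bit positions to track

    // Overhead is rounded up to whole pages and carved off the region's tail.
    const std::uint64_t overhead_size = AlignUp(BitmapOverhead(managed_size / page), page);
    assert(overhead_size == page);
    assert(address + size - overhead_size == 0x80222000); // management region base
    return 0;
}
```
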