author    bunnei  2022-02-21 17:52:36 -0800
committer bunnei  2022-02-27 10:34:02 -0800
commit    06e2b76c759af22be60c077489271b42ad49c732 (patch)
tree      8dc02f9c3395ea5a169ad04c4af4eededf814a7f
parent    hle: kernel: Add initial_process.h header. (diff)
hle: kernel: k_page_heap: Various updates and improvements.
- KPageHeap tracks physical addresses, not virtual addresses.
- Various updates and improvements to match latest documentation for this type.
Diffstat
-rw-r--r--  src/core/hle/kernel/k_page_heap.cpp  126
-rw-r--r--  src/core/hle/kernel/k_page_heap.h    221
2 files changed, 192 insertions, 155 deletions
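As context for the diff below: the reworked public API takes the heap's physical range plus a separate virtual buffer for the bitmap "management" data, sized via CalculateManagementOverheadSize(). A minimal caller sketch against the new signatures; the addresses, sizes, and the AllocateManagementBuffer() helper are hypothetical, and the real call site (likely KMemoryManager) is outside this commit:

    // Sketch only: values and AllocateManagementBuffer() are hypothetical;
    // the KPageHeap calls themselves match the API introduced below.
    KPageHeap heap;
    const PAddr heap_paddr = 0x80000000;            // physical base of the managed region
    const size_t heap_size = 512ULL * 1024 * 1024;  // 512 MiB, page-aligned
    const size_t mgmt_size = KPageHeap::CalculateManagementOverheadSize(heap_size);
    const VAddr mgmt_vaddr = AllocateManagementBuffer(mgmt_size);
    heap.Initialize(heap_paddr, heap_size, mgmt_vaddr, mgmt_size);
    heap.SetInitialUsedSize(0);  // nothing reserved in this sketch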
diff --git a/src/core/hle/kernel/k_page_heap.cpp b/src/core/hle/kernel/k_page_heap.cpp
index 29d996d62..97a5890a0 100644
--- a/src/core/hle/kernel/k_page_heap.cpp
+++ b/src/core/hle/kernel/k_page_heap.cpp
@@ -7,35 +7,51 @@
 
 namespace Kernel {
 
-void KPageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) {
-    // Check our assumptions
-    ASSERT(Common::IsAligned((address), PageSize));
+void KPageHeap::Initialize(PAddr address, size_t size, VAddr management_address,
+                           size_t management_size, const size_t* block_shifts,
+                           size_t num_block_shifts) {
+    // Check our assumptions.
+    ASSERT(Common::IsAligned(address, PageSize));
     ASSERT(Common::IsAligned(size, PageSize));
+    ASSERT(0 < num_block_shifts && num_block_shifts <= NumMemoryBlockPageShifts);
+    const VAddr management_end = management_address + management_size;
 
-    // Set our members
-    heap_address = address;
-    heap_size = size;
+    // Set our members.
+    m_heap_address = address;
+    m_heap_size = size;
+    m_num_blocks = num_block_shifts;
 
-    // Setup bitmaps
-    metadata.resize(metadata_size / sizeof(u64));
-    u64* cur_bitmap_storage{metadata.data()};
-    for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
-        const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
-        const std::size_t next_block_shift{
-            (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
-        cur_bitmap_storage = blocks[i].Initialize(heap_address, heap_size, cur_block_shift,
-                                                  next_block_shift, cur_bitmap_storage);
+    // Setup bitmaps.
+    m_management_data.resize(management_size / sizeof(u64));
+    u64* cur_bitmap_storage{m_management_data.data()};
+    for (size_t i = 0; i < num_block_shifts; i++) {
+        const size_t cur_block_shift = block_shifts[i];
+        const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
+        cur_bitmap_storage = m_blocks[i].Initialize(m_heap_address, m_heap_size, cur_block_shift,
+                                                    next_block_shift, cur_bitmap_storage);
     }
+
+    // Ensure we didn't overextend our bounds.
+    ASSERT(VAddr(cur_bitmap_storage) <= management_end);
+}
+
+size_t KPageHeap::GetNumFreePages() const {
+    size_t num_free = 0;
+
+    for (size_t i = 0; i < m_num_blocks; i++) {
+        num_free += m_blocks[i].GetNumFreePages();
+    }
+
+    return num_free;
 }
 
-VAddr KPageHeap::AllocateBlock(s32 index, bool random) {
-    const std::size_t needed_size{blocks[index].GetSize()};
+PAddr KPageHeap::AllocateBlock(s32 index, bool random) {
+    const size_t needed_size = m_blocks[index].GetSize();
 
-    for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) {
-        if (const VAddr addr{blocks[i].PopBlock(random)}; addr) {
-            if (const std::size_t allocated_size{blocks[i].GetSize()};
-                allocated_size > needed_size) {
-                Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
+    for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
+        if (const PAddr addr = m_blocks[i].PopBlock(random); addr != 0) {
+            if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
+                this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
             }
             return addr;
         }
@@ -44,34 +60,34 @@ VAddr KPageHeap::AllocateBlock(s32 index, bool random) {
     return 0;
 }
 
-void KPageHeap::FreeBlock(VAddr block, s32 index) {
+void KPageHeap::FreeBlock(PAddr block, s32 index) {
     do {
-        block = blocks[index++].PushBlock(block);
+        block = m_blocks[index++].PushBlock(block);
     } while (block != 0);
 }
 
-void KPageHeap::Free(VAddr addr, std::size_t num_pages) {
-    // Freeing no pages is a no-op
+void KPageHeap::Free(PAddr addr, size_t num_pages) {
+    // Freeing no pages is a no-op.
     if (num_pages == 0) {
         return;
     }
 
-    // Find the largest block size that we can free, and free as many as possible
-    s32 big_index{static_cast<s32>(MemoryBlockPageShifts.size()) - 1};
-    const VAddr start{addr};
-    const VAddr end{(num_pages * PageSize) + addr};
-    VAddr before_start{start};
-    VAddr before_end{start};
-    VAddr after_start{end};
-    VAddr after_end{end};
+    // Find the largest block size that we can free, and free as many as possible.
+    s32 big_index = static_cast<s32>(m_num_blocks) - 1;
+    const PAddr start = addr;
+    const PAddr end = addr + num_pages * PageSize;
+    PAddr before_start = start;
+    PAddr before_end = start;
+    PAddr after_start = end;
+    PAddr after_end = end;
     while (big_index >= 0) {
-        const std::size_t block_size{blocks[big_index].GetSize()};
-        const VAddr big_start{Common::AlignUp((start), block_size)};
-        const VAddr big_end{Common::AlignDown((end), block_size)};
+        const size_t block_size = m_blocks[big_index].GetSize();
+        const PAddr big_start = Common::AlignUp(start, block_size);
+        const PAddr big_end = Common::AlignDown(end, block_size);
         if (big_start < big_end) {
-            // Free as many big blocks as we can
-            for (auto block{big_start}; block < big_end; block += block_size) {
-                FreeBlock(block, big_index);
+            // Free as many big blocks as we can.
+            for (auto block = big_start; block < big_end; block += block_size) {
+                this->FreeBlock(block, big_index);
             }
             before_end = big_start;
             after_start = big_end;
@@ -81,31 +97,31 @@ void KPageHeap::Free(VAddr addr, std::size_t num_pages) {
     }
     ASSERT(big_index >= 0);
 
-    // Free space before the big blocks
-    for (s32 i{big_index - 1}; i >= 0; i--) {
-        const std::size_t block_size{blocks[i].GetSize()};
+    // Free space before the big blocks.
+    for (s32 i = big_index - 1; i >= 0; i--) {
+        const size_t block_size = m_blocks[i].GetSize();
         while (before_start + block_size <= before_end) {
             before_end -= block_size;
-            FreeBlock(before_end, i);
+            this->FreeBlock(before_end, i);
         }
     }
 
-    // Free space after the big blocks
-    for (s32 i{big_index - 1}; i >= 0; i--) {
-        const std::size_t block_size{blocks[i].GetSize()};
+    // Free space after the big blocks.
+    for (s32 i = big_index - 1; i >= 0; i--) {
+        const size_t block_size = m_blocks[i].GetSize();
         while (after_start + block_size <= after_end) {
-            FreeBlock(after_start, i);
+            this->FreeBlock(after_start, i);
             after_start += block_size;
         }
     }
 }
 
-std::size_t KPageHeap::CalculateManagementOverheadSize(std::size_t region_size) {
-    std::size_t overhead_size = 0;
-    for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) {
-        const std::size_t cur_block_shift{MemoryBlockPageShifts[i]};
-        const std::size_t next_block_shift{
-            (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0};
+size_t KPageHeap::CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
+                                                  size_t num_block_shifts) {
+    size_t overhead_size = 0;
+    for (size_t i = 0; i < num_block_shifts; i++) {
+        const size_t cur_block_shift = block_shifts[i];
+        const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
         overhead_size += KPageHeap::Block::CalculateManagementOverheadSize(
             region_size, cur_block_shift, next_block_shift);
     }
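The Free() path above is the classic buddy-style carve-up: align the freed range inward to the largest block size that fits, free those big blocks, then peel progressively smaller blocks off the unaligned remainders on each side. A self-contained sketch of just the range math, with hypothetical sizes (only the AlignUp/AlignDown logic mirrors the commit):

    // Illustration of the carve-up in KPageHeap::Free(); names are local to
    // this sketch. Two block classes assumed: 4 KiB pages and 64 KiB blocks.
    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
        constexpr uint64_t PageSize = 0x1000;
        constexpr uint64_t BigBlock = 0x10000;  // 64 KiB

        // A freed range that is 64 KiB-aligned on neither end.
        const uint64_t start = 0x80001000;
        const uint64_t end = start + 40 * PageSize;  // 0x80029000

        // Align inward to find the span coverable by big blocks, as Free()
        // does with Common::AlignUp/Common::AlignDown.
        const uint64_t big_start = (start + BigBlock - 1) & ~(BigBlock - 1);  // 0x80010000
        const uint64_t big_end = end & ~(BigBlock - 1);                       // 0x80020000

        std::printf("big blocks:   [%#" PRIx64 ", %#" PRIx64 ")\n", big_start, big_end);
        std::printf("small before: [%#" PRIx64 ", %#" PRIx64 ")\n", start, big_start);  // 15 pages
        std::printf("small after:  [%#" PRIx64 ", %#" PRIx64 ")\n", big_end, end);      // 9 pages
    }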
diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h
index a65aa28a0..60fff766b 100644
--- a/src/core/hle/kernel/k_page_heap.h
+++ b/src/core/hle/kernel/k_page_heap.h
@@ -23,54 +23,73 @@ public:
     KPageHeap() = default;
     ~KPageHeap() = default;
 
-    constexpr VAddr GetAddress() const {
-        return heap_address;
+    constexpr PAddr GetAddress() const {
+        return m_heap_address;
     }
-    constexpr std::size_t GetSize() const {
-        return heap_size;
+    constexpr size_t GetSize() const {
+        return m_heap_size;
     }
-    constexpr VAddr GetEndAddress() const {
-        return GetAddress() + GetSize();
+    constexpr PAddr GetEndAddress() const {
+        return this->GetAddress() + this->GetSize();
     }
-    constexpr std::size_t GetPageOffset(VAddr block) const {
-        return (block - GetAddress()) / PageSize;
+    constexpr size_t GetPageOffset(PAddr block) const {
+        return (block - this->GetAddress()) / PageSize;
+    }
+    constexpr size_t GetPageOffsetToEnd(PAddr block) const {
+        return (this->GetEndAddress() - block) / PageSize;
+    }
+
+    void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address,
+                    size_t management_size) {
+        return this->Initialize(heap_address, heap_size, management_address, management_size,
+                                MemoryBlockPageShifts.data(), NumMemoryBlockPageShifts);
+    }
+
+    size_t GetFreeSize() const {
+        return this->GetNumFreePages() * PageSize;
     }
 
-    void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
-    VAddr AllocateBlock(s32 index, bool random);
-    void Free(VAddr addr, std::size_t num_pages);
+    void SetInitialUsedSize(size_t reserved_size) {
+        // Check that the reserved size is valid.
+        const size_t free_size = this->GetNumFreePages() * PageSize;
+        ASSERT(m_heap_size >= free_size + reserved_size);
 
-    void UpdateUsedSize() {
-        used_size = heap_size - (GetNumFreePages() * PageSize);
+        // Set the initial used size.
+        m_initial_used_size = m_heap_size - free_size - reserved_size;
     }
 
-    static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
+    PAddr AllocateBlock(s32 index, bool random);
+    void Free(PAddr addr, size_t num_pages);
+
+    static size_t CalculateManagementOverheadSize(size_t region_size) {
+        return CalculateManagementOverheadSize(region_size, MemoryBlockPageShifts.data(),
+                                               NumMemoryBlockPageShifts);
+    }
 
-    static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
-        const auto target_pages{std::max(num_pages, align_pages)};
-        for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
-            if (target_pages <=
-                (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
+    static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
+        const size_t target_pages = std::max(num_pages, align_pages);
+        for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
+            if (target_pages <= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
                 return static_cast<s32>(i);
             }
        }
         return -1;
     }
 
-    static constexpr s32 GetBlockIndex(std::size_t num_pages) {
-        for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
-            if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
+    static constexpr s32 GetBlockIndex(size_t num_pages) {
+        for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) {
+            if (num_pages >= (size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
                 return i;
             }
         }
         return -1;
     }
 
-    static constexpr std::size_t GetBlockSize(std::size_t index) {
-        return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index];
+    static constexpr size_t GetBlockSize(size_t index) {
+        return size_t(1) << MemoryBlockPageShifts[index];
     }
 
-    static constexpr std::size_t GetBlockNumPages(std::size_t index) {
+    static constexpr size_t GetBlockNumPages(size_t index) {
         return GetBlockSize(index) / PageSize;
     }
 
@@ -83,114 +102,116 @@ private:
         Block() = default;
         ~Block() = default;
 
-        constexpr std::size_t GetShift() const {
-            return block_shift;
+        constexpr size_t GetShift() const {
+            return m_block_shift;
         }
-        constexpr std::size_t GetNextShift() const {
-            return next_block_shift;
+        constexpr size_t GetNextShift() const {
+            return m_next_block_shift;
         }
-        constexpr std::size_t GetSize() const {
-            return static_cast<std::size_t>(1) << GetShift();
+        constexpr size_t GetSize() const {
+            return u64(1) << this->GetShift();
         }
-        constexpr std::size_t GetNumPages() const {
-            return GetSize() / PageSize;
+        constexpr size_t GetNumPages() const {
+            return this->GetSize() / PageSize;
         }
-        constexpr std::size_t GetNumFreeBlocks() const {
-            return bitmap.GetNumBits();
+        constexpr size_t GetNumFreeBlocks() const {
+            return m_bitmap.GetNumBits();
         }
-        constexpr std::size_t GetNumFreePages() const {
-            return GetNumFreeBlocks() * GetNumPages();
+        constexpr size_t GetNumFreePages() const {
+            return this->GetNumFreeBlocks() * this->GetNumPages();
         }
 
-        u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs,
-                        u64* bit_storage) {
-            // Set shifts
-            block_shift = bs;
-            next_block_shift = nbs;
+        u64* Initialize(PAddr addr, size_t size, size_t bs, size_t nbs, u64* bit_storage) {
+            // Set shifts.
+            m_block_shift = bs;
+            m_next_block_shift = nbs;
 
-            // Align up the address
-            VAddr end{addr + size};
-            const auto align{(next_block_shift != 0) ? (1ULL << next_block_shift)
-                                                     : (1ULL << block_shift)};
-            addr = Common::AlignDown((addr), align);
-            end = Common::AlignUp((end), align);
+            // Align up the address.
+            PAddr end = addr + size;
+            const size_t align = (m_next_block_shift != 0) ? (u64(1) << m_next_block_shift)
+                                                           : (u64(1) << m_block_shift);
+            addr = Common::AlignDown(addr, align);
+            end = Common::AlignUp(end, align);
 
-            heap_address = addr;
-            end_offset = (end - addr) / (1ULL << block_shift);
-            return bitmap.Initialize(bit_storage, end_offset);
+            m_heap_address = addr;
+            m_end_offset = (end - addr) / (u64(1) << m_block_shift);
+            return m_bitmap.Initialize(bit_storage, m_end_offset);
         }
 
-        VAddr PushBlock(VAddr address) {
-            // Set the bit for the free block
-            std::size_t offset{(address - heap_address) >> GetShift()};
-            bitmap.SetBit(offset);
+        PAddr PushBlock(PAddr address) {
+            // Set the bit for the free block.
+            size_t offset = (address - m_heap_address) >> this->GetShift();
+            m_bitmap.SetBit(offset);
 
-            // If we have a next shift, try to clear the blocks below and return the address
-            if (GetNextShift()) {
-                const auto diff{1ULL << (GetNextShift() - GetShift())};
+            // If we have a next shift, try to clear the blocks below this one and return the new
+            // address.
+            if (this->GetNextShift()) {
+                const size_t diff = u64(1) << (this->GetNextShift() - this->GetShift());
                 offset = Common::AlignDown(offset, diff);
-                if (bitmap.ClearRange(offset, diff)) {
-                    return heap_address + (offset << GetShift());
+                if (m_bitmap.ClearRange(offset, diff)) {
+                    return m_heap_address + (offset << this->GetShift());
                 }
             }
 
-            // We couldn't coalesce, or we're already as big as possible
-            return 0;
+            // We couldn't coalesce, or we're already as big as possible.
+            return {};
         }
 
-        VAddr PopBlock(bool random) {
-            // Find a free block
-            const s64 soffset{bitmap.FindFreeBlock(random)};
+        PAddr PopBlock(bool random) {
+            // Find a free block.
+            s64 soffset = m_bitmap.FindFreeBlock(random);
             if (soffset < 0) {
-                return 0;
+                return {};
             }
-            const auto offset{static_cast<std::size_t>(soffset)};
+            const size_t offset = static_cast<size_t>(soffset);
 
-            // Update our tracking and return it
-            bitmap.ClearBit(offset);
-            return heap_address + (offset << GetShift());
+            // Update our tracking and return it.
+            m_bitmap.ClearBit(offset);
+            return m_heap_address + (offset << this->GetShift());
         }
 
-        static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size,
-                                                                     std::size_t cur_block_shift,
-                                                                     std::size_t next_block_shift) {
-            const auto cur_block_size{(1ULL << cur_block_shift)};
-            const auto next_block_size{(1ULL << next_block_shift)};
-            const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size};
+    public:
+        static constexpr size_t CalculateManagementOverheadSize(size_t region_size,
+                                                                size_t cur_block_shift,
+                                                                size_t next_block_shift) {
+            const size_t cur_block_size = (u64(1) << cur_block_shift);
+            const size_t next_block_size = (u64(1) << next_block_shift);
+            const size_t align = (next_block_shift != 0) ? next_block_size : cur_block_size;
             return KPageBitmap::CalculateManagementOverheadSize(
                 (align * 2 + Common::AlignUp(region_size, align)) / cur_block_size);
         }
 
     private:
-        KPageBitmap bitmap;
-        VAddr heap_address{};
-        uintptr_t end_offset{};
-        std::size_t block_shift{};
-        std::size_t next_block_shift{};
+        KPageBitmap m_bitmap;
+        PAddr m_heap_address{};
+        uintptr_t m_end_offset{};
+        size_t m_block_shift{};
+        size_t m_next_block_shift{};
     };
 
-    constexpr std::size_t GetNumFreePages() const {
-        std::size_t num_free{};
-
-        for (const auto& block : blocks) {
-            num_free += block.GetNumFreePages();
-        }
-
-        return num_free;
-    }
+private:
+    void Initialize(PAddr heap_address, size_t heap_size, VAddr management_address,
+                    size_t management_size, const size_t* block_shifts, size_t num_block_shifts);
+    size_t GetNumFreePages() const;
 
-    void FreeBlock(VAddr block, s32 index);
+    void FreeBlock(PAddr block, s32 index);
 
-    static constexpr std::size_t NumMemoryBlockPageShifts{7};
-    static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
+    static constexpr size_t NumMemoryBlockPageShifts{7};
+    static constexpr std::array<size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
         0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
     };
 
-    VAddr heap_address{};
-    std::size_t heap_size{};
-    std::size_t used_size{};
-    std::array<Block, NumMemoryBlockPageShifts> blocks{};
-    std::vector<u64> metadata;
+private:
+    static size_t CalculateManagementOverheadSize(size_t region_size, const size_t* block_shifts,
+                                                  size_t num_block_shifts);
+
+private:
+    PAddr m_heap_address{};
+    size_t m_heap_size{};
+    size_t m_initial_used_size{};
+    size_t m_num_blocks{};
+    std::array<Block, NumMemoryBlockPageShifts> m_blocks{};
+    std::vector<u64> m_management_data;
 };
 
 } // namespace Kernel
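For reference, the static index helpers in the header are pure shift arithmetic over MemoryBlockPageShifts, whose seven entries correspond to 4 KiB, 64 KiB, 2 MiB, 4 MiB, 32 MiB, 512 MiB, and 1 GiB blocks. A sketch that re-implements the GetBlockIndex() selection rule outside the class (the shift table matches the commit; everything else is illustrative):

    // Re-implementation of the GetBlockIndex() selection rule for
    // illustration; the shift table matches k_page_heap.h.
    #include <array>
    #include <cstddef>
    #include <cstdio>

    constexpr std::size_t PageSize = 0x1000;
    constexpr std::array<std::size_t, 7> MemoryBlockPageShifts{
        0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
    };

    // Largest block class whose size still fits within num_pages, or -1.
    constexpr int GetBlockIndex(std::size_t num_pages) {
        for (int i = static_cast<int>(MemoryBlockPageShifts.size()) - 1; i >= 0; i--) {
            if (num_pages >= (std::size_t(1) << MemoryBlockPageShifts[i]) / PageSize) {
                return i;
            }
        }
        return -1;
    }

    int main() {
        static_assert(GetBlockIndex(1) == 0);    // one 4 KiB page
        static_assert(GetBlockIndex(16) == 1);   // 64 KiB block
        static_assert(GetBlockIndex(512) == 2);  // 2 MiB block
        std::printf("index for 512 pages: %d\n", GetBlockIndex(512));
    }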