Diffstat (limited to 'src/common/address_space.inc')
-rw-r--r--  src/common/address_space.inc  366
1 file changed, 366 insertions(+), 0 deletions(-)
diff --git a/src/common/address_space.inc b/src/common/address_space.inc
new file mode 100644
index 000000000..2195dabd5
--- /dev/null
+++ b/src/common/address_space.inc
@@ -0,0 +1,366 @@
// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
// SPDX-License-Identifier: GPL-3.0-or-later

#include "common/address_space.h"
#include "common/assert.h"

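// These helper macros stamp out the shared template head (the template parameter list plus
// the AddressSpaceValid constraint) so that each out-of-line member definition below does not
// have to repeat it. The *_CONST variants omit the return type for use with constructors.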
#define MAP_MEMBER(returnType) \
    template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, \
              bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo> \
    requires AddressSpaceValid<VaType, AddressSpaceBits> returnType FlatAddressSpaceMap< \
        VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo>
#define MAP_MEMBER_CONST() \
    template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, \
              bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo> \
    requires AddressSpaceValid<VaType, AddressSpaceBits> FlatAddressSpaceMap< \
        VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo>

#define MM_MEMBER(returnType) \
    template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \
    requires AddressSpaceValid<VaType, AddressSpaceBits> returnType \
    FlatMemoryManager<VaType, UnmappedVa, AddressSpaceBits>

#define ALLOC_MEMBER(returnType) \
    template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \
    requires AddressSpaceValid<VaType, AddressSpaceBits> returnType \
    FlatAllocator<VaType, UnmappedVa, AddressSpaceBits>
#define ALLOC_MEMBER_CONST() \
    template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \
    requires AddressSpaceValid<VaType, AddressSpaceBits> \
    FlatAllocator<VaType, UnmappedVa, AddressSpaceBits>

namespace Common {
MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType va_limit_,
                                        std::function<void(VaType, VaType)> unmap_callback_)
    : va_limit{va_limit_}, unmap_callback{std::move(unmap_callback_)} {
    if (va_limit > VaMaximum) {
        ASSERT_MSG(false, "Invalid VA limit!");
    }
}

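// The address space is represented as a sorted vector of blocks: each block starts at its
// `virt` address and extends up to the `virt` of the following block. The vector is always
// terminated by an unmapped block, and two unmapped blocks never appear back to back.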
MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info) {
    VaType virt_end{virt + size};

    if (virt_end > va_limit) {
        ASSERT_MSG(false,
                   "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}",
                   virt_end, va_limit);
    }

    auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)};
    if (block_end_successor == blocks.begin()) {
        ASSERT_MSG(false, "Trying to map a block before the VA start: virt_end: 0x{:X}", virt_end);
    }

    auto block_end_predecessor{std::prev(block_end_successor)};

    if (block_end_successor != blocks.end()) {
        // We have blocks in front of us; if one starts directly at virt_end then we don't have
        // to add a tail
        if (block_end_successor->virt != virt_end) {
            PaType tail_phys{[&]() -> PaType {
                if constexpr (!PaContigSplit) {
                    // Always propagate unmapped regions rather than calculating offset
                    return block_end_predecessor->phys;
                } else {
                    if (block_end_predecessor->Unmapped()) {
                        // Always propagate unmapped regions rather than calculating offset
                        return block_end_predecessor->phys;
                    } else {
                        return block_end_predecessor->phys + virt_end - block_end_predecessor->virt;
                    }
                }
            }()};

            if (block_end_predecessor->virt >= virt) {
                // If this block's start would be overlapped by the map then reuse it as a tail
                // block; its existing extra_info already describes the tail
                block_end_predecessor->virt = virt_end;
                block_end_predecessor->phys = tail_phys;

                // No longer the predecessor
                block_end_successor = block_end_predecessor--;
            } else {
                // Else insert a new one and we're done
                blocks.insert(block_end_successor,
                              {Block(virt, phys, extra_info),
                               Block(virt_end, tail_phys, block_end_predecessor->extra_info)});
                if (unmap_callback) {
                    unmap_callback(virt, size);
                }

                return;
            }
        }
    } else {
        // block_end_predecessor will always be unmapped as the block list has to be terminated
        // by an unmapped chunk
        if (block_end_predecessor != blocks.begin() && block_end_predecessor->virt >= virt) {
            // Move the unmapped block's start forward to the end of the new mapping
            block_end_predecessor->virt = virt_end;

            // No longer the predecessor
            block_end_successor = block_end_predecessor--;
        } else {
            // Else insert a new one and we're done
            blocks.insert(block_end_successor,
                          {Block(virt, phys, extra_info), Block(virt_end, UnmappedPa, {})});
            if (unmap_callback) {
                unmap_callback(virt, size);
            }

            return;
        }
    }

    auto block_start_successor{block_end_successor};

    // Walk the block vector to find the start successor as this is more efficient than another
    // binary search in most scenarios
    while (std::prev(block_start_successor)->virt >= virt) {
        block_start_successor--;
    }

    // Check that the start successor is either the end block or something in between
    if (block_start_successor->virt > virt_end) {
        ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
    } else if (block_start_successor->virt == virt_end) {
        // We need to create a new block as there are none spare that we would overwrite
        blocks.insert(block_start_successor, Block(virt, phys, extra_info));
    } else {
        // Erase overwritten blocks
        if (auto erase_start{std::next(block_start_successor)};
            erase_start != block_end_successor) {
            blocks.erase(erase_start, block_end_successor);
        }

        // Reuse a block that would otherwise be overwritten as a start block
        block_start_successor->virt = virt;
        block_start_successor->phys = phys;
        block_start_successor->extra_info = extra_info;
    }

    if (unmap_callback) {
        unmap_callback(virt, size);
    }
}

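// Unmaps [virt, virt + size), splitting any mapped block that straddles either boundary and
// merging the result with adjacent unmapped regions so that no two unmapped blocks are ever
// left adjacent.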
MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) {
    VaType virt_end{virt + size};

    if (virt_end > va_limit) {
        ASSERT_MSG(false,
                   "Trying to unmap a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}",
                   virt_end, va_limit);
    }

    auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)};
    if (block_end_successor == blocks.begin()) {
        ASSERT_MSG(false, "Trying to unmap a block before the VA start: virt_end: 0x{:X}",
                   virt_end);
    }

    auto block_end_predecessor{std::prev(block_end_successor)};

    auto walk_back_to_predecessor{[&](auto iter) {
        while (iter->virt >= virt) {
            iter--;
        }

        return iter;
    }};

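    // Given the unmapped block terminating the region, erases every block inside
    // [virt, virt_end) and extends the surrounding unmapped region over it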
    auto erase_blocks_with_end_unmapped{[&](auto unmapped_end) {
        auto block_start_predecessor{walk_back_to_predecessor(unmapped_end)};
        auto block_start_successor{std::next(block_start_predecessor)};

        auto erase_end{[&]() {
            if (block_start_predecessor->Unmapped()) {
                // If the start predecessor is unmapped then we can erase everything in our region
                // and be done
                return std::next(unmapped_end);
            } else {
                // Else reuse the end predecessor as the start of our unmapped region then erase
                // all up to it
                unmapped_end->virt = virt;
                return unmapped_end;
            }
        }()};

        // We can't have two unmapped regions after each other
        if (erase_end != blocks.end() &&
            (erase_end == block_start_successor ||
             (block_start_predecessor->Unmapped() && erase_end->Unmapped()))) {
            ASSERT_MSG(false, "Multiple contiguous unmapped regions are unsupported!");
        }

        blocks.erase(block_start_successor, erase_end);
    }};

    // We can avoid any splitting logic if either of these is the case
    if (block_end_predecessor->Unmapped()) {
        if (block_end_predecessor->virt > virt) {
            erase_blocks_with_end_unmapped(block_end_predecessor);
        }

        if (unmap_callback) {
            unmap_callback(virt, size);
        }

        return; // The region is unmapped, bail out early
    } else if (block_end_successor->virt == virt_end && block_end_successor->Unmapped()) {
        erase_blocks_with_end_unmapped(block_end_successor);

        if (unmap_callback) {
            unmap_callback(virt, size);
        }

        return; // The region is unmapped here and doesn't need splitting, bail out early
    } else if (block_end_successor == blocks.end()) {
        // This should never happen as the end should always follow an unmapped block
        ASSERT_MSG(false, "Unexpected Memory Manager state!");
    } else if (block_end_successor->virt != virt_end) {
        // No block starts directly at virt_end, so we have to add a tail

        // The previous block is mapped so we will need to add a tail with an offset
        PaType tail_phys{[&]() {
            if constexpr (PaContigSplit) {
                return block_end_predecessor->phys + virt_end - block_end_predecessor->virt;
            } else {
                return block_end_predecessor->phys;
            }
        }()};

        if (block_end_predecessor->virt >= virt) {
            // If this block's start would be overlapped by the unmap then reuse it as a tail block
            block_end_predecessor->virt = virt_end;
            block_end_predecessor->phys = tail_phys;

            // No longer the predecessor
            block_end_successor = block_end_predecessor--;
        } else {
            blocks.insert(block_end_successor,
                          {Block(virt, UnmappedPa, {}),
                           Block(virt_end, tail_phys, block_end_predecessor->extra_info)});
            if (unmap_callback) {
                unmap_callback(virt, size);
            }

            // The previous block is mapped and ends before the region, so we're done
            return;
        }
    }

    // Walk the block vector to find the start predecessor as this is more efficient than another
    // binary search in most scenarios
    auto block_start_predecessor{walk_back_to_predecessor(block_end_successor)};
    auto block_start_successor{std::next(block_start_predecessor)};

    if (block_start_successor->virt > virt_end) {
        ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
    } else if (block_start_successor->virt == virt_end) {
        // There are no blocks between the start and the end that would let us skip inserting a new
        // one for head

        // The previous block may be unmapped; if so we don't need to insert an unmapped block
        // after it
        if (block_start_predecessor->Mapped()) {
            blocks.insert(block_start_successor, Block(virt, UnmappedPa, {}));
        }
    } else if (block_start_predecessor->Unmapped()) {
        // If the previous block is unmapped it already covers virt, so erase every block inside
        // the region up to the end successor
        blocks.erase(block_start_successor, block_end_successor);
    } else {
        // Erase overwritten blocks, skipping the first one as we will reuse it as the unmapped
        // start block
        if (auto erase_start{std::next(block_start_successor)};
            erase_start != block_end_successor) {
            blocks.erase(erase_start, block_end_successor);
        }

        // Add in the unmapped block header
        block_start_successor->virt = virt;
        block_start_successor->phys = UnmappedPa;
    }

    if (unmap_callback) {
        unmap_callback(virt, size);
    }
}

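// The allocator layers a simple bump ("linear") allocation scheme on top of the flat map:
// virt_start is the lowest allocatable address and current_linear_alloc_end tracks the end of
// the most recent linear allocation.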
ALLOC_MEMBER_CONST()::FlatAllocator(VaType virt_start_, VaType va_limit_)
    : Base{va_limit_}, virt_start{virt_start_}, current_linear_alloc_end{virt_start_} {}

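// Allocation first tries to bump-allocate at current_linear_alloc_end; if a fixed mapping is
// in the way it skips forward over blocks, and once the end of the address space is reached
// it falls back to scanning the whole block list for the first unmapped gap that fits.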
ALLOC_MEMBER(VaType)::Allocate(VaType size) {
    std::scoped_lock lock(this->block_mutex);

    VaType alloc_start{UnmappedVa};
    VaType alloc_end{current_linear_alloc_end + size};

    // Avoid searching backwards in the address space if possible
    if (alloc_end >= current_linear_alloc_end && alloc_end <= this->va_limit) {
        auto alloc_end_successor{
            std::lower_bound(this->blocks.begin(), this->blocks.end(), alloc_end)};
        if (alloc_end_successor == this->blocks.begin()) {
            ASSERT_MSG(false, "First block in AS map is invalid!");
        }

        auto alloc_end_predecessor{std::prev(alloc_end_successor)};
        if (alloc_end_predecessor->virt <= current_linear_alloc_end) {
            alloc_start = current_linear_alloc_end;
        } else {
            // Skip over any fixed mappings in front of us
            while (alloc_end_successor != this->blocks.end()) {
                if (alloc_end_successor->virt - alloc_end_predecessor->virt >= size &&
                    alloc_end_predecessor->Unmapped()) {
                    // This gap is unmapped and large enough, allocate at its start
                    alloc_start = alloc_end_predecessor->virt;
                    break;
                }

                alloc_end_predecessor = alloc_end_successor++;

                // Use the VA limit to calculate if we can fit in the final block since it has no
                // successor
                if (alloc_end_successor == this->blocks.end()) {
                    alloc_end = alloc_end_predecessor->virt + size;

                    if (alloc_end >= alloc_end_predecessor->virt && alloc_end <= this->va_limit) {
                        alloc_start = alloc_end_predecessor->virt;
                    }
                }
            }
        }
    }

    if (alloc_start != UnmappedVa) {
        current_linear_alloc_end = alloc_start + size;
    } else { // If linear allocation overflows the AS then find a gap
        if (this->blocks.size() <= 2) {
            ASSERT_MSG(false, "Unexpected allocator state!");
        }

        auto search_predecessor{this->blocks.begin()};
        auto search_successor{std::next(search_predecessor)};

        while (search_successor != this->blocks.end() &&
               (search_successor->virt - search_predecessor->virt < size ||
                search_predecessor->Mapped())) {
            search_predecessor = search_successor++;
        }

        if (search_successor != this->blocks.end()) {
            alloc_start = search_predecessor->virt;
        } else {
            return {}; // AS is full
        }
    }

    this->MapLocked(alloc_start, true, size, {});
    return alloc_start;
}

ALLOC_MEMBER(void)::AllocateFixed(VaType virt, VaType size) {
    this->Map(virt, true, size);
}

ALLOC_MEMBER(void)::Free(VaType virt, VaType size) {
    this->Unmap(virt, size);
}
} // namespace Common
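
// A minimal usage sketch (not part of this commit), assuming the template declarations in
// "common/address_space.h" match the definitions above; the `u64` alias, template arguments
// and addresses below are illustrative only:
//
//     // 32-bit AS with UnmappedVa = 0; allocations start at 0x10000, limit at 2 GiB
//     Common::FlatAllocator<u64, 0, 32> allocator{0x10000, 1ULL << 31};
//
//     const u64 va = allocator.Allocate(0x2000);   // find and map a 0x2000-byte region
//     allocator.AllocateFixed(0x40000000, 0x1000); // claim a caller-chosen fixed range
//     allocator.Free(va, 0x2000);                  // unmap so the range can be reused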