author    Fernando Sahmkow  2021-11-14 20:55:52 +0100
committer Fernando Sahmkow  2022-10-06 21:00:52 +0200
commit    feb49c822d9cabc5bc7be9eab1f2bf4ba460176a (patch)
tree      678c58e7fb8e41f5a04e309df9d196320d11de99 /src/common/address_space.inc
parent    NVDRV: Update copyright notices. (diff)
NVDRV: Remake ASGPU
Diffstat (limited to 'src/common/address_space.inc')
-rw-r--r--  src/common/address_space.inc  338
1 file changed, 338 insertions, 0 deletions
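For orientation, a minimal usage sketch of the FlatAllocator this file defines. The template arguments (VaType = u64, UnmappedVa = 0, 32 address-space bits), the address range, and the common_types.h include are illustrative assumptions, not taken from this commit; the Allocate/AllocateFixed/Free signatures match the definitions in the diff below.

    // Hypothetical usage (assumed parameters, see above)
    #include "common/address_space.h"
    #include "common/common_types.h"

    void ExampleUsage() {
        // Assumed start/limit chosen to fit comfortably inside the 32-bit space
        Common::FlatAllocator<u64, 0, 32> allocator{0x1000, 1ULL << 31};

        const u64 va = allocator.Allocate(0x2000); // linear allocation of 0x2000 bytes
        allocator.AllocateFixed(0x40000, 0x1000);  // reserve a caller-chosen range
        allocator.Free(va, 0x2000);                // release the linear allocation
    }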
diff --git a/src/common/address_space.inc b/src/common/address_space.inc
new file mode 100644
index 000000000..907c55d88
--- /dev/null
+++ b/src/common/address_space.inc
@@ -0,0 +1,338 @@
// SPDX-License-Identifier: GPL-3.0-or-later
// Copyright © 2021 Skyline Team and Contributors (https://github.com/skyline-emu/)

#include "common/address_space.h"
#include "common/assert.h"

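// These macros abbreviate the template preamble that each out-of-line member definition of
// FlatAddressSpaceMap, FlatMemoryManager and FlatAllocator below would otherwise have to repeat.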
#define MAP_MEMBER(returnType)                                                                     \
    template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa,              \
              bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo>                \
    requires AddressSpaceValid<VaType, AddressSpaceBits> returnType FlatAddressSpaceMap<           \
        VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo>
#define MAP_MEMBER_CONST()                                                                         \
    template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa,              \
              bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo>                \
    requires AddressSpaceValid<VaType, AddressSpaceBits> FlatAddressSpaceMap<                      \
        VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo>

#define MM_MEMBER(returnType)                                                                      \
    template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits>                         \
    requires AddressSpaceValid<VaType, AddressSpaceBits> returnType                                \
    FlatMemoryManager<VaType, UnmappedVa, AddressSpaceBits>

#define ALLOC_MEMBER(returnType)                                                                   \
    template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits>                         \
    requires AddressSpaceValid<VaType, AddressSpaceBits> returnType                                \
    FlatAllocator<VaType, UnmappedVa, AddressSpaceBits>
#define ALLOC_MEMBER_CONST()                                                                       \
    template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits>                         \
    requires AddressSpaceValid<VaType, AddressSpaceBits>                                           \
    FlatAllocator<VaType, UnmappedVa, AddressSpaceBits>

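// The `blocks` vector is kept sorted by virtual address: each Block describes the range from its
// `virt` up to the next Block's `virt`, the vector is always terminated by an unmapped chunk, and
// two contiguous unmapped regions never occur.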
namespace Common {
MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType vaLimit,
                                        std::function<void(VaType, VaType)> unmapCallback)
    : unmapCallback(std::move(unmapCallback)), vaLimit(vaLimit) {
    if (vaLimit > VaMaximum)
        UNREACHABLE_MSG("Invalid VA limit!");
}

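// MapLocked maps [virt, virt + size) to phys: it binary-searches for the block following virtEnd,
// reuses or inserts a tail block covering the remainder of any block that straddles virtEnd,
// walks back to the first block at or after virt, erases everything the new range overwrites, and
// writes the new start block in place.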
MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extraInfo) {
    VaType virtEnd{virt + size};

    if (virtEnd > vaLimit)
        UNREACHABLE_MSG("Trying to map a block past the VA limit: virtEnd: 0x{:X}, vaLimit: 0x{:X}",
                        virtEnd, vaLimit);

    auto blockEndSuccessor{std::lower_bound(blocks.begin(), blocks.end(), virtEnd)};
    if (blockEndSuccessor == blocks.begin())
        UNREACHABLE_MSG("Trying to map a block before the VA start: virtEnd: 0x{:X}", virtEnd);

    auto blockEndPredecessor{std::prev(blockEndSuccessor)};

    if (blockEndSuccessor != blocks.end()) {
        // There are blocks in front of us; if one starts directly at virtEnd then we don't have
        // to add a tail
        if (blockEndSuccessor->virt != virtEnd) {
            PaType tailPhys{[&]() -> PaType {
                if constexpr (!PaContigSplit) {
                    // Always propagate unmapped regions rather than calculating an offset
                    return blockEndPredecessor->phys;
                } else {
                    if (blockEndPredecessor->Unmapped()) {
                        // Always propagate unmapped regions rather than calculating an offset
                        return blockEndPredecessor->phys;
                    } else {
                        return blockEndPredecessor->phys + virtEnd - blockEndPredecessor->virt;
                    }
                }
            }()};

            if (blockEndPredecessor->virt >= virt) {
                // If this block's start would be overlapped by the map then reuse it as a tail
                // block; it already carries the extraInfo the tail needs, so only virt and phys
                // are updated
                blockEndPredecessor->virt = virtEnd;
                blockEndPredecessor->phys = tailPhys;

                // The predecessor block is now the successor of the new mapping
                blockEndSuccessor = blockEndPredecessor--;
            } else {
                // Else insert new start and tail blocks and we're done
                blocks.insert(blockEndSuccessor,
                              {Block(virt, phys, extraInfo),
                               Block(virtEnd, tailPhys, blockEndPredecessor->extraInfo)});
                if (unmapCallback)
                    unmapCallback(virt, size);

                return;
            }
        }
    } else {
        // blockEndPredecessor will always be unmapped as the block list has to be terminated by
        // an unmapped chunk
        if (blockEndPredecessor != blocks.begin() && blockEndPredecessor->virt >= virt) {
            // Move the unmapped block's start up to the end of the new mapping
            blockEndPredecessor->virt = virtEnd;

            // The predecessor block is now the successor of the new mapping
            blockEndSuccessor = blockEndPredecessor--;
        } else {
            // Else insert new start and terminator blocks and we're done
            blocks.insert(blockEndSuccessor,
                          {Block(virt, phys, extraInfo), Block(virtEnd, UnmappedPa, {})});
            if (unmapCallback)
                unmapCallback(virt, size);

            return;
        }
    }

    auto blockStartSuccessor{blockEndSuccessor};

    // Walk the block vector to find the start successor as this is more efficient than another
    // binary search in most scenarios
    while (std::prev(blockStartSuccessor)->virt >= virt)
        blockStartSuccessor--;

    // Check that the start successor is either the end block or something in between
    if (blockStartSuccessor->virt > virtEnd) {
        UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", blockStartSuccessor->virt);
    } else if (blockStartSuccessor->virt == virtEnd) {
        // We need to create a new block as there are none spare that we would overwrite
        blocks.insert(blockStartSuccessor, Block(virt, phys, extraInfo));
    } else {
        // Erase overwritten blocks
        if (auto eraseStart{std::next(blockStartSuccessor)}; eraseStart != blockEndSuccessor)
            blocks.erase(eraseStart, blockEndSuccessor);

        // Reuse a block that would otherwise be overwritten as a start block
        blockStartSuccessor->virt = virt;
        blockStartSuccessor->phys = phys;
        blockStartSuccessor->extraInfo = extraInfo;
    }

    if (unmapCallback)
        unmapCallback(virt, size);
}

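// UnmapLocked removes the mapping of [virt, virt + size): if the range already ends in unmapped
// space it erases the covered blocks via eraseBlocksWithEndUnmapped, otherwise it splits the
// block straddling virtEnd into a tail and replaces everything from virt onwards with a single
// unmapped block.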
MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) {
    VaType virtEnd{virt + size};

    if (virtEnd > vaLimit)
        UNREACHABLE_MSG("Trying to unmap a block past the VA limit: virtEnd: 0x{:X}, vaLimit: 0x{:X}",
                        virtEnd, vaLimit);

    auto blockEndSuccessor{std::lower_bound(blocks.begin(), blocks.end(), virtEnd)};
    if (blockEndSuccessor == blocks.begin())
        UNREACHABLE_MSG("Trying to unmap a block before the VA start: virtEnd: 0x{:X}", virtEnd);

    auto blockEndPredecessor{std::prev(blockEndSuccessor)};

    auto walkBackToPredecessor{[&](auto iter) {
        while (iter->virt >= virt)
            iter--;

        return iter;
    }};

    auto eraseBlocksWithEndUnmapped{[&](auto unmappedEnd) {
        auto blockStartPredecessor{walkBackToPredecessor(unmappedEnd)};
        auto blockStartSuccessor{std::next(blockStartPredecessor)};

        auto eraseEnd{[&]() {
            if (blockStartPredecessor->Unmapped()) {
                // If the start predecessor is unmapped then we can erase everything in our region
                // and be done
                return std::next(unmappedEnd);
            } else {
                // Else reuse the end block as the start of our unmapped region, then erase
                // everything up to it
                unmappedEnd->virt = virt;
                return unmappedEnd;
            }
        }()};

        // We can't have two unmapped regions after each other
        if (eraseEnd != blocks.end() &&
            (eraseEnd == blockStartSuccessor ||
             (blockStartPredecessor->Unmapped() && eraseEnd->Unmapped())))
            UNREACHABLE_MSG("Multiple contiguous unmapped regions are unsupported!");

        blocks.erase(blockStartSuccessor, eraseEnd);
    }};

    // We can avoid any splitting logic in these cases
    if (blockEndPredecessor->Unmapped()) {
        if (blockEndPredecessor->virt > virt)
            eraseBlocksWithEndUnmapped(blockEndPredecessor);

        if (unmapCallback)
            unmapCallback(virt, size);

        return; // The region is unmapped, bail out early
    } else if (blockEndSuccessor->virt == virtEnd && blockEndSuccessor->Unmapped()) {
        eraseBlocksWithEndUnmapped(blockEndSuccessor);

        if (unmapCallback)
            unmapCallback(virt, size);

        return; // The region is unmapped here and doesn't need splitting, bail out early
    } else if (blockEndSuccessor == blocks.end()) {
        // This should never happen as the end should always follow an unmapped block
        UNREACHABLE_MSG("Unexpected Memory Manager state!");
    } else if (blockEndSuccessor->virt != virtEnd) {
        // No block ends directly at virtEnd, so the mapped previous block has to be split and a
        // tail with a PA offset added
        PaType tailPhys{[&]() {
            if constexpr (PaContigSplit)
                return blockEndPredecessor->phys + virtEnd - blockEndPredecessor->virt;
            else
                return blockEndPredecessor->phys;
        }()};

        if (blockEndPredecessor->virt >= virt) {
            // If this block's start would be overlapped by the unmap then reuse it as a tail block
            blockEndPredecessor->virt = virtEnd;
            blockEndPredecessor->phys = tailPhys;

            // The predecessor block is now the successor of the unmapped region
            blockEndSuccessor = blockEndPredecessor--;
        } else {
            blocks.insert(blockEndSuccessor,
                          {Block(virt, UnmappedPa, {}),
                           Block(virtEnd, tailPhys, blockEndPredecessor->extraInfo)});
            if (unmapCallback)
                unmapCallback(virt, size);

            return; // The previous block is mapped and starts before the unmapped region, so
                    // inserting the unmapped block and the tail completes the unmap
        }
    }

    // Walk the block vector to find the start predecessor as this is more efficient than another
    // binary search in most scenarios
    auto blockStartPredecessor{walkBackToPredecessor(blockEndSuccessor)};
    auto blockStartSuccessor{std::next(blockStartPredecessor)};

    if (blockStartSuccessor->virt > virtEnd) {
        UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", blockStartSuccessor->virt);
    } else if (blockStartSuccessor->virt == virtEnd) {
        // There are no blocks between the start and the end that we could reuse, so a new head
        // block has to be inserted

        // The previous block may be unmapped; if so we don't need to insert any unmaps after it
        if (blockStartPredecessor->Mapped())
            blocks.insert(blockStartSuccessor, Block(virt, UnmappedPa, {}));
    } else if (blockStartPredecessor->Unmapped()) {
        // If the previous block is unmapped we only need to erase the blocks in between
        blocks.erase(blockStartSuccessor, blockEndPredecessor);
    } else {
        // Erase overwritten blocks, skipping the first one as it will be reused for the unmapped
        // start block
        if (auto eraseStart{std::next(blockStartSuccessor)}; eraseStart != blockEndSuccessor)
            blocks.erase(eraseStart, blockEndSuccessor);

        // Write in the unmapped start block
        blockStartSuccessor->virt = virt;
        blockStartSuccessor->phys = UnmappedPa;
    }

    if (unmapCallback)
        unmapCallback(virt, size);
}

ALLOC_MEMBER_CONST()::FlatAllocator(VaType vaStart, VaType vaLimit)
    : Base(vaLimit), currentLinearAllocEnd(vaStart), vaStart(vaStart) {}

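// Allocate is linear-first: it tries to bump-allocate at currentLinearAllocEnd, skipping over any
// fixed mappings in front of it, and only falls back to a first-fit search over the whole block
// list once linear allocation would overflow the address space.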
ALLOC_MEMBER(VaType)::Allocate(VaType size) {
    std::scoped_lock lock(this->blockMutex);

    VaType allocStart{UnmappedVa};
    VaType allocEnd{currentLinearAllocEnd + size};

    // Avoid searching backwards in the address space if possible
    if (allocEnd >= currentLinearAllocEnd && allocEnd <= this->vaLimit) {
        auto allocEndSuccessor{
            std::lower_bound(this->blocks.begin(), this->blocks.end(), allocEnd)};
        if (allocEndSuccessor == this->blocks.begin())
            UNREACHABLE_MSG("First block in AS map is invalid!");

        auto allocEndPredecessor{std::prev(allocEndSuccessor)};
        if (allocEndPredecessor->virt <= currentLinearAllocEnd) {
            allocStart = currentLinearAllocEnd;
        } else {
            // Skip over any fixed mappings in front of us
            while (allocEndSuccessor != this->blocks.end()) {
                if (allocEndSuccessor->virt - allocEndPredecessor->virt < size ||
                    allocEndPredecessor->Mapped()) {
                    allocStart = allocEndPredecessor->virt;
                    break;
                }

                allocEndPredecessor = allocEndSuccessor++;

                // Use the VA limit to calculate if we can fit in the final block since it has no
                // successor
                if (allocEndSuccessor == this->blocks.end()) {
                    allocEnd = allocEndPredecessor->virt + size;

                    if (allocEnd >= allocEndPredecessor->virt && allocEnd <= this->vaLimit)
                        allocStart = allocEndPredecessor->virt;
                }
            }
        }
    }

    if (allocStart != UnmappedVa) {
        currentLinearAllocEnd = allocStart + size;
    } else { // If linear allocation overflows the AS then find a gap
        if (this->blocks.size() <= 2)
            UNREACHABLE_MSG("Unexpected allocator state!");

        auto searchPredecessor{this->blocks.begin()};
        auto searchSuccessor{std::next(searchPredecessor)};

        while (searchSuccessor != this->blocks.end() &&
               (searchSuccessor->virt - searchPredecessor->virt < size ||
                searchPredecessor->Mapped())) {
            searchPredecessor = searchSuccessor++;
        }

        if (searchSuccessor != this->blocks.end())
            allocStart = searchPredecessor->virt;
        else
            return {}; // AS is full
    }

    this->MapLocked(allocStart, true, size, {});
    return allocStart;
}

ALLOC_MEMBER(void)::AllocateFixed(VaType virt, VaType size) {
    this->Map(virt, true, size);
}

ALLOC_MEMBER(void)::Free(VaType virt, VaType size) {
    this->Unmap(virt, size);
}
} // namespace Common