author      bunnei  2019-03-20 22:28:35 -0400
committer   bunnei  2019-03-20 22:36:03 -0400
commit      72837e4b3d312ac6d7e5114c7b6e370006d46921 (patch)
tree        a696e00609764169e4cb6e1bbceea6ea5fb5cb2e
parent      memory: Check that core is powered on before attempting to use GPU. (diff)
memory_manager: Bug fixes and further cleanup.
Diffstat
-rw-r--r--  src/video_core/memory_manager.cpp  131
-rw-r--r--  src/video_core/memory_manager.h     14
2 files changed, 72 insertions(+), 73 deletions(-)
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index e8edf9b14..0c4cf3974 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -40,7 +40,7 @@ GPUVAddr MemoryManager::AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align) {
     return gpu_addr;
 }
 
-GPUVAddr MemoryManager::MapBufferEx(GPUVAddr cpu_addr, u64 size) {
+GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) {
     const GPUVAddr gpu_addr{
         FindFreeRegion(address_space_base, size, page_size, VirtualMemoryArea::Type::Unmapped)};
     MapBackingMemory(gpu_addr, Memory::GetPointer(cpu_addr), ((size + page_mask) & ~page_mask),
@@ -48,7 +48,7 @@ GPUVAddr MemoryManager::MapBufferEx(GPUVAddr cpu_addr, u64 size) {
     return gpu_addr;
 }
 
-GPUVAddr MemoryManager::MapBufferEx(GPUVAddr cpu_addr, GPUVAddr gpu_addr, u64 size) {
+GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size) {
     ASSERT((gpu_addr & page_mask) == 0);
 
     MapBackingMemory(gpu_addr, Memory::GetPointer(cpu_addr), ((size + page_mask) & ~page_mask),
@@ -74,20 +74,20 @@ GPUVAddr MemoryManager::FindFreeRegion(GPUVAddr region_start, u64 size, u64 alig
     align = (align + page_mask) & ~page_mask;
 
     // Find the first Free VMA.
-    const GPUVAddr base = region_start;
-    const VMAHandle vma_handle = std::find_if(vma_map.begin(), vma_map.end(), [&](const auto& vma) {
-        if (vma.second.type != vma_type)
+    const VMAHandle vma_handle{std::find_if(vma_map.begin(), vma_map.end(), [&](const auto& vma) {
+        if (vma.second.type != vma_type) {
             return false;
+        }
 
-        const VAddr vma_end = vma.second.base + vma.second.size;
-        return vma_end > base && vma_end >= base + size;
-    });
+        const VAddr vma_end{vma.second.base + vma.second.size};
+        return vma_end > region_start && vma_end >= region_start + size;
+    })};
 
     if (vma_handle == vma_map.end()) {
         return {};
     }
 
-    return std::max(base, vma_handle->second.base);
+    return std::max(region_start, vma_handle->second.base);
 }
 
 bool MemoryManager::IsAddressValid(GPUVAddr addr) const {
92 92
93bool MemoryManager::IsAddressValid(GPUVAddr addr) const { 93bool MemoryManager::IsAddressValid(GPUVAddr addr) const {
@@ -99,7 +99,7 @@ std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr) {
         return {};
     }
 
-    VAddr cpu_addr = page_table.backing_addr[addr >> page_bits];
+    VAddr cpu_addr{page_table.backing_addr[addr >> page_bits]};
     if (cpu_addr) {
         return cpu_addr + (addr & page_mask);
     }
@@ -113,7 +113,7 @@ T MemoryManager::Read(GPUVAddr addr) {
         return {};
     }
 
-    const u8* page_pointer = page_table.pointers[addr >> page_bits];
+    const u8* page_pointer{page_table.pointers[addr >> page_bits]};
     if (page_pointer) {
         // NOTE: Avoid adding any extra logic to this fast-path block
         T value;
@@ -121,8 +121,7 @@ T MemoryManager::Read(GPUVAddr addr) {
         return value;
     }
 
-    Common::PageType type = page_table.attributes[addr >> page_bits];
-    switch (type) {
+    switch (page_table.attributes[addr >> page_bits]) {
     case Common::PageType::Unmapped:
         LOG_ERROR(HW_GPU, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, addr);
         return 0;
@@ -141,15 +140,14 @@ void MemoryManager::Write(GPUVAddr addr, T data) {
         return;
     }
 
-    u8* page_pointer = page_table.pointers[addr >> page_bits];
+    u8* page_pointer{page_table.pointers[addr >> page_bits]};
     if (page_pointer) {
         // NOTE: Avoid adding any extra logic to this fast-path block
         std::memcpy(&page_pointer[addr & page_mask], &data, sizeof(T));
         return;
     }
 
-    Common::PageType type = page_table.attributes[addr >> page_bits];
-    switch (type) {
+    switch (page_table.attributes[addr >> page_bits]) {
     case Common::PageType::Unmapped:
         LOG_ERROR(HW_GPU, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
                   static_cast<u32>(data), addr);
@@ -176,7 +174,7 @@ u8* MemoryManager::GetPointer(GPUVAddr addr) {
         return {};
     }
 
-    u8* page_pointer = page_table.pointers[addr >> page_bits];
+    u8* page_pointer{page_table.pointers[addr >> page_bits]};
     if (page_pointer) {
         return page_pointer + (addr & page_mask);
     }
@@ -201,7 +199,7 @@ void MemoryManager::MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageTy
     LOG_DEBUG(HW_GPU, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * page_size,
               (base + size) * page_size);
 
-    VAddr end = base + size;
+    const VAddr end{base + size};
     ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
                base + page_table.pointers.size());
 
@@ -257,56 +255,58 @@ MemoryManager::VMAHandle MemoryManager::FindVMA(GPUVAddr target) const {
     }
 }
 
+MemoryManager::VMAIter MemoryManager::Allocate(VMAIter vma_handle) {
+    VirtualMemoryArea& vma{vma_handle->second};
+
+    vma.type = VirtualMemoryArea::Type::Allocated;
+    vma.backing_addr = 0;
+    vma.backing_memory = {};
+    UpdatePageTableForVMA(vma);
+
+    return MergeAdjacent(vma_handle);
+}
+
 MemoryManager::VMAHandle MemoryManager::AllocateMemory(GPUVAddr target, std::size_t offset,
                                                        u64 size) {
 
     // This is the appropriately sized VMA that will turn into our allocation.
-    VMAIter vma_handle = CarveVMA(target, size);
-    VirtualMemoryArea& final_vma = vma_handle->second;
-    ASSERT(final_vma.size == size);
+    VMAIter vma_handle{CarveVMA(target, size)};
+    VirtualMemoryArea& vma{vma_handle->second};
 
-    final_vma.type = VirtualMemoryArea::Type::Allocated;
-    final_vma.offset = offset;
-    UpdatePageTableForVMA(final_vma);
+    ASSERT(vma.size == size);
 
-    return MergeAdjacent(vma_handle);
+    vma.offset = offset;
+
+    return Allocate(vma_handle);
 }
 
 MemoryManager::VMAHandle MemoryManager::MapBackingMemory(GPUVAddr target, u8* memory, u64 size,
                                                          VAddr backing_addr) {
     // This is the appropriately sized VMA that will turn into our allocation.
-    VMAIter vma_handle = CarveVMA(target, size);
-    VirtualMemoryArea& final_vma = vma_handle->second;
-    ASSERT(final_vma.size == size);
-
-    final_vma.type = VirtualMemoryArea::Type::Mapped;
-    final_vma.backing_memory = memory;
-    final_vma.backing_addr = backing_addr;
-    UpdatePageTableForVMA(final_vma);
-
-    return MergeAdjacent(vma_handle);
-}
+    VMAIter vma_handle{CarveVMA(target, size)};
+    VirtualMemoryArea& vma{vma_handle->second};
 
-MemoryManager::VMAIter MemoryManager::Unmap(VMAIter vma_handle) {
-    VirtualMemoryArea& vma = vma_handle->second;
-    vma.type = VirtualMemoryArea::Type::Allocated;
-    vma.offset = 0;
-    vma.backing_memory = nullptr;
+    ASSERT(vma.size == size);
 
+    vma.type = VirtualMemoryArea::Type::Mapped;
+    vma.backing_memory = memory;
+    vma.backing_addr = backing_addr;
     UpdatePageTableForVMA(vma);
 
     return MergeAdjacent(vma_handle);
 }
 
 void MemoryManager::UnmapRange(GPUVAddr target, u64 size) {
-    VMAIter vma = CarveVMARange(target, size);
-    const VAddr target_end = target + size;
+    VMAIter vma{CarveVMARange(target, size)};
+    const VAddr target_end{target + size};
+    const VMAIter end{vma_map.end()};
 
-    const VMAIter end = vma_map.end();
     // The comparison against the end of the range must be done using addresses since VMAs can be
     // merged during this process, causing invalidation of the iterators.
     while (vma != end && vma->second.base < target_end) {
-        vma = std::next(Unmap(vma));
+        // Unmapped ranges return to allocated state and can be reused
+        // This behavior is used by Super Mario Odyssey, Sonic Forces, and likely other games
+        vma = std::next(Allocate(vma));
     }
 
     ASSERT(FindVMA(target)->second.size >= size);
@@ -319,25 +319,26 @@ MemoryManager::VMAIter MemoryManager::StripIterConstness(const VMAHandle& iter)
 }
 
 MemoryManager::VMAIter MemoryManager::CarveVMA(GPUVAddr base, u64 size) {
-    ASSERT_MSG((size & Tegra::MemoryManager::page_mask) == 0, "non-page aligned size: 0x{:016X}",
-               size);
-    ASSERT_MSG((base & Tegra::MemoryManager::page_mask) == 0, "non-page aligned base: 0x{:016X}",
-               base);
+    ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: 0x{:016X}", size);
+    ASSERT_MSG((base & page_mask) == 0, "non-page aligned base: 0x{:016X}", base);
 
-    VMAIter vma_handle = StripIterConstness(FindVMA(base));
+    VMAIter vma_handle{StripIterConstness(FindVMA(base))};
     if (vma_handle == vma_map.end()) {
-        // Target address is outside the range managed by the kernel
+        // Target address is outside the managed range
         return {};
     }
 
-    const VirtualMemoryArea& vma = vma_handle->second;
+    const VirtualMemoryArea& vma{vma_handle->second};
     if (vma.type == VirtualMemoryArea::Type::Mapped) {
         // Region is already allocated
         return {};
     }
 
-    const VAddr start_in_vma = base - vma.base;
-    const VAddr end_in_vma = start_in_vma + size;
+    const VAddr start_in_vma{base - vma.base};
+    const VAddr end_in_vma{start_in_vma + size};
+
+    ASSERT_MSG(end_in_vma <= vma.size, "region size 0x{:016X} is less than required size 0x{:016X}",
+               vma.size, end_in_vma);
 
     if (end_in_vma < vma.size) {
         // Split VMA at the end of the allocated region
@@ -352,17 +353,15 @@ MemoryManager::VMAIter MemoryManager::CarveVMA(GPUVAddr base, u64 size) {
 }
 
 MemoryManager::VMAIter MemoryManager::CarveVMARange(GPUVAddr target, u64 size) {
-    ASSERT_MSG((size & Tegra::MemoryManager::page_mask) == 0, "non-page aligned size: 0x{:016X}",
-               size);
-    ASSERT_MSG((target & Tegra::MemoryManager::page_mask) == 0, "non-page aligned base: 0x{:016X}",
-               target);
+    ASSERT_MSG((size & page_mask) == 0, "non-page aligned size: 0x{:016X}", size);
+    ASSERT_MSG((target & page_mask) == 0, "non-page aligned base: 0x{:016X}", target);
 
-    const VAddr target_end = target + size;
+    const VAddr target_end{target + size};
     ASSERT(target_end >= target);
     ASSERT(size > 0);
 
-    VMAIter begin_vma = StripIterConstness(FindVMA(target));
-    const VMAIter i_end = vma_map.lower_bound(target_end);
+    VMAIter begin_vma{StripIterConstness(FindVMA(target))};
+    const VMAIter i_end{vma_map.lower_bound(target_end)};
     if (std::any_of(begin_vma, i_end, [](const auto& entry) {
             return entry.second.type == VirtualMemoryArea::Type::Unmapped;
         })) {
@@ -373,7 +372,7 @@ MemoryManager::VMAIter MemoryManager::CarveVMARange(GPUVAddr target, u64 size) {
         begin_vma = SplitVMA(begin_vma, target - begin_vma->second.base);
     }
 
-    VMAIter end_vma = StripIterConstness(FindVMA(target_end));
+    VMAIter end_vma{StripIterConstness(FindVMA(target_end))};
     if (end_vma != vma_map.end() && target_end != end_vma->second.base) {
         end_vma = SplitVMA(end_vma, target_end - end_vma->second.base);
     }
@@ -382,8 +381,8 @@ MemoryManager::VMAIter MemoryManager::CarveVMARange(GPUVAddr target, u64 size) {
 }
 
 MemoryManager::VMAIter MemoryManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) {
-    VirtualMemoryArea& old_vma = vma_handle->second;
-    VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA
+    VirtualMemoryArea& old_vma{vma_handle->second};
+    VirtualMemoryArea new_vma{old_vma}; // Make a copy of the VMA
 
     // For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably
     // a bug. This restriction might be removed later.
@@ -411,14 +410,14 @@ MemoryManager::VMAIter MemoryManager::SplitVMA(VMAIter vma_handle, u64 offset_in
 }
 
 MemoryManager::VMAIter MemoryManager::MergeAdjacent(VMAIter iter) {
-    const VMAIter next_vma = std::next(iter);
+    const VMAIter next_vma{std::next(iter)};
     if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
         iter->second.size += next_vma->second.size;
         vma_map.erase(next_vma);
     }
 
     if (iter != vma_map.begin()) {
-        VMAIter prev_vma = std::prev(iter);
+        VMAIter prev_vma{std::prev(iter)};
         if (prev_vma->second.CanBeMergedWith(iter->second)) {
             prev_vma->second.size += iter->second.size;
             vma_map.erase(iter);
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 76fa3d916..60ba6b858 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -47,8 +47,8 @@ public:
 
     GPUVAddr AllocateSpace(u64 size, u64 align);
     GPUVAddr AllocateSpace(GPUVAddr addr, u64 size, u64 align);
-    GPUVAddr MapBufferEx(GPUVAddr cpu_addr, u64 size);
-    GPUVAddr MapBufferEx(GPUVAddr cpu_addr, GPUVAddr addr, u64 size);
+    GPUVAddr MapBufferEx(VAddr cpu_addr, u64 size);
+    GPUVAddr MapBufferEx(VAddr cpu_addr, GPUVAddr addr, u64 size);
     GPUVAddr UnmapBuffer(GPUVAddr addr, u64 size);
     std::optional<VAddr> GpuToCpuAddress(GPUVAddr addr);
 
@@ -96,8 +96,8 @@ private:
     /// Converts a VMAHandle to a mutable VMAIter.
     VMAIter StripIterConstness(const VMAHandle& iter);
 
-    /// Unmaps the given VMA.
-    VMAIter Unmap(VMAIter vma);
+    /// Marks the specified VMA as allocated.
+    VMAIter Allocate(VMAIter vma);
 
     /**
      * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing
@@ -135,11 +135,11 @@ private:
     static constexpr u64 page_mask{page_size - 1};
 
     /// Address space in bits, this is fairly arbitrary but sufficiently large.
-    static constexpr u32 address_space_width = 39;
+    static constexpr u32 address_space_width{39};
     /// Start address for mapping, this is fairly arbitrary but must be non-zero.
-    static constexpr GPUVAddr address_space_base = 0x100000;
+    static constexpr GPUVAddr address_space_base{0x100000};
     /// End of address space, based on address space in bits.
-    static constexpr GPUVAddr address_space_end = 1ULL << address_space_width;
+    static constexpr GPUVAddr address_space_end{1ULL << address_space_width};
 
     Common::PageTable page_table{page_bits};
     VMAMap vma_map;
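
The main behavioral change in this commit is that UnmapRange no longer calls a dedicated Unmap helper that zeroed the VMA's offset and backing memory; it now reuses the new Allocate(VMAIter) helper, so an unmapped range falls back to the Allocated state (and can be re-carved later), with adjacent VMAs of the same type merged afterwards. The listing below is a minimal standalone sketch of that Allocate-then-MergeAdjacent flow, not the yuzu implementation: the VirtualMemoryArea fields, page-table updates, and backing-memory bookkeeping are stripped down, and the toy map layout in main is purely illustrative.

// Simplified sketch (assumptions noted above) of the VMA allocate + merge pattern.
#include <cstdint>
#include <iostream>
#include <iterator>
#include <map>

using GPUVAddr = std::uint64_t;

struct VirtualMemoryArea {
    enum class Type { Unmapped, Allocated, Mapped };

    GPUVAddr base = 0;
    std::uint64_t size = 0;
    Type type = Type::Unmapped;

    bool CanBeMergedWith(const VirtualMemoryArea& next) const {
        // Mergeable when contiguous and of the same type (backing pointers omitted here).
        return base + size == next.base && type == next.type;
    }
};

using VMAMap = std::map<GPUVAddr, VirtualMemoryArea>;
using VMAIter = VMAMap::iterator;

// Mirrors MergeAdjacent: coalesce with the following and preceding VMA when possible.
VMAIter MergeAdjacent(VMAMap& vma_map, VMAIter iter) {
    const VMAIter next_vma = std::next(iter);
    if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
        iter->second.size += next_vma->second.size;
        vma_map.erase(next_vma);
    }
    if (iter != vma_map.begin()) {
        VMAIter prev_vma = std::prev(iter);
        if (prev_vma->second.CanBeMergedWith(iter->second)) {
            prev_vma->second.size += iter->second.size;
            vma_map.erase(iter);
            iter = prev_vma;
        }
    }
    return iter;
}

// Mirrors the new Allocate(VMAIter): flip a carved VMA to Allocated, then merge neighbours.
VMAIter Allocate(VMAMap& vma_map, VMAIter vma_handle) {
    vma_handle->second.type = VirtualMemoryArea::Type::Allocated;
    return MergeAdjacent(vma_map, vma_handle);
}

int main() {
    VMAMap vma_map;
    // Three contiguous regions: allocated, mapped, allocated.
    vma_map[0x0000] = {0x0000, 0x1000, VirtualMemoryArea::Type::Allocated};
    vma_map[0x1000] = {0x1000, 0x1000, VirtualMemoryArea::Type::Mapped};
    vma_map[0x2000] = {0x2000, 0x1000, VirtualMemoryArea::Type::Allocated};

    // "Unmapping" the middle region now means returning it to the Allocated state;
    // the three VMAs then coalesce into a single 0x3000-byte region.
    Allocate(vma_map, vma_map.find(0x1000));

    for (const auto& [base, vma] : vma_map) {
        std::cout << std::hex << "base=0x" << base << " size=0x" << vma.size << '\n';
    }
}

Running the sketch prints a single coalesced entry (base=0x0, size=0x3000), which illustrates why UnmapRange can iterate by address rather than by iterator: merging during the loop invalidates iterators, exactly as the comment in the diff notes.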