Diffstat (limited to 'src')
-rw-r--r--  src/video_core/memory_manager.cpp  177
-rw-r--r--  src/video_core/memory_manager.h     11
2 files changed, 138 insertions, 50 deletions
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index 836ece136..e1a8b5391 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -15,8 +15,6 @@
15#include "video_core/rasterizer_interface.h" 15#include "video_core/rasterizer_interface.h"
16#include "video_core/renderer_base.h" 16#include "video_core/renderer_base.h"
17 17
18#pragma optimize("", off)
19
20namespace Tegra { 18namespace Tegra {
21 19
22std::atomic<size_t> MemoryManager::unique_identifier_generator{}; 20std::atomic<size_t> MemoryManager::unique_identifier_generator{};
@@ -42,7 +40,7 @@ MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64
 
     big_entries.resize(big_page_table_size / 32, 0);
     big_page_table_cpu.resize(big_page_table_size);
-    big_page_table_physical.resize(big_page_table_size);
+    big_page_continous.resize(big_page_table_size / continous_bits, 0);
     entries.resize(page_table_size / 32, 0);
 }
 
@@ -80,6 +78,19 @@ void MemoryManager::SetEntry(size_t position, MemoryManager::EntryType entry) {
         }
 }
 
+inline bool MemoryManager::IsBigPageContinous(size_t big_page_index) const {
+    const u64 entry_mask = big_page_continous[big_page_index / continous_bits];
+    const size_t sub_index = big_page_index % continous_bits;
+    return ((entry_mask >> sub_index) & 0x1ULL) != 0;
+}
+
+inline void MemoryManager::SetBigPageContinous(size_t big_page_index, bool value) {
+    const u64 continous_mask = big_page_continous[big_page_index / continous_bits];
+    const size_t sub_index = big_page_index % continous_bits;
+    big_page_continous[big_page_index / continous_bits] =
+        (~(1ULL << sub_index) & continous_mask) | (value ? 1ULL << sub_index : 0);
+}
+
 template <MemoryManager::EntryType entry_type>
 GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr,
                                     size_t size) {
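The two new helpers keep one flag bit per big page, packed 64 to a `u64` word of `big_page_continous` (the spelling follows the existing identifiers). A minimal standalone sketch of the same pack-one-bit-per-entry technique; the class and member names here are illustrative, not taken from the codebase:

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for the big_page_continous bitset: one bit per page,
// 64 flags packed into each 64-bit word.
class PageFlagSet {
public:
    explicit PageFlagSet(std::size_t num_pages)
        : words((num_pages + bits_per_word - 1) / bits_per_word, 0) {}

    bool Test(std::size_t index) const {
        return ((words[index / bits_per_word] >> (index % bits_per_word)) & 1ULL) != 0;
    }

    void Set(std::size_t index, bool value) {
        const std::uint64_t bit = 1ULL << (index % bits_per_word);
        std::uint64_t& word = words[index / bits_per_word];
        word = value ? (word | bit) : (word & ~bit); // clear the bit, then optionally set it
    }

private:
    static constexpr std::size_t bits_per_word = 64;
    std::vector<std::uint64_t> words;
};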
@@ -121,9 +132,19 @@ GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr
             const auto index = PageEntryIndex<true>(current_gpu_addr);
             const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits);
             big_page_table_cpu[index] = sub_value;
-            const PAddr phys_address =
-                device_memory.GetPhysicalAddr(memory.GetPointer(current_cpu_addr));
-            big_page_table_physical[index] = static_cast<u32>(phys_address);
+            const bool is_continous = ([&] {
+                uintptr_t base_ptr{
+                    reinterpret_cast<uintptr_t>(memory.GetPointer(current_cpu_addr))};
+                for (VAddr start_cpu = current_cpu_addr + page_size;
+                     start_cpu < current_cpu_addr + big_page_size; start_cpu += page_size) {
+                    base_ptr += page_size;
+                    if (base_ptr != reinterpret_cast<uintptr_t>(memory.GetPointer(start_cpu))) {
+                        return false;
+                    }
+                }
+                return true;
+            })();
+            SetBigPageContinous(index, is_continous);
         }
         remaining_size -= big_page_size;
     }
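The immediately-invoked lambda above walks the big page in small-page steps and requires every host pointer to sit exactly `page_size` bytes after the previous one; only then is the big page flagged continuous. An equivalent free-standing probe, where `GetHostPointer` is a hypothetical stand-in for `memory.GetPointer` and the sizes are parameters rather than class members:

#include <cstddef>
#include <cstdint>

using VAddr = std::uint64_t;
std::uint8_t* GetHostPointer(VAddr addr); // stand-in, assumed

// True when [base, base + big_page_size) maps to one gapless host range.
bool IsHostContiguous(VAddr base, std::size_t page_size, std::size_t big_page_size) {
    std::uintptr_t expected = reinterpret_cast<std::uintptr_t>(GetHostPointer(base));
    for (VAddr addr = base + page_size; addr < base + big_page_size; addr += page_size) {
        expected += page_size;
        if (expected != reinterpret_cast<std::uintptr_t>(GetHostPointer(addr))) {
            return false; // a remapped or missing small page breaks the run
        }
    }
    return true;
}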
@@ -248,12 +269,17 @@ const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
     return memory.GetPointer(*address);
 }
 
+#ifdef _MSC_VER // no need for gcc / clang but msvc's compiler is more conservative with inlining.
 #pragma inline_recursion(on)
+#endif
 
 template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped>
 inline void MemoryManager::MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size,
                                            FuncMapped&& func_mapped, FuncReserved&& func_reserved,
                                            FuncUnmapped&& func_unmapped) const {
+    static constexpr bool BOOL_BREAK_MAPPED =
+        std::is_same_v<std::invoke_result_t<FuncMapped, std::size_t, std::size_t, std::size_t>,
+                       bool>;
+    static constexpr bool BOOL_BREAK_RESERVED =
+        std::is_same_v<std::invoke_result_t<FuncReserved, std::size_t, std::size_t, std::size_t>,
+                       bool>;
+    static constexpr bool BOOL_BREAK_UNMAPPED =
+        std::is_same_v<std::invoke_result_t<FuncUnmapped, std::size_t, std::size_t, std::size_t>,
+                       bool>;
257 u64 used_page_size; 283 u64 used_page_size;
258 u64 used_page_mask; 284 u64 used_page_mask;
259 u64 used_page_bits; 285 u64 used_page_bits;
@@ -276,11 +302,31 @@ inline void MemoryManager::MemoryOperation(GPUVAddr gpu_src_addr, std::size_t si
             std::min(static_cast<std::size_t>(used_page_size) - page_offset, remaining_size)};
         auto entry = GetEntry<is_big_pages>(current_address);
         if (entry == EntryType::Mapped) [[likely]] {
-            func_mapped(page_index, page_offset, copy_amount);
+            if constexpr (BOOL_BREAK_MAPPED) {
+                if (func_mapped(page_index, page_offset, copy_amount)) {
+                    return;
+                }
+            } else {
+                func_mapped(page_index, page_offset, copy_amount);
+            }
+
         } else if (entry == EntryType::Reserved) {
-            func_reserved(page_index, page_offset, copy_amount);
+            if constexpr (BOOL_BREAK_RESERVED) {
+                if (func_reserved(page_index, page_offset, copy_amount)) {
+                    return;
+                }
+            } else {
+                func_reserved(page_index, page_offset, copy_amount);
+            }
+
         } else [[unlikely]] {
-            func_unmapped(page_index, page_offset, copy_amount);
+            if constexpr (BOOL_BREAK_UNMAPPED) {
+                if (func_unmapped(page_index, page_offset, copy_amount)) {
+                    return;
+                }
+            } else {
+                func_unmapped(page_index, page_offset, copy_amount);
+            }
         }
         page_index++;
         page_offset = 0;
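The `if constexpr` branches let one page walker serve both void visitors and bool visitors that request an early stop. A minimal sketch of the same dispatch, assuming the `std::invoke_result_t`-based detection used above (testing the visitor's return type rather than the functor type itself):

#include <cstddef>
#include <type_traits>

// Calls func(i) for each page index; if func returns bool, a true result
// aborts the walk. Returns whether the walk was aborted early.
template <typename Func>
bool VisitPages(std::size_t page_count, Func&& func) {
    constexpr bool returns_bool =
        std::is_same_v<std::invoke_result_t<Func, std::size_t>, bool>;
    for (std::size_t i = 0; i < page_count; ++i) {
        if constexpr (returns_bool) {
            if (func(i)) {
                return true; // visitor requested an early break
            }
        } else {
            func(i);
        }
    }
    return false;
}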
@@ -303,7 +349,8 @@ void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer,
         if constexpr (is_safe) {
             rasterizer->FlushRegion(cpu_addr_base, copy_amount);
         }
-        memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount);
+        u8* physical = memory.GetPointer(cpu_addr_base);
+        std::memcpy(dest_buffer, physical, copy_amount);
         dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
     };
     auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
@@ -312,9 +359,12 @@ void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer,
         if constexpr (is_safe) {
             rasterizer->FlushRegion(cpu_addr_base, copy_amount);
         }
-        memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount);
-        // u8* physical = device_memory.GetPointer(big_page_table_physical[page_index] + offset);
-        // std::memcpy(dest_buffer, physical, copy_amount);
+        if (!IsBigPageContinous(page_index)) {
+            memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount);
+        } else {
+            u8* physical = memory.GetPointer(cpu_addr_base);
+            std::memcpy(dest_buffer, physical, copy_amount);
+        }
         dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
     };
     auto read_short_pages = [&](std::size_t page_index, std::size_t offset,
@@ -347,7 +397,8 @@ void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffe
         if constexpr (is_safe) {
             rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
         }
-        memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount);
+        u8* physical = memory.GetPointer(cpu_addr_base);
+        std::memcpy(physical, src_buffer, copy_amount);
         src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
     };
     auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
@@ -356,10 +407,12 @@ void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffe
         if constexpr (is_safe) {
             rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
         }
-        memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount);
-        /*u8* physical =
-            device_memory.GetPointer(big_page_table_physical[page_index] << cpu_page_bits) + offset;
-        std::memcpy(physical, src_buffer, copy_amount);*/
+        if (!IsBigPageContinous(page_index)) {
+            memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount);
+        } else {
+            u8* physical = memory.GetPointer(cpu_addr_base);
+            std::memcpy(physical, src_buffer, copy_amount);
+        }
         src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
     };
     auto write_short_pages = [&](std::size_t page_index, std::size_t offset,
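Both `mapped_big` lambdas now share one shape: when the big page was flagged continuous at map time, a single `memcpy` against the base host pointer replaces the page-by-page unsafe block copy. A condensed sketch of the read side, where `GetHostPointer` and `ReadPerPage` are hypothetical stand-ins for `memory.GetPointer` and `memory.ReadBlockUnsafe`:

#include <cstddef>
#include <cstdint>
#include <cstring>

using VAddr = std::uint64_t;
std::uint8_t* GetHostPointer(VAddr addr);                     // stand-in, assumed
void ReadPerPage(VAddr addr, void* dest, std::size_t amount); // stand-in, assumed

void ReadBigPage(VAddr cpu_addr_base, void* dest, std::size_t copy_amount,
                 bool is_continuous) {
    if (is_continuous) {
        // Fast path: the span is one gapless host range, so one copy suffices.
        std::memcpy(dest, GetHostPointer(cpu_addr_base), copy_amount);
    } else {
        // Slow path: holes are possible, so copy page by page.
        ReadPerPage(cpu_addr_base, dest, copy_amount);
    }
}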
@@ -413,48 +466,80 @@ void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std
 }
 
 bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const {
-    const auto cpu_addr{GpuToCpuAddress(gpu_addr)};
-    if (!cpu_addr) {
+    if (GetEntry<true>(gpu_addr) == EntryType::Mapped) [[likely]] {
+        size_t page_index = gpu_addr >> big_page_bits;
+        if (IsBigPageContinous(page_index)) [[likely]] {
+            const std::size_t page{(gpu_addr & big_page_mask) + size};
+            return page <= big_page_size;
+        }
+        const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
+        return page <= Core::Memory::YUZU_PAGESIZE;
+    }
+    if (GetEntry<false>(gpu_addr) != EntryType::Mapped) {
         return false;
     }
-    const std::size_t page{(*cpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
+    const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
     return page <= Core::Memory::YUZU_PAGESIZE;
 }
 
 bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const {
-    size_t page_index{gpu_addr >> big_page_bits};
-    const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits};
     std::optional<VAddr> old_page_addr{};
-    while (page_index != page_last) {
-        const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
-        if (!page_addr || *page_addr == 0) {
-            return false;
-        }
-        if (old_page_addr) {
-            if (*old_page_addr + page_size != *page_addr) {
-                return false;
-            }
-        }
-        old_page_addr = page_addr;
-        ++page_index;
-    }
-    return true;
+    bool result{true};
+    auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
+                    std::size_t copy_amount) {
+        result = false;
+        return true;
+    };
+    auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+        const VAddr cpu_addr_base =
+            (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
+        if (old_page_addr && *old_page_addr != cpu_addr_base) {
+            result = false;
+            return true;
+        }
+        old_page_addr = {cpu_addr_base + copy_amount};
+        return false;
+    };
+    auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
+        const VAddr cpu_addr_base =
+            (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
+        if (old_page_addr && *old_page_addr != cpu_addr_base) {
+            result = false;
+            return true;
+        }
+        old_page_addr = {cpu_addr_base + copy_amount};
+        return false;
+    };
+    auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
+                                 std::size_t copy_amount) {
+        GPUVAddr base = (page_index << big_page_bits) + offset;
+        MemoryOperation<false>(base, copy_amount, short_check, fail, fail);
+        return !result;
+    };
+    MemoryOperation<true>(gpu_addr, size, big_check, fail, check_short_pages);
+    return result;
 }
 
 bool MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) const {
-    size_t page_index{gpu_addr >> page_bits};
-    const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits};
-    while (page_index < page_last) {
-        if (GetEntry<false>(page_index << page_bits) == EntryType::Free) {
-            return false;
-        }
-        ++page_index;
-    }
-    return true;
+    std::optional<VAddr> old_page_addr{};
+    bool result{true};
+    auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
+                    [[maybe_unused]] std::size_t copy_amount) {
+        result = false;
+        return true;
+    };
+    auto pass = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
+                    [[maybe_unused]] std::size_t copy_amount) { return false; };
+    auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
+                                 std::size_t copy_amount) {
+        GPUVAddr base = (page_index << big_page_bits) + offset;
+        MemoryOperation<false>(base, copy_amount, pass, pass, fail);
+        return !result;
+    };
+    MemoryOperation<true>(gpu_addr, size, pass, fail, check_short_pages);
+    return result;
 }
 
-#pragma inline_recursion(on)
-
 std::vector<std::pair<GPUVAddr, std::size_t>> MemoryManager::GetSubmappedRange(
     GPUVAddr gpu_addr, std::size_t size) const {
     std::vector<std::pair<GPUVAddr, std::size_t>> result{};
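The rewritten `IsGranularRange` reduces to an offset check: a range is granular when `(gpu_addr & page_mask) + size` does not cross the page boundary. A worked instance, assuming 64 KiB big pages (the mask value is illustrative, not read from the class):

#include <cstdint>

constexpr std::uint64_t big_page_size = 0x10000; // 64 KiB, assumed
constexpr std::uint64_t big_page_mask = big_page_size - 1;

// gpu_addr = 0x12345678 -> offset inside its big page is 0x5678.
// 0x5678 + 0x200 = 0x5878, below 0x10000, so the range is granular.
static_assert(((0x12345678ULL & big_page_mask) + 0x200) <= big_page_size);
// Crossing case: offset 0xFF00 plus 0x200 ends at 0x10100, past the boundary.
static_assert(!(((0x1234FF00ULL & big_page_mask) + 0x200) <= big_page_size));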
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 9c388a06e..8f8877a92 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -105,9 +105,6 @@ public:
     void FlushRegion(GPUVAddr gpu_addr, size_t size) const;
 
 private:
-    [[nodiscard]] std::optional<GPUVAddr> FindFreeRange(std::size_t size, std::size_t align,
-                                                        bool start_32bit_address = false) const;
-
     template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped>
     inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped,
                                 FuncReserved&& func_reserved, FuncUnmapped&& func_unmapped) const;
@@ -127,6 +124,9 @@ private:
         }
     }
 
+    inline bool IsBigPageContinous(size_t big_page_index) const;
+    inline void SetBigPageContinous(size_t big_page_index, bool value);
+
     Core::System& system;
     Core::Memory::Memory& memory;
     Core::DeviceMemory& device_memory;
@@ -169,7 +169,10 @@ private:
 
     Common::MultiLevelPageTable<u32> page_table;
     Common::VirtualBuffer<u32> big_page_table_cpu;
-    Common::VirtualBuffer<u32> big_page_table_physical;
+
+    std::vector<u64> big_page_continous;
+
+    constexpr static size_t continous_bits = 64;
 
     const size_t unique_identifier;
 
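The header sizes the new bitset at one bit per big page: `big_page_table_size / continous_bits` 64-bit words, matching the `resize` in the constructor. A sketch of the arithmetic, with an illustrative table size:

#include <cstddef>

constexpr std::size_t continous_bits = 64;           // bits per word, from the header
constexpr std::size_t big_page_table_size = 1 << 20; // illustrative entry count, assumed
constexpr std::size_t words = big_page_table_size / continous_bits;
static_assert(words * continous_bits == big_page_table_size,
              "sizing assumes the table size is a multiple of 64");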