Diffstat (limited to 'src')
-rw-r--r--  src/common/multi_level_page_table.cpp |   1
-rw-r--r--  src/common/multi_level_page_table.inc |   7
-rw-r--r--  src/video_core/memory_manager.cpp     | 147
-rw-r--r--  src/video_core/memory_manager.h       |  98
4 files changed, 86 insertions(+), 167 deletions(-)
diff --git a/src/common/multi_level_page_table.cpp b/src/common/multi_level_page_table.cpp
index 561785ca7..aed04d0b5 100644
--- a/src/common/multi_level_page_table.cpp
+++ b/src/common/multi_level_page_table.cpp
@@ -4,4 +4,5 @@ namespace Common {
 template class Common::MultiLevelPageTable<GPUVAddr>;
 template class Common::MultiLevelPageTable<VAddr>;
 template class Common::MultiLevelPageTable<PAddr>;
+template class Common::MultiLevelPageTable<u32>;
 } // namespace Common
diff --git a/src/common/multi_level_page_table.inc b/src/common/multi_level_page_table.inc
index a75e61f9d..7fbcb908a 100644
--- a/src/common/multi_level_page_table.inc
+++ b/src/common/multi_level_page_table.inc
@@ -20,7 +20,7 @@ MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bit
     : address_space_bits{address_space_bits_},
       first_level_bits{first_level_bits_}, page_bits{page_bits_} {
     first_level_shift = address_space_bits - first_level_bits;
-    first_level_chunk_size = 1ULL << (first_level_shift - page_bits);
+    first_level_chunk_size = (1ULL << (first_level_shift - page_bits)) * sizeof(BaseAddr);
     alloc_size = (1ULL << (address_space_bits - page_bits)) * sizeof(BaseAddr);
     std::size_t first_level_size = 1ULL << first_level_bits;
     first_level_map.resize(first_level_size, nullptr);
@@ -53,8 +53,7 @@ MultiLevelPageTable<BaseAddr>::~MultiLevelPageTable() noexcept {
 template <typename BaseAddr>
 void MultiLevelPageTable<BaseAddr>::ReserveRange(u64 start, std::size_t size) {
     const u64 new_start = start >> first_level_shift;
-    const u64 new_end =
-        (start + size + (first_level_chunk_size << page_bits) - 1) >> first_level_shift;
+    const u64 new_end = (start + size) >> first_level_shift;
     for (u64 i = new_start; i <= new_end; i++) {
         if (!first_level_map[i]) {
             AllocateLevel(i);
@@ -64,7 +63,7 @@ void MultiLevelPageTable<BaseAddr>::ReserveRange(u64 start, std::size_t size) {
 
 template <typename BaseAddr>
 void MultiLevelPageTable<BaseAddr>::AllocateLevel(u64 level) {
-    void* ptr = reinterpret_cast<char*>(base_ptr) + level * first_level_chunk_size;
+    void* ptr = reinterpret_cast<char *>(base_ptr) + level * first_level_chunk_size;
 #ifdef _WIN32
     void* base{VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE)};
 #else
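
Note on the ReserveRange change above: with first_level_chunk_size now measured in bytes of backing storage rather than in entries, the last chunk a range touches can be derived with a plain shift of (start + size), and the inclusive loop commits every first-level chunk in between. The standalone sketch below is not part of the commit; it just replays the arithmetic, assuming the defaults the memory manager adopts further down (40-bit address space, 16-bit pages, u32 entries), and the main() wrapper and printed text are illustrative only.

#include <cstdint>
#include <iostream>

int main() {
    const std::uint64_t address_space_bits = 40;  // default used by MemoryManager below
    const std::uint64_t page_bits = 16;
    const std::uint64_t first_level_bits = address_space_bits + page_bits - 38;    // 18
    const std::uint64_t first_level_shift = address_space_bits - first_level_bits; // 22
    // Bytes committed per first-level chunk: 64 entries of 4 bytes each.
    const std::uint64_t chunk_bytes =
        (1ULL << (first_level_shift - page_bits)) * sizeof(std::uint32_t);

    // A 1 MiB mapping starting at GPU VA 8 MiB only touches first-level chunk 2,
    // since each chunk spans 1 << first_level_shift = 4 MiB of GPU VA.
    const std::uint64_t start = 8ULL << 20;
    const std::uint64_t size = 1ULL << 20;
    const std::uint64_t first_chunk = start >> first_level_shift;
    const std::uint64_t last_chunk = (start + size) >> first_level_shift;
    std::cout << "commit chunks " << first_chunk << ".." << last_chunk << ", " << chunk_bytes
              << " bytes each\n";
}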
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index a3efd365e..1e090279f 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -16,36 +16,63 @@
 
 namespace Tegra {
 
-MemoryManager::MemoryManager(Core::System& system_)
-    : system{system_}, page_table(page_table_size) {}
+MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 page_bits_)
+    : system{system_}, address_space_bits{address_space_bits_}, page_bits{page_bits_}, entries{},
+      page_table{address_space_bits, address_space_bits + page_bits - 38, page_bits} {
+    address_space_size = 1ULL << address_space_bits;
+    allocate_start = address_space_bits > 32 ? 1ULL << 32 : 0;
+    page_size = 1ULL << page_bits;
+    page_mask = page_size - 1ULL;
+    const u64 page_table_bits = address_space_bits - cpu_page_bits;
+    const u64 page_table_size = 1ULL << page_table_bits;
+    page_table_mask = page_table_size - 1;
+
+    entries.resize(page_table_size / 32, 0);
+}
 
 MemoryManager::~MemoryManager() = default;
 
-void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
-    rasterizer = rasterizer_;
+MemoryManager::EntryType MemoryManager::GetEntry(size_t position) const {
+    position = position >> page_bits;
+    const u64 entry_mask = entries[position / 32];
+    const size_t sub_index = position % 32;
+    return static_cast<EntryType>((entry_mask >> (2 * sub_index)) & 0x03ULL);
+}
+
+void MemoryManager::SetEntry(size_t position, MemoryManager::EntryType entry) {
+    position = position >> page_bits;
+    const u64 entry_mask = entries[position / 32];
+    const size_t sub_index = position % 32;
+    entries[position / 32] =
+        (~(3ULL << sub_index * 2) & entry_mask) | (static_cast<u64>(entry) << sub_index * 2);
 }
 
-GPUVAddr MemoryManager::UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) {
+template <MemoryManager::EntryType entry_type>
+GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr,
+                                    size_t size) {
     u64 remaining_size{size};
+    if constexpr (entry_type == EntryType::Mapped) {
+        page_table.ReserveRange(gpu_addr, size);
+    }
     for (u64 offset{}; offset < size; offset += page_size) {
-        if (remaining_size < page_size) {
-            SetPageEntry(gpu_addr + offset, page_entry + offset, remaining_size);
-        } else {
-            SetPageEntry(gpu_addr + offset, page_entry + offset);
+        const GPUVAddr current_gpu_addr = gpu_addr + offset;
+        SetEntry(current_gpu_addr, entry_type);
+        if constexpr (entry_type == EntryType::Mapped) {
+            const VAddr current_cpu_addr = cpu_addr + offset;
+            const auto index = PageEntryIndex(current_gpu_addr);
+            page_table[index] = static_cast<u32>(current_cpu_addr >> 12ULL);
         }
         remaining_size -= page_size;
     }
     return gpu_addr;
 }
 
+void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
+    rasterizer = rasterizer_;
+}
+
 GPUVAddr MemoryManager::Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size) {
-    const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first);
-    if (it != map_ranges.end() && it->first == gpu_addr) {
-        it->second = size;
-    } else {
-        map_ranges.insert(it, MapRange{gpu_addr, size});
-    }
-    return UpdateRange(gpu_addr, cpu_addr, size);
+    return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size);
 }
 
 GPUVAddr MemoryManager::MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align) {
@@ -62,13 +89,6 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
     if (size == 0) {
         return;
     }
-    const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first);
-    if (it != map_ranges.end()) {
-        ASSERT(it->first == gpu_addr);
-        map_ranges.erase(it);
-    } else {
-        ASSERT_MSG(false, "Unmapping non-existent GPU address=0x{:x}", gpu_addr);
-    }
     const auto submapped_ranges = GetSubmappedRange(gpu_addr, size);
 
     for (const auto& [map_addr, map_size] : submapped_ranges) {
@@ -79,63 +99,23 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
         rasterizer->UnmapMemory(*cpu_addr, map_size);
     }
 
-    UpdateRange(gpu_addr, PageEntry::State::Unmapped, size);
+    PageTableOp<EntryType::Free>(gpu_addr, 0, size);
 }
 
 std::optional<GPUVAddr> MemoryManager::AllocateFixed(GPUVAddr gpu_addr, std::size_t size) {
     for (u64 offset{}; offset < size; offset += page_size) {
-        if (!GetPageEntry(gpu_addr + offset).IsUnmapped()) {
+        if (GetEntry(gpu_addr + offset) != EntryType::Free) {
             return std::nullopt;
         }
     }
 
-    return UpdateRange(gpu_addr, PageEntry::State::Allocated, size);
+    return PageTableOp<EntryType::Reserved>(gpu_addr, 0, size);
 }
 
 GPUVAddr MemoryManager::Allocate(std::size_t size, std::size_t align) {
     return *AllocateFixed(*FindFreeRange(size, align), size);
 }
 
-void MemoryManager::TryLockPage(PageEntry page_entry, std::size_t size) {
-    if (!page_entry.IsValid()) {
-        return;
-    }
-
-    ASSERT(system.CurrentProcess()
-               ->PageTable()
-               .LockForDeviceAddressSpace(page_entry.ToAddress(), size)
-               .IsSuccess());
-}
-
-void MemoryManager::TryUnlockPage(PageEntry page_entry, std::size_t size) {
-    if (!page_entry.IsValid()) {
-        return;
-    }
-
-    ASSERT(system.CurrentProcess()
-               ->PageTable()
-               .UnlockForDeviceAddressSpace(page_entry.ToAddress(), size)
-               .IsSuccess());
-}
-
-PageEntry MemoryManager::GetPageEntry(GPUVAddr gpu_addr) const {
-    return page_table[PageEntryIndex(gpu_addr)];
-}
-
-void MemoryManager::SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) {
-    // TODO(bunnei): We should lock/unlock device regions. This currently causes issues due to
-    // improper tracking, but should be fixed in the future.
-
-    //// Unlock the old page
-    // TryUnlockPage(page_table[PageEntryIndex(gpu_addr)], size);
-
-    //// Lock the new page
-    // TryLockPage(page_entry, size);
-    auto& current_page = page_table[PageEntryIndex(gpu_addr)];
-
-    current_page = page_entry;
-}
-
 std::optional<GPUVAddr> MemoryManager::FindFreeRange(std::size_t size, std::size_t align,
                                                      bool start_32bit_address) const {
     if (!align) {
@@ -145,9 +125,9 @@ std::optional<GPUVAddr> MemoryManager::FindFreeRange(std::size_t size, std::size
     }
 
     u64 available_size{};
-    GPUVAddr gpu_addr{start_32bit_address ? address_space_start_low : address_space_start};
+    GPUVAddr gpu_addr{allocate_start};
     while (gpu_addr + available_size < address_space_size) {
-        if (GetPageEntry(gpu_addr + available_size).IsUnmapped()) {
+        if (GetEntry(gpu_addr + available_size) == EntryType::Free) {
             available_size += page_size;
 
             if (available_size >= size) {
@@ -168,15 +148,12 @@ std::optional<GPUVAddr> MemoryManager::FindFreeRange(std::size_t size, std::size
 }
 
 std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
-    if (gpu_addr == 0) {
-        return std::nullopt;
-    }
-    const auto page_entry{GetPageEntry(gpu_addr)};
-    if (!page_entry.IsValid()) {
+    if (GetEntry(gpu_addr) != EntryType::Mapped) {
         return std::nullopt;
     }
 
-    return page_entry.ToAddress() + (gpu_addr & page_mask);
+    const VAddr cpu_addr_base = static_cast<VAddr>(page_table[PageEntryIndex(gpu_addr)]) << 12ULL;
+    return cpu_addr_base + (gpu_addr & page_mask);
 }
 
 std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const {
@@ -227,10 +204,6 @@ template void MemoryManager::Write<u32>(GPUVAddr addr, u32 data);
 template void MemoryManager::Write<u64>(GPUVAddr addr, u64 data);
 
 u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) {
-    if (!GetPageEntry(gpu_addr).IsValid()) {
-        return {};
-    }
-
     const auto address{GpuToCpuAddress(gpu_addr)};
     if (!address) {
         return {};
@@ -240,10 +213,6 @@ u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) {
 }
 
 const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
-    if (!GetPageEntry(gpu_addr).IsValid()) {
-        return {};
-    }
-
     const auto address{GpuToCpuAddress(gpu_addr)};
     if (!address) {
         return {};
@@ -252,12 +221,6 @@ const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
     return system.Memory().GetPointer(*address);
 }
 
-size_t MemoryManager::BytesToMapEnd(GPUVAddr gpu_addr) const noexcept {
-    auto it = std::ranges::upper_bound(map_ranges, gpu_addr, {}, &MapRange::first);
-    --it;
-    return it->second - (gpu_addr - it->first);
-}
-
 void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size,
                                   bool is_safe) const {
     std::size_t remaining_size{size};
@@ -268,7 +231,7 @@ void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std:
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
         const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
-        if (page_addr && *page_addr != 0) {
+        if (page_addr) {
             const auto src_addr{*page_addr + page_offset};
             if (is_safe) {
                 // Flush must happen on the rasterizer interface, such that memory is always
@@ -307,7 +270,7 @@ void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffe
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
         const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
-        if (page_addr && *page_addr != 0) {
+        if (page_addr) {
             const auto dest_addr{*page_addr + page_offset};
 
             if (is_safe) {
@@ -392,7 +355,7 @@ bool MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) cons
     size_t page_index{gpu_addr >> page_bits};
     const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits};
     while (page_index < page_last) {
-        if (!page_table[page_index].IsValid() || page_table[page_index].ToAddress() == 0) {
+        if (GetEntry(page_index << page_bits) == EntryType::Free) {
             return false;
         }
         ++page_index;
@@ -408,7 +371,7 @@ std::vector<std::pair<GPUVAddr, std::size_t>> MemoryManager::GetSubmappedRange(
     size_t page_offset{gpu_addr & page_mask};
     std::optional<std::pair<GPUVAddr, std::size_t>> last_segment{};
     std::optional<VAddr> old_page_addr{};
-    const auto extend_size = [&last_segment, &page_index, &page_offset](std::size_t bytes) {
+    const auto extend_size = [this, &last_segment, &page_index, &page_offset](std::size_t bytes) {
         if (!last_segment) {
             const GPUVAddr new_base_addr = (page_index << page_bits) + page_offset;
             last_segment = {new_base_addr, bytes};
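
Note on the rewritten memory_manager.cpp above: page tracking is now split in two. The per-page state (Free/Reserved/Mapped) is packed two bits at a time, 32 states per u64 word, in the entries vector, while the actual GPU-to-CPU translation lives in a Common::MultiLevelPageTable<u32> whose slots hold the CPU address shifted right by 12 bits. The sketch below is not from the commit; it takes a page index directly instead of a GPU address, and all names are local to the example, but it reproduces the same bit arithmetic that GetEntry/SetEntry use.

#include <cassert>
#include <cstdint>
#include <vector>

// Two-bit page states, 32 per 64-bit word.
enum class EntryType : std::uint64_t { Free = 0, Reserved = 1, Mapped = 2 };

struct PackedStates {
    std::vector<std::uint64_t> words;

    explicit PackedStates(std::size_t num_pages) : words((num_pages + 31) / 32, 0) {}

    EntryType Get(std::size_t page) const {
        const std::uint64_t word = words[page / 32];
        const std::size_t sub_index = page % 32;
        return static_cast<EntryType>((word >> (2 * sub_index)) & 0x03ULL);
    }

    void Set(std::size_t page, EntryType entry) {
        const std::size_t sub_index = page % 32;
        std::uint64_t& word = words[page / 32];
        // Clear the two bits for this page, then write the new state.
        word = (word & ~(3ULL << (sub_index * 2))) |
               (static_cast<std::uint64_t>(entry) << (sub_index * 2));
    }
};

int main() {
    PackedStates states(1ULL << 24); // one state per 64 KiB page of a 40-bit space
    states.Set(100, EntryType::Mapped);
    states.Set(101, EntryType::Reserved);
    assert(states.Get(100) == EntryType::Mapped);
    assert(states.Get(101) == EntryType::Reserved);
    assert(states.Get(102) == EntryType::Free);
}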
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 74f9ce175..0a763fd19 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include "common/common_types.h"
+#include "common/multi_level_page_table.h"
 
 namespace VideoCore {
 class RasterizerInterface;
@@ -19,55 +20,10 @@ class System;
 
 namespace Tegra {
 
-class PageEntry final {
-public:
-    enum class State : u32 {
-        Unmapped = static_cast<u32>(-1),
-        Allocated = static_cast<u32>(-2),
-    };
-
-    constexpr PageEntry() = default;
-    constexpr PageEntry(State state_) : state{state_} {}
-    constexpr PageEntry(VAddr addr) : state{static_cast<State>(addr >> ShiftBits)} {}
-
-    [[nodiscard]] constexpr bool IsUnmapped() const {
-        return state == State::Unmapped;
-    }
-
-    [[nodiscard]] constexpr bool IsAllocated() const {
-        return state == State::Allocated;
-    }
-
-    [[nodiscard]] constexpr bool IsValid() const {
-        return !IsUnmapped() && !IsAllocated();
-    }
-
-    [[nodiscard]] constexpr VAddr ToAddress() const {
-        if (!IsValid()) {
-            return {};
-        }
-
-        return static_cast<VAddr>(state) << ShiftBits;
-    }
-
-    [[nodiscard]] constexpr PageEntry operator+(u64 offset) const {
-        // If this is a reserved value, offsets do not apply
-        if (!IsValid()) {
-            return *this;
-        }
-        return PageEntry{(static_cast<VAddr>(state) << ShiftBits) + offset};
-    }
-
-private:
-    static constexpr std::size_t ShiftBits{12};
-
-    State state{State::Unmapped};
-};
-static_assert(sizeof(PageEntry) == 4, "PageEntry is too large");
-
 class MemoryManager final {
 public:
-    explicit MemoryManager(Core::System& system_);
+    explicit MemoryManager(Core::System& system_, u64 address_space_bits_ = 40,
+                           u64 page_bits_ = 16);
     ~MemoryManager();
 
     /// Binds a renderer to the memory manager.
@@ -86,9 +42,6 @@ public:
     [[nodiscard]] u8* GetPointer(GPUVAddr addr);
     [[nodiscard]] const u8* GetPointer(GPUVAddr addr) const;
 
-    /// Returns the number of bytes until the end of the memory map containing the given GPU address
-    [[nodiscard]] size_t BytesToMapEnd(GPUVAddr gpu_addr) const noexcept;
-
     /**
      * ReadBlock and WriteBlock are full read and write operations over virtual
      * GPU Memory. It's important to use these when GPU memory may not be continuous
@@ -145,44 +98,47 @@ public:
     void FlushRegion(GPUVAddr gpu_addr, size_t size) const;
 
 private:
-    [[nodiscard]] PageEntry GetPageEntry(GPUVAddr gpu_addr) const;
-    void SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size = page_size);
-    GPUVAddr UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size);
     [[nodiscard]] std::optional<GPUVAddr> FindFreeRange(std::size_t size, std::size_t align,
                                                         bool start_32bit_address = false) const;
 
-    void TryLockPage(PageEntry page_entry, std::size_t size);
-    void TryUnlockPage(PageEntry page_entry, std::size_t size);
-
     void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size,
                        bool is_safe) const;
     void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size,
                         bool is_safe);
 
-    [[nodiscard]] static constexpr std::size_t PageEntryIndex(GPUVAddr gpu_addr) {
+    [[nodiscard]] inline std::size_t PageEntryIndex(GPUVAddr gpu_addr) const {
         return (gpu_addr >> page_bits) & page_table_mask;
     }
 
-    static constexpr u64 address_space_size = 1ULL << 40;
-    static constexpr u64 address_space_start = 1ULL << 32;
-    static constexpr u64 address_space_start_low = 1ULL << 16;
-    static constexpr u64 page_bits{16};
-    static constexpr u64 page_size{1 << page_bits};
-    static constexpr u64 page_mask{page_size - 1};
-    static constexpr u64 page_table_bits{24};
-    static constexpr u64 page_table_size{1 << page_table_bits};
-    static constexpr u64 page_table_mask{page_table_size - 1};
-
     Core::System& system;
 
+    const u64 address_space_bits;
+    const u64 page_bits;
+    u64 address_space_size;
+    u64 allocate_start;
+    u64 page_size;
+    u64 page_mask;
+    u64 page_table_mask;
+    static constexpr u64 cpu_page_bits{12};
+
     VideoCore::RasterizerInterface* rasterizer = nullptr;
 
-    std::vector<PageEntry> page_table;
+    enum class EntryType : u64 {
+        Free = 0,
+        Reserved = 1,
+        Mapped = 2,
+    };
+
+    std::vector<u64> entries;
+
+    template <EntryType entry_type>
+    GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size);
+
+    EntryType GetEntry(size_t position) const;
 
-    using MapRange = std::pair<GPUVAddr, size_t>;
-    std::vector<MapRange> map_ranges;
+    void SetEntry(size_t position, EntryType entry);
 
-    std::vector<std::pair<VAddr, std::size_t>> cache_invalidate_queue;
+    Common::MultiLevelPageTable<u32> page_table;
 };
 
 } // namespace Tegra
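
For reference, the translation scheme the new header implies: each page-table slot is a u32 holding the CPU address shifted right by cpu_page_bits (12), so GpuToCpuAddress reconstructs the CPU address by shifting the entry back up and adding the offset inside the GPU page. The following minimal sketch is not part of the commit; it assumes the default page_bits of 16 from the constructor, and the helper names are made up for the example.

#include <cassert>
#include <cstdint>

using GPUVAddr = std::uint64_t;
using VAddr = std::uint64_t;

constexpr std::uint64_t page_bits = 16;                  // default GPU page size: 64 KiB
constexpr std::uint64_t page_mask = (1ULL << page_bits) - 1;
constexpr std::uint64_t cpu_page_bits = 12;

// Encode the backing CPU page the way PageTableOp<EntryType::Mapped> stores it.
constexpr std::uint32_t EncodeEntry(VAddr cpu_addr) {
    return static_cast<std::uint32_t>(cpu_addr >> cpu_page_bits);
}

// Decode it the way GpuToCpuAddress does, re-attaching the offset within the page.
constexpr VAddr DecodeEntry(std::uint32_t entry, GPUVAddr gpu_addr) {
    return (static_cast<VAddr>(entry) << cpu_page_bits) + (gpu_addr & page_mask);
}

int main() {
    const VAddr cpu_page = 0x12345678ULL & ~page_mask; // page-aligned CPU address
    const GPUVAddr gpu_addr = 0x200001234ULL;          // GPU address with an in-page offset
    const std::uint32_t entry = EncodeEntry(cpu_page);
    assert(DecodeEntry(entry, gpu_addr) == cpu_page + (gpu_addr & page_mask));
}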