Diffstat (limited to 'src/core/memory.cpp')
-rw-r--r--  src/core/memory.cpp  1008
1 file changed, 589 insertions(+), 419 deletions(-)
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index fa49f3dd0..91bf07a92 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -17,529 +17,699 @@
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/vm_manager.h"
 #include "core/memory.h"
-#include "core/memory_setup.h"
 #include "video_core/gpu.h"

 namespace Memory {

-static Common::PageTable* current_page_table = nullptr;
+// Implementation class used to keep the specifics of the memory subsystem hidden
+// from outside classes. This also allows modification to the internals of the memory
+// subsystem without needing to rebuild all files that make use of the memory interface.
+struct Memory::Impl {
+    explicit Impl(Core::System& system_) : system{system_} {}

-void SetCurrentPageTable(Kernel::Process& process) {
-    current_page_table = &process.VMManager().page_table;
+    void SetCurrentPageTable(Kernel::Process& process) {
+        current_page_table = &process.VMManager().page_table;

-    const std::size_t address_space_width = process.VMManager().GetAddressSpaceWidth();
+        const std::size_t address_space_width = process.VMManager().GetAddressSpaceWidth();

-    auto& system = Core::System::GetInstance();
-    system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width);
-    system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width);
-    system.ArmInterface(2).PageTableChanged(*current_page_table, address_space_width);
-    system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width);
-}
+        system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width);
+        system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width);
+        system.ArmInterface(2).PageTableChanged(*current_page_table, address_space_width);
+        system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width);
+    }

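The new comment block above describes the pimpl (pointer-to-implementation) idiom: the public Memory class keeps only a std::unique_ptr to a forward-declared Impl, so the subsystem's internals can change without forcing a rebuild of every file that includes the header. A minimal standalone sketch of the pattern (Widget, Frob, and the file split are illustrative names, not from this codebase):

    // widget.h -- the only thing clients ever see.
    #include <memory>

    class Widget {
    public:
        Widget();
        ~Widget(); // defined in widget.cpp, where Impl is a complete type
        int Frob(int amount);

    private:
        struct Impl;                // forward declaration only
        std::unique_ptr<Impl> impl; // layout of Impl stays hidden from clients
    };

    // widget.cpp -- the single translation unit rebuilt when internals change.
    struct Widget::Impl {
        int state = 0;
    };

    Widget::Widget() : impl{std::make_unique<Impl>()} {}
    Widget::~Widget() = default; // unique_ptr's deleter needs the complete Impl here

    int Widget::Frob(int amount) {
        impl->state += amount;
        return impl->state;
    }

The forwarding functions at the bottom of this diff (Memory::Read8, Memory::WriteBlock, and so on) are exactly this pattern applied to the memory subsystem.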
-static void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory,
-                     Common::PageType type) {
-    LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE,
-              (base + size) * PAGE_SIZE);
-
-    // During boot, current_page_table might not be set yet, in which case we need not flush
-    if (Core::System::GetInstance().IsPoweredOn()) {
-        auto& gpu = Core::System::GetInstance().GPU();
-        for (u64 i = 0; i < size; i++) {
-            const auto page = base + i;
-            if (page_table.attributes[page] == Common::PageType::RasterizerCachedMemory) {
-                gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
-            }
-        }
-    }
+    void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
+        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
+        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
+    }

-    VAddr end = base + size;
-    ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
-               base + page_table.pointers.size());
+    void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
+                     Common::MemoryHookPointer mmio_handler) {
+        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
+        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr,
+                 Common::PageType::Special);
+
+        const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
+        const Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice,
+                                           std::move(mmio_handler)};
+        page_table.special_regions.add(
+            std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
+    }

-    std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type);
+    void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
+        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
+        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr,
+                 Common::PageType::Unmapped);

-    if (memory == nullptr) {
-        std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end, memory);
-    } else {
-        while (base != end) {
-            page_table.pointers[base] = memory;
+        const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
+        page_table.special_regions.erase(interval);
+    }

-            base += 1;
-            memory += PAGE_SIZE;
-        }
-    }
-}
+    void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+                      Common::MemoryHookPointer hook) {
+        const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
+        const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
+        page_table.special_regions.add(
+            std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
+    }

-void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
-    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
-    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
-    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
-}
+    void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+                         Common::MemoryHookPointer hook) {
+        const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
+        const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
+        page_table.special_regions.subtract(
+            std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
+    }

-void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
-                 Common::MemoryHookPointer mmio_handler) {
-    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
-    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
-    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Special);
+    bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) const {
+        const auto& page_table = process.VMManager().page_table;

-    auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
-    Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice, std::move(mmio_handler)};
-    page_table.special_regions.add(
-        std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
-}
+        const u8* const page_pointer = page_table.pointers[vaddr >> PAGE_BITS];
+        if (page_pointer != nullptr) {
+            return true;
+        }

-void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
-    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
-    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
-    MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, Common::PageType::Unmapped);
+        if (page_table.attributes[vaddr >> PAGE_BITS] == Common::PageType::RasterizerCachedMemory) {
+            return true;
+        }

-    auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
-    page_table.special_regions.erase(interval);
-}
+        if (page_table.attributes[vaddr >> PAGE_BITS] != Common::PageType::Special) {
+            return false;
+        }

-void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
-                  Common::MemoryHookPointer hook) {
-    auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
-    Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
-    page_table.special_regions.add(
-        std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
-}
+        return false;
+    }

-void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
-                     Common::MemoryHookPointer hook) {
-    auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
-    Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)};
-    page_table.special_regions.subtract(
-        std::make_pair(interval, std::set<Common::SpecialRegion>{region}));
-}
+    bool IsValidVirtualAddress(VAddr vaddr) const {
+        return IsValidVirtualAddress(*system.CurrentProcess(), vaddr);
+    }

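MapIoRegion, AddDebugHook, and RemoveDebugHook all store their handlers in special_regions, which these calls treat as a boost::icl interval map from closed address intervals to sets of handlers: add() unions a set into every overlapped interval, subtract() removes one handler without disturbing others, and erase() drops whole segments. A small sketch of that aggregate-on-overlap behavior, assuming an interval_map with std::set values as the calls above suggest:

    #include <iostream>
    #include <set>
    #include <boost/icl/interval_map.hpp>

    int main() {
        using VAddr = unsigned long long;
        using Interval = boost::icl::discrete_interval<VAddr>;
        boost::icl::interval_map<VAddr, std::set<int>> regions;

        // Handlers 1 and 2 cover overlapping ranges; the overlap holds both.
        regions.add(std::make_pair(Interval::closed(0x1000, 0x2fff), std::set<int>{1}));
        regions.add(std::make_pair(Interval::closed(0x2000, 0x3fff), std::set<int>{2}));

        // Removing handler 1 leaves handler 2 intact over its full range.
        regions.subtract(std::make_pair(Interval::closed(0x1000, 0x2fff), std::set<int>{1}));

        for (const auto& [interval, handlers] : regions) {
            std::cout << std::hex << interval.lower() << '-' << interval.upper()
                      << " -> " << handlers.size() << " handler(s)\n";
        }
    }

This is why the handlers are wrapped in std::set<Common::SpecialRegion> rather than stored directly: the set is the aggregation unit the interval map needs in order to merge and split regions correctly.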
-/**
- * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
- * using a VMA from the current process
- */
-static u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) {
-    const auto& vm_manager = process.VMManager();
+    /**
+     * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
+     * using a VMA from the current process
+     */
+    u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) {
+        const auto& vm_manager = process.VMManager();

-    const auto it = vm_manager.FindVMA(vaddr);
-    DEBUG_ASSERT(vm_manager.IsValidHandle(it));
+        const auto it = vm_manager.FindVMA(vaddr);
+        DEBUG_ASSERT(vm_manager.IsValidHandle(it));

-    u8* direct_pointer = nullptr;
-    const auto& vma = it->second;
-    switch (vma.type) {
-    case Kernel::VMAType::AllocatedMemoryBlock:
-        direct_pointer = vma.backing_block->data() + vma.offset;
-        break;
-    case Kernel::VMAType::BackingMemory:
-        direct_pointer = vma.backing_memory;
-        break;
-    case Kernel::VMAType::Free:
-        return nullptr;
-    default:
-        UNREACHABLE();
-    }
+        u8* direct_pointer = nullptr;
+        const auto& vma = it->second;
+        switch (vma.type) {
+        case Kernel::VMAType::AllocatedMemoryBlock:
+            direct_pointer = vma.backing_block->data() + vma.offset;
+            break;
+        case Kernel::VMAType::BackingMemory:
+            direct_pointer = vma.backing_memory;
+            break;
+        case Kernel::VMAType::Free:
+            return nullptr;
+        default:
+            UNREACHABLE();
+        }
+
+        return direct_pointer + (vaddr - vma.base);
+    }

-    return direct_pointer + (vaddr - vma.base);
-}
+    /**
+     * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
+     * using a VMA from the current process.
+     */
+    u8* GetPointerFromVMA(VAddr vaddr) {
+        return GetPointerFromVMA(*system.CurrentProcess(), vaddr);
+    }

-/**
- * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
- * using a VMA from the current process.
- */
-static u8* GetPointerFromVMA(VAddr vaddr) {
-    return GetPointerFromVMA(*Core::System::GetInstance().CurrentProcess(), vaddr);
-}
+    u8* GetPointer(const VAddr vaddr) {
+        u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
+        if (page_pointer != nullptr) {
+            return page_pointer + (vaddr & PAGE_MASK);
+        }

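GetPointer shows the fast path used throughout this file: the high bits of the virtual address index the flat pointers array, and the low bits are the byte offset inside that host page, so a translation costs one shift, one mask, and one array load. A minimal sketch of the arithmetic, assuming the 4 KiB pages (PAGE_BITS = 12) this page-table layout implies:

    #include <cstdint>
    #include <cstdio>

    constexpr std::uint64_t PAGE_BITS = 12;                // assumption: 4 KiB pages
    constexpr std::uint64_t PAGE_SIZE = 1ULL << PAGE_BITS; // 0x1000
    constexpr std::uint64_t PAGE_MASK = PAGE_SIZE - 1;     // 0x0FFF

    int main() {
        const std::uint64_t vaddr = 0x12345678;
        const std::uint64_t page_index = vaddr >> PAGE_BITS; // slot in pointers[]
        const std::uint64_t page_offset = vaddr & PAGE_MASK; // byte within the page
        std::printf("index=0x%llx offset=0x%llx\n",
                    static_cast<unsigned long long>(page_index),
                    static_cast<unsigned long long>(page_offset));
        // prints: index=0x12345 offset=0x678
    }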
-template <typename T>
-T Read(const VAddr vaddr) {
-    const u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
-    if (page_pointer) {
-        // NOTE: Avoid adding any extra logic to this fast-path block
-        T value;
-        std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T));
-        return value;
-    }
-
-    Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
-    switch (type) {
-    case Common::PageType::Unmapped:
-        LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr);
-        return 0;
-    case Common::PageType::Memory:
-        ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
-        break;
-    case Common::PageType::RasterizerCachedMemory: {
-        auto host_ptr{GetPointerFromVMA(vaddr)};
-        Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), sizeof(T));
-        T value;
-        std::memcpy(&value, host_ptr, sizeof(T));
-        return value;
-    }
-    default:
-        UNREACHABLE();
-    }
-    return {};
-}
+        if (current_page_table->attributes[vaddr >> PAGE_BITS] ==
+            Common::PageType::RasterizerCachedMemory) {
+            return GetPointerFromVMA(vaddr);
+        }

-template <typename T>
-void Write(const VAddr vaddr, const T data) {
-    u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
-    if (page_pointer) {
-        // NOTE: Avoid adding any extra logic to this fast-path block
-        std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T));
-        return;
-    }
-
-    Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
-    switch (type) {
-    case Common::PageType::Unmapped:
-        LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
-                  static_cast<u32>(data), vaddr);
-        return;
-    case Common::PageType::Memory:
-        ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
-        break;
-    case Common::PageType::RasterizerCachedMemory: {
-        auto host_ptr{GetPointerFromVMA(vaddr)};
-        Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(T));
-        std::memcpy(host_ptr, &data, sizeof(T));
-        break;
-    }
-    default:
-        UNREACHABLE();
-    }
-}
+        LOG_ERROR(HW_Memory, "Unknown GetPointer @ 0x{:016X}", vaddr);
+        return nullptr;
+    }

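Note how the Read and Write templates copy through std::memcpy on the fast path instead of casting the byte pointer to T*: for a trivially copyable T, memcpy is the well-defined way to load from or store to bytes of arbitrary alignment, and compilers collapse the fixed-size call into a single load or store. A short standalone illustration of the pattern:

    #include <cstdint>
    #include <cstring>

    // Load a trivially copyable T from a possibly unaligned byte pointer.
    template <typename T>
    T LoadFromBytes(const unsigned char* bytes) {
        T value;
        std::memcpy(&value, bytes, sizeof(T)); // defined behavior; *(const T*)bytes is not
        return value;
    }

    int main() {
        unsigned char buffer[] = {0x78, 0x56, 0x34, 0x12};
        // Reads 0x12345678 on a little-endian host.
        return LoadFromBytes<std::uint32_t>(buffer) == 0x12345678u ? 0 : 1;
    }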
-bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) {
-    const auto& page_table = process.VMManager().page_table;
+    u8 Read8(const VAddr addr) {
+        return Read<u8>(addr);
+    }

-    const u8* page_pointer = page_table.pointers[vaddr >> PAGE_BITS];
-    if (page_pointer)
-        return true;
+    u16 Read16(const VAddr addr) {
+        return Read<u16_le>(addr);
+    }

-    if (page_table.attributes[vaddr >> PAGE_BITS] == Common::PageType::RasterizerCachedMemory)
-        return true;
+    u32 Read32(const VAddr addr) {
+        return Read<u32_le>(addr);
+    }

-    if (page_table.attributes[vaddr >> PAGE_BITS] != Common::PageType::Special)
-        return false;
+    u64 Read64(const VAddr addr) {
+        return Read<u64_le>(addr);
+    }

-    return false;
-}
+    void Write8(const VAddr addr, const u8 data) {
+        Write<u8>(addr, data);
+    }

-bool IsValidVirtualAddress(const VAddr vaddr) {
-    return IsValidVirtualAddress(*Core::System::GetInstance().CurrentProcess(), vaddr);
-}
+    void Write16(const VAddr addr, const u16 data) {
+        Write<u16_le>(addr, data);
+    }

-bool IsKernelVirtualAddress(const VAddr vaddr) {
-    return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END;
-}
+    void Write32(const VAddr addr, const u32 data) {
+        Write<u32_le>(addr, data);
+    }

-u8* GetPointer(const VAddr vaddr) {
-    u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
-    if (page_pointer) {
-        return page_pointer + (vaddr & PAGE_MASK);
-    }
+    void Write64(const VAddr addr, const u64 data) {
+        Write<u64_le>(addr, data);
+    }

-    if (current_page_table->attributes[vaddr >> PAGE_BITS] ==
-        Common::PageType::RasterizerCachedMemory) {
-        return GetPointerFromVMA(vaddr);
-    }
+    std::string ReadCString(VAddr vaddr, std::size_t max_length) {
+        std::string string;
+        string.reserve(max_length);
+        for (std::size_t i = 0; i < max_length; ++i) {
+            const char c = Read8(vaddr);
+            if (c == '\0') {
+                break;
+            }
+            string.push_back(c);
+            ++vaddr;
+        }
+        string.shrink_to_fit();
+        return string;
+    }

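The new ReadCString walks guest memory one byte at a time and stops at the first NUL terminator or at max_length, whichever comes first, so a missing terminator in guest memory can never run the copy away. A host-side analogue of the same bounded scan (ReadBoundedCString is an illustrative stand-in, not an API in this file):

    #include <cstddef>
    #include <string>

    std::string ReadBoundedCString(const char* src, std::size_t max_length) {
        std::string result;
        result.reserve(max_length);
        for (std::size_t i = 0; i < max_length && src[i] != '\0'; ++i) {
            result.push_back(src[i]);
        }
        result.shrink_to_fit(); // reserve() may have over-allocated
        return result;
    }

    int main() {
        const char raw[] = {'a', 'b', 'c', '\0', 'x'};
        return ReadBoundedCString(raw, sizeof(raw)).size() == 3 ? 0 : 1;
    }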
-    LOG_ERROR(HW_Memory, "Unknown GetPointer @ 0x{:016X}", vaddr);
-    return nullptr;
-}
+    void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer,
+                   const std::size_t size) {
+        const auto& page_table = process.VMManager().page_table;
+
+        std::size_t remaining_size = size;
+        std::size_t page_index = src_addr >> PAGE_BITS;
+        std::size_t page_offset = src_addr & PAGE_MASK;
+
+        while (remaining_size > 0) {
+            const std::size_t copy_amount =
+                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
+            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+
+            switch (page_table.attributes[page_index]) {
+            case Common::PageType::Unmapped: {
+                LOG_ERROR(HW_Memory,
+                          "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
+                          current_vaddr, src_addr, size);
+                std::memset(dest_buffer, 0, copy_amount);
+                break;
+            }
+            case Common::PageType::Memory: {
+                DEBUG_ASSERT(page_table.pointers[page_index]);

-std::string ReadCString(VAddr vaddr, std::size_t max_length) {
-    std::string string;
-    string.reserve(max_length);
-    for (std::size_t i = 0; i < max_length; ++i) {
-        char c = Read8(vaddr);
-        if (c == '\0')
-            break;
-        string.push_back(c);
-        ++vaddr;
-    }
-    string.shrink_to_fit();
-    return string;
-}
+                const u8* const src_ptr = page_table.pointers[page_index] + page_offset;
+                std::memcpy(dest_buffer, src_ptr, copy_amount);
+                break;
+            }
+            case Common::PageType::RasterizerCachedMemory: {
+                const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr);
+                system.GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount);
+                std::memcpy(dest_buffer, host_ptr, copy_amount);
+                break;
+            }
+            default:
+                UNREACHABLE();
+            }
+
+            page_index++;
+            page_offset = 0;
+            dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
+            remaining_size -= copy_amount;
+        }
+    }

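ReadBlock, WriteBlock, ZeroBlock, and CopyBlock all share one loop shape: clamp each step to the end of the current page, dispatch on the page's attribute, then advance with page_offset reset to zero so every step after the first starts page-aligned. The skeleton, lifted into a standalone sketch (ForEachPageChunk and the 4 KiB page size are assumptions for illustration):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    constexpr std::uint64_t PAGE_BITS = 12; // assumption: 4 KiB pages
    constexpr std::uint64_t PAGE_SIZE = 1ULL << PAGE_BITS;
    constexpr std::uint64_t PAGE_MASK = PAGE_SIZE - 1;

    // Visit [addr, addr + size) one page-bounded chunk at a time.
    // action(page_index, page_offset, amount) handles a single chunk.
    template <typename PageAction>
    void ForEachPageChunk(std::uint64_t addr, std::size_t size, PageAction&& action) {
        std::size_t remaining = size;
        std::uint64_t page_index = addr >> PAGE_BITS;
        std::uint64_t page_offset = addr & PAGE_MASK;

        while (remaining > 0) {
            // Never cross a page boundary within one chunk.
            const std::size_t amount =
                std::min(static_cast<std::size_t>(PAGE_SIZE - page_offset), remaining);
            action(page_index, page_offset, amount);

            ++page_index;
            page_offset = 0; // all later chunks begin at the top of a page
            remaining -= amount;
        }
    }

    int main() {
        int chunks = 0;
        ForEachPageChunk(0x1ff8, 0x20, [&](auto, auto, auto) { ++chunks; });
        return chunks == 2 ? 0 : 1; // 8 bytes in the first page, 0x18 in the next
    }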
-void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
-    if (vaddr == 0) {
-        return;
-    }
+    void ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
+        ReadBlock(*system.CurrentProcess(), src_addr, dest_buffer, size);
+    }

-    // Iterate over a contiguous CPU address space, which corresponds to the specified GPU address
-    // space, marking the region as un/cached. The region is marked un/cached at a granularity of
-    // CPU pages, hence why we iterate on a CPU page basis (note: GPU page size is different). This
-    // assumes the specified GPU address region is contiguous as well.
+    void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer,
+                    const std::size_t size) {
+        const auto& page_table = process.VMManager().page_table;
+        std::size_t remaining_size = size;
+        std::size_t page_index = dest_addr >> PAGE_BITS;
+        std::size_t page_offset = dest_addr & PAGE_MASK;
+
+        while (remaining_size > 0) {
+            const std::size_t copy_amount =
+                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
+            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+
+            switch (page_table.attributes[page_index]) {
+            case Common::PageType::Unmapped: {
+                LOG_ERROR(HW_Memory,
+                          "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
+                          current_vaddr, dest_addr, size);
+                break;
+            }
+            case Common::PageType::Memory: {
+                DEBUG_ASSERT(page_table.pointers[page_index]);
+
+                u8* const dest_ptr = page_table.pointers[page_index] + page_offset;
+                std::memcpy(dest_ptr, src_buffer, copy_amount);
+                break;
+            }
+            case Common::PageType::RasterizerCachedMemory: {
+                u8* const host_ptr = GetPointerFromVMA(process, current_vaddr);
+                system.GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount);
+                std::memcpy(host_ptr, src_buffer, copy_amount);
+                break;
+            }
+            default:
+                UNREACHABLE();
+            }
+
+            page_index++;
+            page_offset = 0;
+            src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
+            remaining_size -= copy_amount;
+        }
+    }

-    u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
-    for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
-        Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
+    void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
+        WriteBlock(*system.CurrentProcess(), dest_addr, src_buffer, size);
+    }

-        if (cached) {
-            // Switch page type to cached if now cached
-            switch (page_type) {
-            case Common::PageType::Unmapped:
-                // It is not necessary for a process to have this region mapped into its address
-                // space, for example, a system module need not have a VRAM mapping.
-                break;
-            case Common::PageType::Memory:
-                page_type = Common::PageType::RasterizerCachedMemory;
-                current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
-                break;
-            case Common::PageType::RasterizerCachedMemory:
-                // There can be more than one GPU region mapped per CPU region, so it's common that
-                // this area is already marked as cached.
-                break;
-            default:
-                UNREACHABLE();
-            }
-        } else {
-            // Switch page type to uncached if now uncached
-            switch (page_type) {
-            case Common::PageType::Unmapped:
-                // It is not necessary for a process to have this region mapped into its address
-                // space, for example, a system module need not have a VRAM mapping.
-                break;
-            case Common::PageType::Memory:
-                // There can be more than one GPU region mapped per CPU region, so it's common that
-                // this area is already unmarked as cached.
-                break;
-            case Common::PageType::RasterizerCachedMemory: {
-                u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK);
-                if (pointer == nullptr) {
-                    // It's possible that this function has been called while updating the pagetable
-                    // after unmapping a VMA. In that case the underlying VMA will no longer exist,
-                    // and we should just leave the pagetable entry blank.
-                    page_type = Common::PageType::Unmapped;
-                } else {
-                    page_type = Common::PageType::Memory;
-                    current_page_table->pointers[vaddr >> PAGE_BITS] = pointer;
-                }
-                break;
-            }
-            default:
-                UNREACHABLE();
-            }
-        }
-    }
-}
+    void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std::size_t size) {
+        const auto& page_table = process.VMManager().page_table;
+        std::size_t remaining_size = size;
+        std::size_t page_index = dest_addr >> PAGE_BITS;
+        std::size_t page_offset = dest_addr & PAGE_MASK;
+
+        while (remaining_size > 0) {
+            const std::size_t copy_amount =
+                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
+            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+
+            switch (page_table.attributes[page_index]) {
+            case Common::PageType::Unmapped: {
+                LOG_ERROR(HW_Memory,
+                          "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
+                          current_vaddr, dest_addr, size);
+                break;
+            }
+            case Common::PageType::Memory: {
+                DEBUG_ASSERT(page_table.pointers[page_index]);
+
+                u8* dest_ptr = page_table.pointers[page_index] + page_offset;
+                std::memset(dest_ptr, 0, copy_amount);
+                break;
+            }
+            case Common::PageType::RasterizerCachedMemory: {
+                u8* const host_ptr = GetPointerFromVMA(process, current_vaddr);
+                system.GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount);
+                std::memset(host_ptr, 0, copy_amount);
+                break;
+            }
+            default:
+                UNREACHABLE();
+            }
+
+            page_index++;
+            page_offset = 0;
+            remaining_size -= copy_amount;
+        }
+    }
+
+    void ZeroBlock(const VAddr dest_addr, const std::size_t size) {
+        ZeroBlock(*system.CurrentProcess(), dest_addr, size);
+    }
+
+    void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr,
+                   const std::size_t size) {
+        const auto& page_table = process.VMManager().page_table;
+        std::size_t remaining_size = size;
+        std::size_t page_index = src_addr >> PAGE_BITS;
+        std::size_t page_offset = src_addr & PAGE_MASK;
+
+        while (remaining_size > 0) {
+            const std::size_t copy_amount =
+                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
+            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+
+            switch (page_table.attributes[page_index]) {
+            case Common::PageType::Unmapped: {
+                LOG_ERROR(HW_Memory,
+                          "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
+                          current_vaddr, src_addr, size);
+                ZeroBlock(process, dest_addr, copy_amount);
+                break;
+            }
+            case Common::PageType::Memory: {
+                DEBUG_ASSERT(page_table.pointers[page_index]);
+                const u8* src_ptr = page_table.pointers[page_index] + page_offset;
+                WriteBlock(process, dest_addr, src_ptr, copy_amount);
+                break;
+            }
+            case Common::PageType::RasterizerCachedMemory: {
+                const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr);
+                system.GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount);
+                WriteBlock(process, dest_addr, host_ptr, copy_amount);
+                break;
+            }
+            default:
+                UNREACHABLE();
+            }
+
+            page_index++;
+            page_offset = 0;
+            dest_addr += static_cast<VAddr>(copy_amount);
+            src_addr += static_cast<VAddr>(copy_amount);
+            remaining_size -= copy_amount;
+        }
+    }

-u8 Read8(const VAddr addr) {
-    return Read<u8>(addr);
-}
+    void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size) {
+        return CopyBlock(*system.CurrentProcess(), dest_addr, src_addr, size);
+    }

-u16 Read16(const VAddr addr) {
-    return Read<u16_le>(addr);
-}
+    void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
+        if (vaddr == 0) {
+            return;
+        }

-u32 Read32(const VAddr addr) {
-    return Read<u32_le>(addr);
-}
+        // Iterate over a contiguous CPU address space, which corresponds to the specified GPU
+        // address space, marking the region as un/cached. The region is marked un/cached at a
+        // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
+        // is different). This assumes the specified GPU address region is contiguous as well.
+
+        u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
+        for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
+            Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
+
+            if (cached) {
+                // Switch page type to cached if now cached
+                switch (page_type) {
+                case Common::PageType::Unmapped:
+                    // It is not necessary for a process to have this region mapped into its address
+                    // space, for example, a system module need not have a VRAM mapping.
+                    break;
+                case Common::PageType::Memory:
+                    page_type = Common::PageType::RasterizerCachedMemory;
+                    current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
+                    break;
+                case Common::PageType::RasterizerCachedMemory:
+                    // There can be more than one GPU region mapped per CPU region, so it's common
+                    // that this area is already marked as cached.
+                    break;
+                default:
+                    UNREACHABLE();
+                }
+            } else {
+                // Switch page type to uncached if now uncached
+                switch (page_type) {
+                case Common::PageType::Unmapped:
+                    // It is not necessary for a process to have this region mapped into its address
+                    // space, for example, a system module need not have a VRAM mapping.
+                    break;
+                case Common::PageType::Memory:
+                    // There can be more than one GPU region mapped per CPU region, so it's common
+                    // that this area is already unmarked as cached.
+                    break;
+                case Common::PageType::RasterizerCachedMemory: {
+                    u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK);
+                    if (pointer == nullptr) {
+                        // It's possible that this function has been called while updating the
+                        // pagetable after unmapping a VMA. In that case the underlying VMA will no
+                        // longer exist, and we should just leave the pagetable entry blank.
+                        page_type = Common::PageType::Unmapped;
+                    } else {
+                        page_type = Common::PageType::Memory;
+                        current_page_table->pointers[vaddr >> PAGE_BITS] = pointer;
+                    }
+                    break;
+                }
+                default:
+                    UNREACHABLE();
+                }
+            }
+        }
+    }

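The num_pages expression above counts every page touched by [vaddr, vaddr + size), including a possibly partial first and last page; that is why a two-byte range straddling a page boundary flips two page attributes. The arithmetic, checked in isolation (PAGE_BITS = 12 is an assumption for illustration):

    #include <cstdint>

    constexpr std::uint64_t PAGE_BITS = 12; // assumption: 4 KiB pages

    // Pages touched by [vaddr, vaddr + size), as in RasterizerMarkRegionCached.
    constexpr std::uint64_t NumPagesTouched(std::uint64_t vaddr, std::uint64_t size) {
        return ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
    }

    static_assert(NumPagesTouched(0x1000, 0x1000) == 1); // one exactly-aligned page
    static_assert(NumPagesTouched(0x1fff, 2) == 2);      // two bytes across a boundary

    int main() {}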
-u64 Read64(const VAddr addr) {
-    return Read<u64_le>(addr);
-}
+    /**
+     * Maps a region of pages as a specific type.
+     *
+     * @param page_table The page table to use to perform the mapping.
+     * @param base       The base address to begin mapping at.
+     * @param size       The total size of the range in bytes.
+     * @param memory     The memory to map.
+     * @param type       The page type to map the memory as.
+     */
+    void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory,
+                  Common::PageType type) {
+        LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE,
+                  (base + size) * PAGE_SIZE);
+
+        // During boot, current_page_table might not be set yet, in which case we need not flush
+        if (system.IsPoweredOn()) {
+            auto& gpu = system.GPU();
+            for (u64 i = 0; i < size; i++) {
+                const auto page = base + i;
+                if (page_table.attributes[page] == Common::PageType::RasterizerCachedMemory) {
+                    gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
+                }
+            }
+        }

-void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer,
-               const std::size_t size) {
-    const auto& page_table = process.VMManager().page_table;
+        const VAddr end = base + size;
+        ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
+                   base + page_table.pointers.size());

-    std::size_t remaining_size = size;
-    std::size_t page_index = src_addr >> PAGE_BITS;
-    std::size_t page_offset = src_addr & PAGE_MASK;
-
-    while (remaining_size > 0) {
-        const std::size_t copy_amount =
-            std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
-        const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+        std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type);
+
+        if (memory == nullptr) {
+            std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end,
+                      memory);
+        } else {
+            while (base != end) {
+                page_table.pointers[base] = memory;

-        switch (page_table.attributes[page_index]) {
-        case Common::PageType::Unmapped: {
-            LOG_ERROR(HW_Memory,
-                      "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
-                      current_vaddr, src_addr, size);
-            std::memset(dest_buffer, 0, copy_amount);
-            break;
-        }
-        case Common::PageType::Memory: {
-            DEBUG_ASSERT(page_table.pointers[page_index]);
+                base += 1;
+                memory += PAGE_SIZE;
+            }
+        }
+    }
+
+    /**
+     * Reads a particular data type out of memory at the given virtual address.
+     *
+     * @param vaddr The virtual address to read the data type from.
+     *
+     * @tparam T The data type to read out of memory. This type *must* be
+     *           trivially copyable, otherwise the behavior of this function
+     *           is undefined.
+     *
+     * @returns The instance of T read from the specified virtual address.
+     */
+    template <typename T>
+    T Read(const VAddr vaddr) {
+        const u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
+        if (page_pointer != nullptr) {
+            // NOTE: Avoid adding any extra logic to this fast-path block
+            T value;
+            std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T));
+            return value;
+        }

-            const u8* src_ptr = page_table.pointers[page_index] + page_offset;
-            std::memcpy(dest_buffer, src_ptr, copy_amount);
-            break;
-        }
-        case Common::PageType::RasterizerCachedMemory: {
-            const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)};
-            Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount);
-            std::memcpy(dest_buffer, host_ptr, copy_amount);
-            break;
-        }
-        default:
-            UNREACHABLE();
-        }
-
-        page_index++;
-        page_offset = 0;
-        dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
-        remaining_size -= copy_amount;
-    }
-}
+        const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
+        switch (type) {
+        case Common::PageType::Unmapped:
+            LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, vaddr);
+            return 0;
+        case Common::PageType::Memory:
+            ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
+            break;
+        case Common::PageType::RasterizerCachedMemory: {
+            const u8* const host_ptr = GetPointerFromVMA(vaddr);
+            system.GPU().FlushRegion(ToCacheAddr(host_ptr), sizeof(T));
+            T value;
+            std::memcpy(&value, host_ptr, sizeof(T));
+            return value;
+        }
+        default:
+            UNREACHABLE();
+        }
+        return {};
+    }
+
+    /**
+     * Writes a particular data type to memory at the given virtual address.
+     *
+     * @param vaddr The virtual address to write the data type to.
+     *
+     * @tparam T The data type to write to memory. This type *must* be
+     *           trivially copyable, otherwise the behavior of this function
+     *           is undefined.
+     *
+     * @returns The instance of T written to the specified virtual address.
+     */
+    template <typename T>
+    void Write(const VAddr vaddr, const T data) {
+        u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
+        if (page_pointer != nullptr) {
+            // NOTE: Avoid adding any extra logic to this fast-path block
+            std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T));
+            return;
+        }
+
+        const Common::PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
+        switch (type) {
+        case Common::PageType::Unmapped:
+            LOG_ERROR(HW_Memory, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8,
+                      static_cast<u32>(data), vaddr);
+            return;
+        case Common::PageType::Memory:
+            ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr);
+            break;
+        case Common::PageType::RasterizerCachedMemory: {
+            u8* const host_ptr{GetPointerFromVMA(vaddr)};
+            system.GPU().InvalidateRegion(ToCacheAddr(host_ptr), sizeof(T));
+            std::memcpy(host_ptr, &data, sizeof(T));
+            break;
+        }
+        default:
+            UNREACHABLE();
+        }
+    }
+
+    Common::PageTable* current_page_table = nullptr;
+    Core::System& system;
+};
+
+Memory::Memory(Core::System& system) : impl{std::make_unique<Impl>(system)} {}
+Memory::~Memory() = default;
+
+void Memory::SetCurrentPageTable(Kernel::Process& process) {
+    impl->SetCurrentPageTable(process);
+}

-void ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
-    ReadBlock(*Core::System::GetInstance().CurrentProcess(), src_addr, dest_buffer, size);
-}
+void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) {
+    impl->MapMemoryRegion(page_table, base, size, target);
+}

-void Write8(const VAddr addr, const u8 data) {
-    Write<u8>(addr, data);
-}
+void Memory::MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size,
+                         Common::MemoryHookPointer mmio_handler) {
+    impl->MapIoRegion(page_table, base, size, std::move(mmio_handler));
+}

-void Write16(const VAddr addr, const u16 data) {
-    Write<u16_le>(addr, data);
-}
+void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
+    impl->UnmapRegion(page_table, base, size);
+}

-void Write32(const VAddr addr, const u32 data) {
-    Write<u32_le>(addr, data);
-}
+void Memory::AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+                          Common::MemoryHookPointer hook) {
+    impl->AddDebugHook(page_table, base, size, std::move(hook));
+}

-void Write64(const VAddr addr, const u64 data) {
-    Write<u64_le>(addr, data);
-}
+void Memory::RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size,
+                             Common::MemoryHookPointer hook) {
+    impl->RemoveDebugHook(page_table, base, size, std::move(hook));
+}

-void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer,
-                const std::size_t size) {
-    const auto& page_table = process.VMManager().page_table;
-    std::size_t remaining_size = size;
-    std::size_t page_index = dest_addr >> PAGE_BITS;
-    std::size_t page_offset = dest_addr & PAGE_MASK;
-
-    while (remaining_size > 0) {
-        const std::size_t copy_amount =
-            std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
-        const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
-
-        switch (page_table.attributes[page_index]) {
-        case Common::PageType::Unmapped: {
-            LOG_ERROR(HW_Memory,
-                      "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
-                      current_vaddr, dest_addr, size);
-            break;
-        }
-        case Common::PageType::Memory: {
-            DEBUG_ASSERT(page_table.pointers[page_index]);
+bool Memory::IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) const {
+    return impl->IsValidVirtualAddress(process, vaddr);
+}

-            u8* dest_ptr = page_table.pointers[page_index] + page_offset;
-            std::memcpy(dest_ptr, src_buffer, copy_amount);
-            break;
-        }
-        case Common::PageType::RasterizerCachedMemory: {
-            const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)};
-            Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount);
-            std::memcpy(host_ptr, src_buffer, copy_amount);
-            break;
-        }
-        default:
-            UNREACHABLE();
-        }
+bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
+    return impl->IsValidVirtualAddress(vaddr);
+}

-        page_index++;
-        page_offset = 0;
-        src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
-        remaining_size -= copy_amount;
-    }
-}
+u8* Memory::GetPointer(VAddr vaddr) {
+    return impl->GetPointer(vaddr);
+}

-void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
-    WriteBlock(*Core::System::GetInstance().CurrentProcess(), dest_addr, src_buffer, size);
-}
+const u8* Memory::GetPointer(VAddr vaddr) const {
+    return impl->GetPointer(vaddr);
+}

-void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std::size_t size) {
-    const auto& page_table = process.VMManager().page_table;
-    std::size_t remaining_size = size;
-    std::size_t page_index = dest_addr >> PAGE_BITS;
-    std::size_t page_offset = dest_addr & PAGE_MASK;
-
-    while (remaining_size > 0) {
-        const std::size_t copy_amount =
-            std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
-        const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
-
-        switch (page_table.attributes[page_index]) {
-        case Common::PageType::Unmapped: {
-            LOG_ERROR(HW_Memory,
-                      "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
-                      current_vaddr, dest_addr, size);
-            break;
-        }
-        case Common::PageType::Memory: {
-            DEBUG_ASSERT(page_table.pointers[page_index]);
+u8 Memory::Read8(const VAddr addr) {
+    return impl->Read8(addr);
+}

-            u8* dest_ptr = page_table.pointers[page_index] + page_offset;
-            std::memset(dest_ptr, 0, copy_amount);
-            break;
-        }
-        case Common::PageType::RasterizerCachedMemory: {
-            const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)};
-            Core::System::GetInstance().GPU().InvalidateRegion(ToCacheAddr(host_ptr), copy_amount);
-            std::memset(host_ptr, 0, copy_amount);
-            break;
-        }
-        default:
-            UNREACHABLE();
-        }
+u16 Memory::Read16(const VAddr addr) {
+    return impl->Read16(addr);
+}

-        page_index++;
-        page_offset = 0;
-        remaining_size -= copy_amount;
-    }
-}
+u32 Memory::Read32(const VAddr addr) {
+    return impl->Read32(addr);
+}

-void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr,
-               const std::size_t size) {
-    const auto& page_table = process.VMManager().page_table;
-    std::size_t remaining_size = size;
-    std::size_t page_index = src_addr >> PAGE_BITS;
-    std::size_t page_offset = src_addr & PAGE_MASK;
-
-    while (remaining_size > 0) {
-        const std::size_t copy_amount =
-            std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
-        const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
-
-        switch (page_table.attributes[page_index]) {
-        case Common::PageType::Unmapped: {
-            LOG_ERROR(HW_Memory,
-                      "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
-                      current_vaddr, src_addr, size);
-            ZeroBlock(process, dest_addr, copy_amount);
-            break;
-        }
-        case Common::PageType::Memory: {
-            DEBUG_ASSERT(page_table.pointers[page_index]);
-            const u8* src_ptr = page_table.pointers[page_index] + page_offset;
-            WriteBlock(process, dest_addr, src_ptr, copy_amount);
-            break;
-        }
-        case Common::PageType::RasterizerCachedMemory: {
-            const auto& host_ptr{GetPointerFromVMA(process, current_vaddr)};
-            Core::System::GetInstance().GPU().FlushRegion(ToCacheAddr(host_ptr), copy_amount);
-            WriteBlock(process, dest_addr, host_ptr, copy_amount);
-            break;
-        }
-        default:
-            UNREACHABLE();
-        }
+u64 Memory::Read64(const VAddr addr) {
+    return impl->Read64(addr);
+}

-        page_index++;
-        page_offset = 0;
-        dest_addr += static_cast<VAddr>(copy_amount);
-        src_addr += static_cast<VAddr>(copy_amount);
-        remaining_size -= copy_amount;
-    }
-}
+void Memory::Write8(VAddr addr, u8 data) {
+    impl->Write8(addr, data);
+}
+
+void Memory::Write16(VAddr addr, u16 data) {
+    impl->Write16(addr, data);
+}
+
+void Memory::Write32(VAddr addr, u32 data) {
+    impl->Write32(addr, data);
+}
+
+void Memory::Write64(VAddr addr, u64 data) {
+    impl->Write64(addr, data);
+}
+
+std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) {
+    return impl->ReadCString(vaddr, max_length);
+}
+
+void Memory::ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer,
+                       const std::size_t size) {
+    impl->ReadBlock(process, src_addr, dest_buffer, size);
+}
+
+void Memory::ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
+    impl->ReadBlock(src_addr, dest_buffer, size);
+}
+
+void Memory::WriteBlock(const Kernel::Process& process, VAddr dest_addr, const void* src_buffer,
+                        std::size_t size) {
+    impl->WriteBlock(process, dest_addr, src_buffer, size);
+}
+
+void Memory::WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
+    impl->WriteBlock(dest_addr, src_buffer, size);
+}

-void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size) {
-    CopyBlock(*Core::System::GetInstance().CurrentProcess(), dest_addr, src_addr, size);
-}
+void Memory::ZeroBlock(const Kernel::Process& process, VAddr dest_addr, std::size_t size) {
+    impl->ZeroBlock(process, dest_addr, size);
+}
+
+void Memory::ZeroBlock(VAddr dest_addr, std::size_t size) {
+    impl->ZeroBlock(dest_addr, size);
+}
+
+void Memory::CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr,
+                       const std::size_t size) {
+    impl->CopyBlock(process, dest_addr, src_addr, size);
+}
+
+void Memory::CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size) {
+    impl->CopyBlock(dest_addr, src_addr, size);
+}
+
+void Memory::RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
+    impl->RasterizerMarkRegionCached(vaddr, size, cached);
+}
+
+bool IsKernelVirtualAddress(const VAddr vaddr) {
+    return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END;
+}

 } // namespace Memory