path: root/src/core/memory.cpp
author    liamwhite  2023-03-23 10:00:19 -0400
committer GitHub     2023-03-23 10:00:19 -0400
commit    c41a4baf06efe935f08331bc6f8ff6d80dc088f5 (patch)
tree      a6580d41bd440b240b2f60db38fdeec60fca2eff /src/core/memory.cpp
parent    Merge pull request #9962 from Kelebek1/disable_srgb (diff)
parent    kernel: use KTypedAddress for addresses (diff)
Merge pull request #9964 from liamwhite/typed-address
kernel: use KTypedAddress for addresses
Diffstat (limited to 'src/core/memory.cpp')
-rw-r--r--  src/core/memory.cpp  319
1 file changed, 176 insertions(+), 143 deletions(-)
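Note on the change: this merge replaces the raw VAddr/PAddr integer aliases used throughout this file with the strongly typed Common::ProcessAddress and Common::PhysicalAddress wrappers, unwrapping them with GetInteger() wherever a plain integer is still required (fmt format arguments, fastmem buffer mapping, GPU FlushRegion/InvalidateRegion calls). The sketch below is only an illustration of that pattern, assuming a minimal wrapper with just the operators the call sites in this diff rely on; it is not the actual KTypedAddress implementation this merge introduces, and the names TypedAddress/GetValue are hypothetical.

    // Illustrative sketch only -- not the real Common::KTypedAddress.
    // A thin wrapper around u64 keeps process (virtual) and physical addresses
    // from being mixed up, while GetInteger() hands the raw integer back at
    // logging/fastmem/GPU boundaries.
    #include <compare>
    #include <cstdint>

    using u64 = std::uint64_t;

    template <bool IsVirtual>
    class TypedAddress {
    public:
        constexpr TypedAddress() = default;
        constexpr TypedAddress(u64 address) : m_address{address} {} // implicit, so integer literals still work

        // Just enough arithmetic to keep call sites like `addr & 1`,
        // `base / YUZU_PAGESIZE` or `dest_addr += copy_amount` compiling.
        constexpr TypedAddress operator+(u64 rhs) const { return TypedAddress{m_address + rhs}; }
        constexpr TypedAddress& operator+=(u64 rhs) { m_address += rhs; return *this; }
        constexpr u64 operator&(u64 mask) const { return m_address & mask; }
        constexpr u64 operator>>(int shift) const { return m_address >> shift; }
        constexpr u64 operator/(u64 divisor) const { return m_address / divisor; }
        constexpr auto operator<=>(const TypedAddress&) const = default;

        constexpr u64 GetValue() const { return m_address; }

    private:
        u64 m_address{};
    };

    // Distinct instantiations: passing a physical address where a process
    // (virtual) address is expected becomes a compile-time error.
    using ProcessAddress = TypedAddress<true>;
    using PhysicalAddress = TypedAddress<false>;

    // Free helper used throughout the diff, e.g. GetInteger(base) in ASSERT_MSG
    // arguments or system.GPU().FlushRegion(GetInteger(current_vaddr), ...).
    template <bool IsVirtual>
    constexpr u64 GetInteger(TypedAddress<IsVirtual> address) {
        return address.GetValue();
    }

With aliases of this shape, most hunks below are mechanical: function signatures swap VAddr/PAddr for the typed aliases, and GetInteger() appears exactly where an untyped u64 is genuinely needed.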
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 4397fcfb1..95e070825 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -35,31 +35,35 @@ struct Memory::Impl {
         system.ArmInterface(core_id).PageTableChanged(*current_page_table, address_space_width);
     }
 
-    void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
+    void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                         Common::PhysicalAddress target) {
         ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
-        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
-        ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", target);
+        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
+        ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}",
+                   GetInteger(target));
         MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target,
                  Common::PageType::Memory);
 
         if (Settings::IsFastmemEnabled()) {
-            system.DeviceMemory().buffer.Map(base, target - DramMemoryMap::Base, size);
+            system.DeviceMemory().buffer.Map(GetInteger(base),
+                                             GetInteger(target) - DramMemoryMap::Base, size);
         }
     }
 
-    void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
+    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
         ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
-        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
+        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
         MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
                  Common::PageType::Unmapped);
 
         if (Settings::IsFastmemEnabled()) {
-            system.DeviceMemory().buffer.Unmap(base, size);
+            system.DeviceMemory().buffer.Unmap(GetInteger(base), size);
         }
     }
 
-    [[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const {
-        const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
+    [[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(u64 vaddr) const {
+        const Common::PhysicalAddress paddr{
+            current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
 
         if (!paddr) {
             return {};
@@ -68,8 +72,9 @@ struct Memory::Impl {
         return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
     }
 
-    [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
-        const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
+    [[nodiscard]] u8* GetPointerFromDebugMemory(u64 vaddr) const {
+        const Common::PhysicalAddress paddr{
+            current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
 
         if (paddr == 0) {
             return {};
@@ -78,11 +83,11 @@ struct Memory::Impl {
         return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
     }
 
-    u8 Read8(const VAddr addr) {
+    u8 Read8(const Common::ProcessAddress addr) {
         return Read<u8>(addr);
     }
 
-    u16 Read16(const VAddr addr) {
+    u16 Read16(const Common::ProcessAddress addr) {
         if ((addr & 1) == 0) {
             return Read<u16_le>(addr);
         } else {
@@ -92,7 +97,7 @@ struct Memory::Impl {
         }
     }
 
-    u32 Read32(const VAddr addr) {
+    u32 Read32(const Common::ProcessAddress addr) {
         if ((addr & 3) == 0) {
             return Read<u32_le>(addr);
         } else {
@@ -102,7 +107,7 @@ struct Memory::Impl {
         }
     }
 
-    u64 Read64(const VAddr addr) {
+    u64 Read64(const Common::ProcessAddress addr) {
         if ((addr & 7) == 0) {
             return Read<u64_le>(addr);
         } else {
@@ -112,11 +117,11 @@ struct Memory::Impl {
         }
     }
 
-    void Write8(const VAddr addr, const u8 data) {
+    void Write8(const Common::ProcessAddress addr, const u8 data) {
         Write<u8>(addr, data);
     }
 
-    void Write16(const VAddr addr, const u16 data) {
+    void Write16(const Common::ProcessAddress addr, const u16 data) {
         if ((addr & 1) == 0) {
             Write<u16_le>(addr, data);
         } else {
@@ -125,7 +130,7 @@ struct Memory::Impl {
         }
     }
 
-    void Write32(const VAddr addr, const u32 data) {
+    void Write32(const Common::ProcessAddress addr, const u32 data) {
         if ((addr & 3) == 0) {
             Write<u32_le>(addr, data);
         } else {
@@ -134,7 +139,7 @@ struct Memory::Impl {
         }
     }
 
-    void Write64(const VAddr addr, const u64 data) {
+    void Write64(const Common::ProcessAddress addr, const u64 data) {
         if ((addr & 7) == 0) {
             Write<u64_le>(addr, data);
         } else {
@@ -143,23 +148,23 @@ struct Memory::Impl {
         }
     }
 
-    bool WriteExclusive8(const VAddr addr, const u8 data, const u8 expected) {
+    bool WriteExclusive8(const Common::ProcessAddress addr, const u8 data, const u8 expected) {
         return WriteExclusive<u8>(addr, data, expected);
     }
 
-    bool WriteExclusive16(const VAddr addr, const u16 data, const u16 expected) {
+    bool WriteExclusive16(const Common::ProcessAddress addr, const u16 data, const u16 expected) {
         return WriteExclusive<u16_le>(addr, data, expected);
     }
 
-    bool WriteExclusive32(const VAddr addr, const u32 data, const u32 expected) {
+    bool WriteExclusive32(const Common::ProcessAddress addr, const u32 data, const u32 expected) {
         return WriteExclusive<u32_le>(addr, data, expected);
     }
 
-    bool WriteExclusive64(const VAddr addr, const u64 data, const u64 expected) {
+    bool WriteExclusive64(const Common::ProcessAddress addr, const u64 data, const u64 expected) {
         return WriteExclusive<u64_le>(addr, data, expected);
     }
 
-    std::string ReadCString(VAddr vaddr, std::size_t max_length) {
+    std::string ReadCString(Common::ProcessAddress vaddr, std::size_t max_length) {
         std::string string;
         string.reserve(max_length);
         for (std::size_t i = 0; i < max_length; ++i) {
@@ -174,8 +179,9 @@ struct Memory::Impl {
         return string;
     }
 
-    void WalkBlock(const Kernel::KProcess& process, const VAddr addr, const std::size_t size,
-                   auto on_unmapped, auto on_memory, auto on_rasterizer, auto increment) {
+    void WalkBlock(const Kernel::KProcess& process, const Common::ProcessAddress addr,
+                   const std::size_t size, auto on_unmapped, auto on_memory, auto on_rasterizer,
+                   auto increment) {
         const auto& page_table = process.PageTable().PageTableImpl();
         std::size_t remaining_size = size;
         std::size_t page_index = addr >> YUZU_PAGEBITS;
@@ -185,7 +191,7 @@ struct Memory::Impl {
             const std::size_t copy_amount =
                 std::min(static_cast<std::size_t>(YUZU_PAGESIZE) - page_offset, remaining_size);
             const auto current_vaddr =
-                static_cast<VAddr>((page_index << YUZU_PAGEBITS) + page_offset);
+                static_cast<u64>((page_index << YUZU_PAGEBITS) + page_offset);
 
             const auto [pointer, type] = page_table.pointers[page_index].PointerType();
             switch (type) {
@@ -220,24 +226,24 @@ struct Memory::Impl {
     }
 
     template <bool UNSAFE>
-    void ReadBlockImpl(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer,
-                       const std::size_t size) {
+    void ReadBlockImpl(const Kernel::KProcess& process, const Common::ProcessAddress src_addr,
+                       void* dest_buffer, const std::size_t size) {
         WalkBlock(
             process, src_addr, size,
             [src_addr, size, &dest_buffer](const std::size_t copy_amount,
-                                           const VAddr current_vaddr) {
+                                           const Common::ProcessAddress current_vaddr) {
                 LOG_ERROR(HW_Memory,
                           "Unmapped ReadBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
-                          current_vaddr, src_addr, size);
+                          GetInteger(current_vaddr), GetInteger(src_addr), size);
                 std::memset(dest_buffer, 0, copy_amount);
             },
             [&](const std::size_t copy_amount, const u8* const src_ptr) {
                 std::memcpy(dest_buffer, src_ptr, copy_amount);
             },
-            [&](const VAddr current_vaddr, const std::size_t copy_amount,
+            [&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
                 const u8* const host_ptr) {
                 if constexpr (!UNSAFE) {
-                    system.GPU().FlushRegion(current_vaddr, copy_amount);
+                    system.GPU().FlushRegion(GetInteger(current_vaddr), copy_amount);
                 }
                 std::memcpy(dest_buffer, host_ptr, copy_amount);
             },
@@ -246,30 +252,34 @@ struct Memory::Impl {
             });
     }
 
-    void ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
+    void ReadBlock(const Common::ProcessAddress src_addr, void* dest_buffer,
+                   const std::size_t size) {
         ReadBlockImpl<false>(*system.ApplicationProcess(), src_addr, dest_buffer, size);
     }
 
-    void ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
+    void ReadBlockUnsafe(const Common::ProcessAddress src_addr, void* dest_buffer,
+                         const std::size_t size) {
         ReadBlockImpl<true>(*system.ApplicationProcess(), src_addr, dest_buffer, size);
     }
 
     template <bool UNSAFE>
-    void WriteBlockImpl(const Kernel::KProcess& process, const VAddr dest_addr,
+    void WriteBlockImpl(const Kernel::KProcess& process, const Common::ProcessAddress dest_addr,
                         const void* src_buffer, const std::size_t size) {
         WalkBlock(
             process, dest_addr, size,
-            [dest_addr, size](const std::size_t copy_amount, const VAddr current_vaddr) {
+            [dest_addr, size](const std::size_t copy_amount,
+                              const Common::ProcessAddress current_vaddr) {
                 LOG_ERROR(HW_Memory,
                           "Unmapped WriteBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
-                          current_vaddr, dest_addr, size);
+                          GetInteger(current_vaddr), GetInteger(dest_addr), size);
             },
             [&](const std::size_t copy_amount, u8* const dest_ptr) {
                 std::memcpy(dest_ptr, src_buffer, copy_amount);
             },
-            [&](const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) {
+            [&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
+                u8* const host_ptr) {
                 if constexpr (!UNSAFE) {
-                    system.GPU().InvalidateRegion(current_vaddr, copy_amount);
+                    system.GPU().InvalidateRegion(GetInteger(current_vaddr), copy_amount);
                 }
                 std::memcpy(host_ptr, src_buffer, copy_amount);
             },
@@ -278,71 +288,77 @@ struct Memory::Impl {
             });
     }
 
-    void WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
+    void WriteBlock(const Common::ProcessAddress dest_addr, const void* src_buffer,
+                    const std::size_t size) {
         WriteBlockImpl<false>(*system.ApplicationProcess(), dest_addr, src_buffer, size);
     }
 
-    void WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
+    void WriteBlockUnsafe(const Common::ProcessAddress dest_addr, const void* src_buffer,
+                          const std::size_t size) {
         WriteBlockImpl<true>(*system.ApplicationProcess(), dest_addr, src_buffer, size);
     }
 
-    void ZeroBlock(const Kernel::KProcess& process, const VAddr dest_addr, const std::size_t size) {
+    void ZeroBlock(const Kernel::KProcess& process, const Common::ProcessAddress dest_addr,
+                   const std::size_t size) {
         WalkBlock(
             process, dest_addr, size,
-            [dest_addr, size](const std::size_t copy_amount, const VAddr current_vaddr) {
+            [dest_addr, size](const std::size_t copy_amount,
+                              const Common::ProcessAddress current_vaddr) {
                 LOG_ERROR(HW_Memory,
                           "Unmapped ZeroBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
-                          current_vaddr, dest_addr, size);
+                          GetInteger(current_vaddr), GetInteger(dest_addr), size);
             },
             [](const std::size_t copy_amount, u8* const dest_ptr) {
                 std::memset(dest_ptr, 0, copy_amount);
             },
-            [&](const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) {
-                system.GPU().InvalidateRegion(current_vaddr, copy_amount);
+            [&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
+                u8* const host_ptr) {
+                system.GPU().InvalidateRegion(GetInteger(current_vaddr), copy_amount);
                 std::memset(host_ptr, 0, copy_amount);
             },
             [](const std::size_t copy_amount) {});
     }
 
-    void CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr,
-                   const std::size_t size) {
+    void CopyBlock(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                   Common::ProcessAddress src_addr, const std::size_t size) {
         WalkBlock(
             process, dest_addr, size,
-            [&](const std::size_t copy_amount, const VAddr current_vaddr) {
+            [&](const std::size_t copy_amount, const Common::ProcessAddress current_vaddr) {
                 LOG_ERROR(HW_Memory,
                           "Unmapped CopyBlock @ 0x{:016X} (start address = 0x{:016X}, size = {})",
-                          current_vaddr, src_addr, size);
+                          GetInteger(current_vaddr), GetInteger(src_addr), size);
                 ZeroBlock(process, dest_addr, copy_amount);
             },
             [&](const std::size_t copy_amount, const u8* const src_ptr) {
                 WriteBlockImpl<false>(process, dest_addr, src_ptr, copy_amount);
             },
-            [&](const VAddr current_vaddr, const std::size_t copy_amount, u8* const host_ptr) {
-                system.GPU().FlushRegion(current_vaddr, copy_amount);
+            [&](const Common::ProcessAddress current_vaddr, const std::size_t copy_amount,
+                u8* const host_ptr) {
+                system.GPU().FlushRegion(GetInteger(current_vaddr), copy_amount);
                 WriteBlockImpl<false>(process, dest_addr, host_ptr, copy_amount);
             },
             [&](const std::size_t copy_amount) {
-                dest_addr += static_cast<VAddr>(copy_amount);
-                src_addr += static_cast<VAddr>(copy_amount);
+                dest_addr += copy_amount;
+                src_addr += copy_amount;
             });
     }
 
     template <typename Callback>
-    Result PerformCacheOperation(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size,
-                                 Callback&& cb) {
+    Result PerformCacheOperation(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                                 std::size_t size, Callback&& cb) {
         class InvalidMemoryException : public std::exception {};
 
         try {
             WalkBlock(
                 process, dest_addr, size,
-                [&](const std::size_t block_size, const VAddr current_vaddr) {
-                    LOG_ERROR(HW_Memory, "Unmapped cache maintenance @ {:#018X}", current_vaddr);
+                [&](const std::size_t block_size, const Common::ProcessAddress current_vaddr) {
+                    LOG_ERROR(HW_Memory, "Unmapped cache maintenance @ {:#018X}",
+                              GetInteger(current_vaddr));
                     throw InvalidMemoryException();
                 },
                 [&](const std::size_t block_size, u8* const host_ptr) {},
-                [&](const VAddr current_vaddr, const std::size_t block_size, u8* const host_ptr) {
-                    cb(current_vaddr, block_size);
-                },
+                [&](const Common::ProcessAddress current_vaddr, const std::size_t block_size,
+                    u8* const host_ptr) { cb(current_vaddr, block_size); },
                 [](const std::size_t block_size) {});
         } catch (InvalidMemoryException&) {
             return Kernel::ResultInvalidCurrentMemory;
@@ -351,34 +367,40 @@ struct Memory::Impl {
         return ResultSuccess;
     }
 
-    Result InvalidateDataCache(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size) {
-        auto on_rasterizer = [&](const VAddr current_vaddr, const std::size_t block_size) {
+    Result InvalidateDataCache(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                               std::size_t size) {
+        auto on_rasterizer = [&](const Common::ProcessAddress current_vaddr,
+                                 const std::size_t block_size) {
            // dc ivac: Invalidate to point of coherency
            // GPU flush -> CPU invalidate
-            system.GPU().FlushRegion(current_vaddr, block_size);
+            system.GPU().FlushRegion(GetInteger(current_vaddr), block_size);
         };
         return PerformCacheOperation(process, dest_addr, size, on_rasterizer);
     }
 
-    Result StoreDataCache(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size) {
-        auto on_rasterizer = [&](const VAddr current_vaddr, const std::size_t block_size) {
+    Result StoreDataCache(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                          std::size_t size) {
+        auto on_rasterizer = [&](const Common::ProcessAddress current_vaddr,
+                                 const std::size_t block_size) {
            // dc cvac: Store to point of coherency
            // CPU flush -> GPU invalidate
-            system.GPU().InvalidateRegion(current_vaddr, block_size);
+            system.GPU().InvalidateRegion(GetInteger(current_vaddr), block_size);
         };
         return PerformCacheOperation(process, dest_addr, size, on_rasterizer);
     }
 
-    Result FlushDataCache(const Kernel::KProcess& process, VAddr dest_addr, std::size_t size) {
-        auto on_rasterizer = [&](const VAddr current_vaddr, const std::size_t block_size) {
+    Result FlushDataCache(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                          std::size_t size) {
+        auto on_rasterizer = [&](const Common::ProcessAddress current_vaddr,
+                                 const std::size_t block_size) {
            // dc civac: Store to point of coherency, and invalidate from cache
            // CPU flush -> GPU invalidate
-            system.GPU().InvalidateRegion(current_vaddr, block_size);
+            system.GPU().InvalidateRegion(GetInteger(current_vaddr), block_size);
         };
         return PerformCacheOperation(process, dest_addr, size, on_rasterizer);
     }
 
-    void MarkRegionDebug(VAddr vaddr, u64 size, bool debug) {
+    void MarkRegionDebug(u64 vaddr, u64 size, bool debug) {
         if (vaddr == 0) {
             return;
         }
@@ -434,7 +456,7 @@ struct Memory::Impl {
         }
     }
 
-    void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
+    void RasterizerMarkRegionCached(u64 vaddr, u64 size, bool cached) {
         if (vaddr == 0) {
             return;
         }
@@ -514,10 +536,12 @@ struct Memory::Impl {
      * @param target The target address to begin mapping from.
      * @param type The page type to map the memory as.
      */
-    void MapPages(Common::PageTable& page_table, VAddr base, u64 size, PAddr target,
-                  Common::PageType type) {
-        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * YUZU_PAGESIZE,
-                  (base + size) * YUZU_PAGESIZE);
+    void MapPages(Common::PageTable& page_table, Common::ProcessAddress base_address, u64 size,
+                  Common::PhysicalAddress target, Common::PageType type) {
+        auto base = GetInteger(base_address);
+
+        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", GetInteger(target),
+                  base * YUZU_PAGESIZE, (base + size) * YUZU_PAGESIZE);
 
         // During boot, current_page_table might not be set yet, in which case we need not flush
         if (system.IsPoweredOn()) {
@@ -530,7 +554,7 @@ struct Memory::Impl {
             }
         }
 
-        const VAddr end = base + size;
+        const Common::ProcessAddress end = base + size;
         ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}",
                    base + page_table.pointers.size());
 
@@ -548,7 +572,7 @@ struct Memory::Impl {
         while (base != end) {
             page_table.pointers[base].Store(
                 system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS), type);
-            page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);
+            page_table.backing_addr[base] = GetInteger(target) - (base << YUZU_PAGEBITS);
 
             ASSERT_MSG(page_table.pointers[base].Pointer(),
                        "memory mapping base yield a nullptr within the table");
@@ -559,9 +583,9 @@ struct Memory::Impl {
         }
     }
 
-    [[nodiscard]] u8* GetPointerImpl(VAddr vaddr, auto on_unmapped, auto on_rasterizer) const {
+    [[nodiscard]] u8* GetPointerImpl(u64 vaddr, auto on_unmapped, auto on_rasterizer) const {
         // AARCH64 masks the upper 16 bit of all memory accesses
-        vaddr &= 0xffffffffffffULL;
+        vaddr = vaddr & 0xffffffffffffULL;
 
         if (vaddr >= 1uLL << current_page_table->GetAddressSpaceBits()) {
             on_unmapped();
@@ -593,15 +617,18 @@ struct Memory::Impl {
         return nullptr;
     }
 
-    [[nodiscard]] u8* GetPointer(const VAddr vaddr) const {
+    [[nodiscard]] u8* GetPointer(const Common::ProcessAddress vaddr) const {
         return GetPointerImpl(
-            vaddr, [vaddr]() { LOG_ERROR(HW_Memory, "Unmapped GetPointer @ 0x{:016X}", vaddr); },
+            GetInteger(vaddr),
+            [vaddr]() {
+                LOG_ERROR(HW_Memory, "Unmapped GetPointer @ 0x{:016X}", GetInteger(vaddr));
+            },
             []() {});
     }
 
-    [[nodiscard]] u8* GetPointerSilent(const VAddr vaddr) const {
+    [[nodiscard]] u8* GetPointerSilent(const Common::ProcessAddress vaddr) const {
         return GetPointerImpl(
-            vaddr, []() {}, []() {});
+            GetInteger(vaddr), []() {}, []() {});
     }
 
     /**
@@ -616,14 +643,15 @@ struct Memory::Impl {
      * @returns The instance of T read from the specified virtual address.
      */
     template <typename T>
-    T Read(VAddr vaddr) {
+    T Read(Common::ProcessAddress vaddr) {
         T result = 0;
         const u8* const ptr = GetPointerImpl(
-            vaddr,
+            GetInteger(vaddr),
             [vaddr]() {
-                LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8, vaddr);
+                LOG_ERROR(HW_Memory, "Unmapped Read{} @ 0x{:016X}", sizeof(T) * 8,
+                          GetInteger(vaddr));
             },
-            [&]() { system.GPU().FlushRegion(vaddr, sizeof(T)); });
+            [&]() { system.GPU().FlushRegion(GetInteger(vaddr), sizeof(T)); });
         if (ptr) {
             std::memcpy(&result, ptr, sizeof(T));
         }
@@ -640,28 +668,28 @@ struct Memory::Impl {
      * is undefined.
      */
     template <typename T>
-    void Write(VAddr vaddr, const T data) {
+    void Write(Common::ProcessAddress vaddr, const T data) {
         u8* const ptr = GetPointerImpl(
-            vaddr,
+            GetInteger(vaddr),
             [vaddr, data]() {
                 LOG_ERROR(HW_Memory, "Unmapped Write{} @ 0x{:016X} = 0x{:016X}", sizeof(T) * 8,
-                          vaddr, static_cast<u64>(data));
+                          GetInteger(vaddr), static_cast<u64>(data));
             },
-            [&]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); });
+            [&]() { system.GPU().InvalidateRegion(GetInteger(vaddr), sizeof(T)); });
         if (ptr) {
             std::memcpy(ptr, &data, sizeof(T));
         }
     }
 
     template <typename T>
-    bool WriteExclusive(VAddr vaddr, const T data, const T expected) {
+    bool WriteExclusive(Common::ProcessAddress vaddr, const T data, const T expected) {
         u8* const ptr = GetPointerImpl(
-            vaddr,
+            GetInteger(vaddr),
             [vaddr, data]() {
                 LOG_ERROR(HW_Memory, "Unmapped WriteExclusive{} @ 0x{:016X} = 0x{:016X}",
-                          sizeof(T) * 8, vaddr, static_cast<u64>(data));
+                          sizeof(T) * 8, GetInteger(vaddr), static_cast<u64>(data));
             },
-            [&]() { system.GPU().InvalidateRegion(vaddr, sizeof(T)); });
+            [&]() { system.GPU().InvalidateRegion(GetInteger(vaddr), sizeof(T)); });
         if (ptr) {
             const auto volatile_pointer = reinterpret_cast<volatile T*>(ptr);
             return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
@@ -669,14 +697,14 @@ struct Memory::Impl {
         return true;
     }
 
-    bool WriteExclusive128(VAddr vaddr, const u128 data, const u128 expected) {
+    bool WriteExclusive128(Common::ProcessAddress vaddr, const u128 data, const u128 expected) {
         u8* const ptr = GetPointerImpl(
-            vaddr,
+            GetInteger(vaddr),
             [vaddr, data]() {
                 LOG_ERROR(HW_Memory, "Unmapped WriteExclusive128 @ 0x{:016X} = 0x{:016X}{:016X}",
-                          vaddr, static_cast<u64>(data[1]), static_cast<u64>(data[0]));
+                          GetInteger(vaddr), static_cast<u64>(data[1]), static_cast<u64>(data[0]));
             },
-            [&]() { system.GPU().InvalidateRegion(vaddr, sizeof(u128)); });
+            [&]() { system.GPU().InvalidateRegion(GetInteger(vaddr), sizeof(u128)); });
         if (ptr) {
             const auto volatile_pointer = reinterpret_cast<volatile u64*>(ptr);
             return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
@@ -702,15 +730,16 @@ void Memory::SetCurrentPageTable(Kernel::KProcess& process, u32 core_id) {
     impl->SetCurrentPageTable(process, core_id);
 }
 
-void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
+void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                             Common::PhysicalAddress target) {
     impl->MapMemoryRegion(page_table, base, size, target);
 }
 
-void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
+void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
     impl->UnmapRegion(page_table, base, size);
 }
 
-bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
+bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const {
     const Kernel::KProcess& process = *system.ApplicationProcess();
     const auto& page_table = process.PageTable().PageTableImpl();
     const size_t page = vaddr >> YUZU_PAGEBITS;
@@ -722,9 +751,9 @@ bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
            type == Common::PageType::DebugMemory;
 }
 
-bool Memory::IsValidVirtualAddressRange(VAddr base, u64 size) const {
-    VAddr end = base + size;
-    VAddr page = Common::AlignDown(base, YUZU_PAGESIZE);
+bool Memory::IsValidVirtualAddressRange(Common::ProcessAddress base, u64 size) const {
+    Common::ProcessAddress end = base + size;
+    Common::ProcessAddress page = Common::AlignDown(GetInteger(base), YUZU_PAGESIZE);
 
     for (; page < end; page += YUZU_PAGESIZE) {
         if (!IsValidVirtualAddress(page)) {
@@ -735,131 +764,135 @@ bool Memory::IsValidVirtualAddressRange(VAddr base, u64 size) const {
     return true;
 }
 
-u8* Memory::GetPointer(VAddr vaddr) {
+u8* Memory::GetPointer(Common::ProcessAddress vaddr) {
     return impl->GetPointer(vaddr);
 }
 
-u8* Memory::GetPointerSilent(VAddr vaddr) {
+u8* Memory::GetPointerSilent(Common::ProcessAddress vaddr) {
     return impl->GetPointerSilent(vaddr);
 }
 
-const u8* Memory::GetPointer(VAddr vaddr) const {
+const u8* Memory::GetPointer(Common::ProcessAddress vaddr) const {
     return impl->GetPointer(vaddr);
 }
 
-u8 Memory::Read8(const VAddr addr) {
+u8 Memory::Read8(const Common::ProcessAddress addr) {
     return impl->Read8(addr);
 }
 
-u16 Memory::Read16(const VAddr addr) {
+u16 Memory::Read16(const Common::ProcessAddress addr) {
     return impl->Read16(addr);
 }
 
-u32 Memory::Read32(const VAddr addr) {
+u32 Memory::Read32(const Common::ProcessAddress addr) {
     return impl->Read32(addr);
 }
 
-u64 Memory::Read64(const VAddr addr) {
+u64 Memory::Read64(const Common::ProcessAddress addr) {
     return impl->Read64(addr);
 }
 
-void Memory::Write8(VAddr addr, u8 data) {
+void Memory::Write8(Common::ProcessAddress addr, u8 data) {
     impl->Write8(addr, data);
 }
 
-void Memory::Write16(VAddr addr, u16 data) {
+void Memory::Write16(Common::ProcessAddress addr, u16 data) {
     impl->Write16(addr, data);
 }
 
-void Memory::Write32(VAddr addr, u32 data) {
+void Memory::Write32(Common::ProcessAddress addr, u32 data) {
     impl->Write32(addr, data);
 }
 
-void Memory::Write64(VAddr addr, u64 data) {
+void Memory::Write64(Common::ProcessAddress addr, u64 data) {
     impl->Write64(addr, data);
 }
 
-bool Memory::WriteExclusive8(VAddr addr, u8 data, u8 expected) {
+bool Memory::WriteExclusive8(Common::ProcessAddress addr, u8 data, u8 expected) {
     return impl->WriteExclusive8(addr, data, expected);
 }
 
-bool Memory::WriteExclusive16(VAddr addr, u16 data, u16 expected) {
+bool Memory::WriteExclusive16(Common::ProcessAddress addr, u16 data, u16 expected) {
     return impl->WriteExclusive16(addr, data, expected);
 }
 
-bool Memory::WriteExclusive32(VAddr addr, u32 data, u32 expected) {
+bool Memory::WriteExclusive32(Common::ProcessAddress addr, u32 data, u32 expected) {
     return impl->WriteExclusive32(addr, data, expected);
 }
 
-bool Memory::WriteExclusive64(VAddr addr, u64 data, u64 expected) {
+bool Memory::WriteExclusive64(Common::ProcessAddress addr, u64 data, u64 expected) {
     return impl->WriteExclusive64(addr, data, expected);
 }
 
-bool Memory::WriteExclusive128(VAddr addr, u128 data, u128 expected) {
+bool Memory::WriteExclusive128(Common::ProcessAddress addr, u128 data, u128 expected) {
     return impl->WriteExclusive128(addr, data, expected);
 }
 
-std::string Memory::ReadCString(VAddr vaddr, std::size_t max_length) {
+std::string Memory::ReadCString(Common::ProcessAddress vaddr, std::size_t max_length) {
     return impl->ReadCString(vaddr, max_length);
 }
 
-void Memory::ReadBlock(const Kernel::KProcess& process, const VAddr src_addr, void* dest_buffer,
-                       const std::size_t size) {
+void Memory::ReadBlock(const Kernel::KProcess& process, const Common::ProcessAddress src_addr,
+                       void* dest_buffer, const std::size_t size) {
     impl->ReadBlockImpl<false>(process, src_addr, dest_buffer, size);
 }
 
-void Memory::ReadBlock(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
+void Memory::ReadBlock(const Common::ProcessAddress src_addr, void* dest_buffer,
+                       const std::size_t size) {
     impl->ReadBlock(src_addr, dest_buffer, size);
 }
 
-void Memory::ReadBlockUnsafe(const VAddr src_addr, void* dest_buffer, const std::size_t size) {
+void Memory::ReadBlockUnsafe(const Common::ProcessAddress src_addr, void* dest_buffer,
+                             const std::size_t size) {
    impl->ReadBlockUnsafe(src_addr, dest_buffer, size);
 }
 
-void Memory::WriteBlock(const Kernel::KProcess& process, VAddr dest_addr, const void* src_buffer,
-                        std::size_t size) {
+void Memory::WriteBlock(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                        const void* src_buffer, std::size_t size) {
     impl->WriteBlockImpl<false>(process, dest_addr, src_buffer, size);
 }
 
-void Memory::WriteBlock(const VAddr dest_addr, const void* src_buffer, const std::size_t size) {
+void Memory::WriteBlock(const Common::ProcessAddress dest_addr, const void* src_buffer,
+                        const std::size_t size) {
     impl->WriteBlock(dest_addr, src_buffer, size);
 }
 
-void Memory::WriteBlockUnsafe(const VAddr dest_addr, const void* src_buffer,
+void Memory::WriteBlockUnsafe(const Common::ProcessAddress dest_addr, const void* src_buffer,
                               const std::size_t size) {
     impl->WriteBlockUnsafe(dest_addr, src_buffer, size);
 }
 
-void Memory::CopyBlock(const Kernel::KProcess& process, VAddr dest_addr, VAddr src_addr,
-                       const std::size_t size) {
+void Memory::CopyBlock(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                       Common::ProcessAddress src_addr, const std::size_t size) {
     impl->CopyBlock(process, dest_addr, src_addr, size);
 }
 
-void Memory::ZeroBlock(const Kernel::KProcess& process, VAddr dest_addr, const std::size_t size) {
+void Memory::ZeroBlock(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
+                       const std::size_t size) {
     impl->ZeroBlock(process, dest_addr, size);
 }
 
-Result Memory::InvalidateDataCache(const Kernel::KProcess& process, VAddr dest_addr,
-                                   const std::size_t size) {
+Result Memory::InvalidateDataCache(const Kernel::KProcess& process,
+                                   Common::ProcessAddress dest_addr, const std::size_t size) {
     return impl->InvalidateDataCache(process, dest_addr, size);
 }
 
-Result Memory::StoreDataCache(const Kernel::KProcess& process, VAddr dest_addr,
+Result Memory::StoreDataCache(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
                               const std::size_t size) {
     return impl->StoreDataCache(process, dest_addr, size);
 }
 
-Result Memory::FlushDataCache(const Kernel::KProcess& process, VAddr dest_addr,
+Result Memory::FlushDataCache(const Kernel::KProcess& process, Common::ProcessAddress dest_addr,
                               const std::size_t size) {
     return impl->FlushDataCache(process, dest_addr, size);
 }
 
-void Memory::RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
-    impl->RasterizerMarkRegionCached(vaddr, size, cached);
+void Memory::RasterizerMarkRegionCached(Common::ProcessAddress vaddr, u64 size, bool cached) {
+    impl->RasterizerMarkRegionCached(GetInteger(vaddr), size, cached);
 }
 
-void Memory::MarkRegionDebug(VAddr vaddr, u64 size, bool debug) {
-    impl->MarkRegionDebug(vaddr, size, debug);
+void Memory::MarkRegionDebug(Common::ProcessAddress vaddr, u64 size, bool debug) {
+    impl->MarkRegionDebug(GetInteger(vaddr), size, debug);
 }
 
 } // namespace Core::Memory