summaryrefslogtreecommitdiff
path: root/src/core/memory.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/core/memory.cpp')
-rw-r--r--  src/core/memory.cpp  51
1 file changed, 15 insertions(+), 36 deletions(-)
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index bc34bfd6d..0e4e0157c 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -251,8 +251,8 @@ std::string ReadCString(VAddr vaddr, std::size_t max_length) {
251 return string; 251 return string;
252} 252}
253 253
254void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached) { 254void RasterizerMarkRegionCached(VAddr vaddr, u64 size, bool cached) {
255 if (gpu_addr == 0) { 255 if (vaddr == 0) {
256 return; 256 return;
257 } 257 }
258 258
@@ -261,19 +261,8 @@ void RasterizerMarkRegionCached(Tegra::GPUVAddr gpu_addr, u64 size, bool cached)
261 // CPU pages, hence why we iterate on a CPU page basis (note: GPU page size is different). This 261 // CPU pages, hence why we iterate on a CPU page basis (note: GPU page size is different). This
262 // assumes the specified GPU address region is contiguous as well. 262 // assumes the specified GPU address region is contiguous as well.
263 263
264 u64 num_pages = ((gpu_addr + size - 1) >> PAGE_BITS) - (gpu_addr >> PAGE_BITS) + 1; 264 u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
265 for (unsigned i = 0; i < num_pages; ++i, gpu_addr += PAGE_SIZE) { 265 for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
266 boost::optional<VAddr> maybe_vaddr =
267 Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
268 // The GPU <-> CPU virtual memory mapping is not 1:1
269 if (!maybe_vaddr) {
270 LOG_ERROR(HW_Memory,
271 "Trying to flush a cached region to an invalid physical address {:016X}",
272 gpu_addr);
273 continue;
274 }
275 VAddr vaddr = *maybe_vaddr;
276
277 PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS]; 266 PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
278 267
279 if (cached) { 268 if (cached) {
@@ -344,29 +333,19 @@ void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) {
344 333
345 const VAddr overlap_start = std::max(start, region_start); 334 const VAddr overlap_start = std::max(start, region_start);
346 const VAddr overlap_end = std::min(end, region_end); 335 const VAddr overlap_end = std::min(end, region_end);
347
348 const std::vector<Tegra::GPUVAddr> gpu_addresses =
349 system_instance.GPU().MemoryManager().CpuToGpuAddress(overlap_start);
350
351 if (gpu_addresses.empty()) {
352 return;
353 }
354
355 const u64 overlap_size = overlap_end - overlap_start; 336 const u64 overlap_size = overlap_end - overlap_start;
356 337
357 for (const auto& gpu_address : gpu_addresses) { 338 auto& rasterizer = system_instance.Renderer().Rasterizer();
358 auto& rasterizer = system_instance.Renderer().Rasterizer(); 339 switch (mode) {
359 switch (mode) { 340 case FlushMode::Flush:
360 case FlushMode::Flush: 341 rasterizer.FlushRegion(overlap_start, overlap_size);
361 rasterizer.FlushRegion(gpu_address, overlap_size); 342 break;
362 break; 343 case FlushMode::Invalidate:
363 case FlushMode::Invalidate: 344 rasterizer.InvalidateRegion(overlap_start, overlap_size);
364 rasterizer.InvalidateRegion(gpu_address, overlap_size); 345 break;
365 break; 346 case FlushMode::FlushAndInvalidate:
366 case FlushMode::FlushAndInvalidate: 347 rasterizer.FlushAndInvalidateRegion(overlap_start, overlap_size);
367 rasterizer.FlushAndInvalidateRegion(gpu_address, overlap_size); 348 break;
368 break;
369 }
370 } 349 }
371 }; 350 };
372 351