summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorGravatar Fernando Sahmkow2019-05-14 00:55:32 -0400
committerGravatar ReinUsesLisp2019-06-20 21:36:12 -0300
commitd267948a73d2364949660a24d07833ea05c9fcc8 (patch)
treef0bfd7e5bfdcb08be09733926e6b3a8c5ad31edf /src
parenttexture_cache: Document the most important methods. (diff)
downloadyuzu-d267948a73d2364949660a24d07833ea05c9fcc8.tar.gz
yuzu-d267948a73d2364949660a24d07833ea05c9fcc8.tar.xz
yuzu-d267948a73d2364949660a24d07833ea05c9fcc8.zip
texture_cache: loosen TryReconstructSurface when accurate GPU is not on.
Also corrects some asserts.
Diffstat (limited to 'src')
-rw-r--r--src/video_core/engines/maxwell_dma.cpp2
-rw-r--r--src/video_core/texture_cache/surface_base.cpp2
-rw-r--r--src/video_core/texture_cache/texture_cache.h20
3 files changed, 20 insertions, 4 deletions
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 3a5dfef0c..afb9578d0 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -111,7 +111,7 @@ void MaxwellDMA::HandleCopy() {
111 111
112 memory_manager.WriteBlock(dest, write_buffer.data(), dst_size); 112 memory_manager.WriteBlock(dest, write_buffer.data(), dst_size);
113 } else { 113 } else {
114 ASSERT(regs.dst_params.BlockDepth() == 1); 114 ASSERT(regs.dst_params.BlockDepth() == 0);
115 115
116 const u32 src_bytes_per_pixel = regs.src_pitch / regs.x_count; 116 const u32 src_bytes_per_pixel = regs.src_pitch / regs.x_count;
117 117
diff --git a/src/video_core/texture_cache/surface_base.cpp b/src/video_core/texture_cache/surface_base.cpp
index ceff51043..d4aa2c54b 100644
--- a/src/video_core/texture_cache/surface_base.cpp
+++ b/src/video_core/texture_cache/surface_base.cpp
@@ -130,7 +130,7 @@ void SurfaceBaseImpl::FlushBuffer(Tegra::MemoryManager& memory_manager,
130 return; 130 return;
131 } 131 }
132 if (params.is_tiled) { 132 if (params.is_tiled) {
133 ASSERT_MSG(params.block_width == 1, "Block width is defined as {}", params.block_width); 133 ASSERT_MSG(params.block_width == 0, "Block width is defined as {}", params.block_width);
134 for (u32 level = 0; level < params.num_levels; ++level) { 134 for (u32 level = 0; level < params.num_levels; ++level) {
135 const std::size_t host_offset{params.GetHostMipmapLevelOffset(level)}; 135 const std::size_t host_offset{params.GetHostMipmapLevelOffset(level)};
136 SwizzleFunc(MortonSwizzleMode::LinearToMorton, host_ptr, params, 136 SwizzleFunc(MortonSwizzleMode::LinearToMorton, host_ptr, params,
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 04e9528b8..85c9160e0 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -425,6 +425,7 @@ private:
425 } 425 }
426 bool modified = false; 426 bool modified = false;
427 TSurface new_surface = GetUncachedSurface(gpu_addr, params); 427 TSurface new_surface = GetUncachedSurface(gpu_addr, params);
428 u32 passed_tests = 0;
428 for (auto surface : overlaps) { 429 for (auto surface : overlaps) {
429 const SurfaceParams& src_params = surface->GetSurfaceParams(); 430 const SurfaceParams& src_params = surface->GetSurfaceParams();
430 if (src_params.is_layered || src_params.num_levels > 1) { 431 if (src_params.is_layered || src_params.num_levels > 1) {
@@ -434,12 +435,12 @@ private:
434 const std::size_t candidate_size = surface->GetSizeInBytes(); 435 const std::size_t candidate_size = surface->GetSizeInBytes();
435 auto mipmap_layer{new_surface->GetLayerMipmap(surface->GetGpuAddr())}; 436 auto mipmap_layer{new_surface->GetLayerMipmap(surface->GetGpuAddr())};
436 if (!mipmap_layer) { 437 if (!mipmap_layer) {
437 return {}; 438 continue;
438 } 439 }
439 const u32 layer{mipmap_layer->first}; 440 const u32 layer{mipmap_layer->first};
440 const u32 mipmap{mipmap_layer->second}; 441 const u32 mipmap{mipmap_layer->second};
441 if (new_surface->GetMipmapSize(mipmap) != candidate_size) { 442 if (new_surface->GetMipmapSize(mipmap) != candidate_size) {
442 return {}; 443 continue;
443 } 444 }
444 modified |= surface->IsModified(); 445 modified |= surface->IsModified();
445 // Now we got all the data set up 446 // Now we got all the data set up
@@ -448,8 +449,15 @@ private:
448 const CopyParams copy_params(0, 0, 0, 0, 0, layer, 0, mipmap, 449 const CopyParams copy_params(0, 0, 0, 0, 0, layer, 0, mipmap,
449 std::min(src_params.width, dst_width), 450 std::min(src_params.width, dst_width),
450 std::min(src_params.height, dst_height), 1); 451 std::min(src_params.height, dst_height), 1);
452 passed_tests++;
451 ImageCopy(surface, new_surface, copy_params); 453 ImageCopy(surface, new_surface, copy_params);
452 } 454 }
455 if (passed_tests == 0) {
456 return {};
457 // In accurate GPU emulation all tests should pass, else we recycle
458 } else if (Settings::values.use_accurate_gpu_emulation && passed_tests != overlaps.size()) {
459 return {};
460 }
453 for (auto surface : overlaps) { 461 for (auto surface : overlaps) {
454 Unregister(surface); 462 Unregister(surface);
455 } 463 }
@@ -548,6 +556,14 @@ private:
548 } 556 }
549 return {current_surface, *view}; 557 return {current_surface, *view};
550 } 558 }
559 // The next case is unsafe, so if we are in accurate GPU, just skip it
560 if (Settings::values.use_accurate_gpu_emulation) {
561 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents, false);
562 }
563 // This is the case the texture is a part of the parent.
564 if (current_surface->MatchesSubTexture(params, gpu_addr)) {
565 return RebuildSurface(current_surface, params);
566 }
551 } else { 567 } else {
552 // If there are many overlaps, odds are they are subtextures of the candidate 568 // If there are many overlaps, odds are they are subtextures of the candidate
553 // surface. We try to construct a new surface based on the candidate parameters, 569 // surface. We try to construct a new surface based on the candidate parameters,