author     ReinUsesLisp  2020-04-26 19:53:02 -0300
committer  ReinUsesLisp  2020-04-26 19:53:02 -0300
commit     8da16cf9fb6c2133faed1164a8573992e84a4297 (patch)
tree       a0d14c5ec0955d35cbc22b1670eadcdbdbe5ba31 /src
parent     Merge pull request #3795 from vitor-k/fix-folder (diff)
texture_cache: Reintroduce preserve_contents accurately
This reverts commit 94b0e2e5dae4e0bd0021ac2d8fe1ff904a93ee69.

preserve_contents proved to be a meaningful optimization, so this commit reintroduces it, this time implemented properly on OpenGL: we have to make sure a clear really removes all of the previous contents of the image before we skip preserving them. It is not reintroduced on Vulkan for now, because there we can do smarter things that are better left to a separate commit.
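At its core, the optimization hinges on deciding whether a clear overwrites every pixel and every channel of the attachment. The following is a minimal, self-contained sketch of that decision as the OpenGL path below implements it for color targets; the struct and function names are illustrative stand-ins, not yuzu's actual Maxwell3D register types:

#include <cstdint>

// Simplified stand-ins for the Maxwell3D clear state used in the diff below.
struct ClearState {
    bool mask_r, mask_g, mask_b, mask_a; // color write masks active for the clear
    bool scissor_enabled;                // regs.clear_flags.scissor
    std::uint32_t scissor_min_x, scissor_min_y;
    std::uint32_t scissor_max_x, scissor_max_y;
};

// Returns true when the old contents of a color render target must be kept,
// i.e. the clear does not overwrite every pixel and every channel.
bool MustPreserveColorContents(const ClearState& s, std::uint32_t rt_width,
                               std::uint32_t rt_height) {
    // Any disabled channel mask means the clear leaves data behind.
    bool preserve = !s.mask_r || !s.mask_g || !s.mask_b || !s.mask_a;
    if (s.scissor_enabled) {
        // A scissor box smaller than the render target also leaves data behind.
        preserve |= s.scissor_min_x > 0 || s.scissor_min_y > 0 ||
                    s.scissor_max_x < rt_width || s.scissor_max_y < rt_height;
    }
    return preserve;
}

Only when this returns false can the texture cache be asked to skip loading the old contents from guest memory. The depth/stencil path below applies the same scissor check but has no channel masks to consider.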
Diffstat (limited to 'src')
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp   49
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h      3
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp    4
-rw-r--r--  src/video_core/texture_cache/texture_cache.h       66
4 files changed, 81 insertions, 41 deletions
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 6fe155bcc..69a74449c 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -348,7 +348,7 @@ void RasterizerOpenGL::ConfigureFramebuffers() {
 
     texture_cache.GuardRenderTargets(true);
 
-    View depth_surface = texture_cache.GetDepthBufferSurface();
+    View depth_surface = texture_cache.GetDepthBufferSurface(true);
 
     const auto& regs = gpu.regs;
     UNIMPLEMENTED_IF(regs.rt_separate_frag_data == 0);
@@ -357,7 +357,7 @@ void RasterizerOpenGL::ConfigureFramebuffers() {
     FramebufferCacheKey key;
     const auto colors_count = static_cast<std::size_t>(regs.rt_control.count);
     for (std::size_t index = 0; index < colors_count; ++index) {
-        View color_surface{texture_cache.GetColorBufferSurface(index)};
+        View color_surface{texture_cache.GetColorBufferSurface(index, true)};
         if (!color_surface) {
             continue;
         }
@@ -381,28 +381,52 @@ void RasterizerOpenGL::ConfigureFramebuffers() {
     glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer_cache.GetFramebuffer(key));
 }
 
-void RasterizerOpenGL::ConfigureClearFramebuffer(bool using_color_fb, bool using_depth_fb,
-                                                 bool using_stencil_fb) {
+void RasterizerOpenGL::ConfigureClearFramebuffer(bool using_color, bool using_depth_stencil) {
     auto& gpu = system.GPU().Maxwell3D();
     const auto& regs = gpu.regs;
 
     texture_cache.GuardRenderTargets(true);
     View color_surface;
-    if (using_color_fb) {
+
+    if (using_color) {
+        // Determine if we have to preserve the contents.
+        // First we have to make sure all clear masks are enabled.
+        bool preserve_contents = !regs.clear_buffers.R || !regs.clear_buffers.G ||
+                                 !regs.clear_buffers.B || !regs.clear_buffers.A;
         const std::size_t index = regs.clear_buffers.RT;
-        color_surface = texture_cache.GetColorBufferSurface(index);
+        if (regs.clear_flags.scissor) {
+            // Then we have to confirm scissor testing clears the whole image.
+            const auto& scissor = regs.scissor_test[0];
+            preserve_contents |= scissor.min_x > 0;
+            preserve_contents |= scissor.min_y > 0;
+            preserve_contents |= scissor.max_x < regs.rt[index].width;
+            preserve_contents |= scissor.max_y < regs.rt[index].height;
+        }
+
+        color_surface = texture_cache.GetColorBufferSurface(index, preserve_contents);
         texture_cache.MarkColorBufferInUse(index);
     }
+
     View depth_surface;
-    if (using_depth_fb || using_stencil_fb) {
-        depth_surface = texture_cache.GetDepthBufferSurface();
+    if (using_depth_stencil) {
+        bool preserve_contents = false;
+        if (regs.clear_flags.scissor) {
+            // For depth stencil clears we only have to confirm scissor test covers the whole image.
+            const auto& scissor = regs.scissor_test[0];
+            preserve_contents |= scissor.min_x > 0;
+            preserve_contents |= scissor.min_y > 0;
+            preserve_contents |= scissor.max_x < regs.zeta_width;
+            preserve_contents |= scissor.max_y < regs.zeta_height;
+        }
+
+        depth_surface = texture_cache.GetDepthBufferSurface(preserve_contents);
         texture_cache.MarkDepthBufferInUse();
     }
     texture_cache.GuardRenderTargets(false);
 
     FramebufferCacheKey key;
-    key.colors[0] = color_surface;
-    key.zeta = depth_surface;
+    key.colors[0] = std::move(color_surface);
+    key.zeta = std::move(depth_surface);
 
     state_tracker.NotifyFramebuffer();
     glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer_cache.GetFramebuffer(key));
@@ -422,8 +446,7 @@ void RasterizerOpenGL::Clear() {
     if (regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B ||
         regs.clear_buffers.A) {
         use_color = true;
-    }
-    if (use_color) {
+
         state_tracker.NotifyColorMask0();
         glColorMaski(0, regs.clear_buffers.R != 0, regs.clear_buffers.G != 0,
                      regs.clear_buffers.B != 0, regs.clear_buffers.A != 0);
@@ -461,7 +484,7 @@ void RasterizerOpenGL::Clear() {
 
     UNIMPLEMENTED_IF(regs.clear_flags.viewport);
 
-    ConfigureClearFramebuffer(use_color, use_depth, use_stencil);
+    ConfigureClearFramebuffer(use_color, use_depth || use_stencil);
 
     if (use_color) {
         glClearBufferfv(GL_COLOR, 0, regs.clear_color);
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index ebd2173eb..87249fb6f 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -95,7 +95,8 @@ private:
     /// Configures the color and depth framebuffer states.
     void ConfigureFramebuffers();
 
-    void ConfigureClearFramebuffer(bool using_color_fb, bool using_depth_fb, bool using_stencil_fb);
+    /// Configures the color and depth framebuffer for clearing.
+    void ConfigureClearFramebuffer(bool using_color, bool using_depth_stencil);
 
     /// Configures the current constbuffers to use for the draw command.
     void SetupDrawConstBuffers(std::size_t stage_index, const Shader& shader);
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 68464e637..51c3b0f77 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -652,7 +652,7 @@ RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() {
     Texceptions texceptions;
     for (std::size_t rt = 0; rt < Maxwell::NumRenderTargets; ++rt) {
         if (update_rendertargets) {
-            color_attachments[rt] = texture_cache.GetColorBufferSurface(rt);
+            color_attachments[rt] = texture_cache.GetColorBufferSurface(rt, true);
         }
         if (color_attachments[rt] && WalkAttachmentOverlaps(*color_attachments[rt])) {
             texceptions[rt] = true;
@@ -660,7 +660,7 @@ RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() {
     }
 
     if (update_rendertargets) {
-        zeta_attachment = texture_cache.GetDepthBufferSurface();
+        zeta_attachment = texture_cache.GetDepthBufferSurface(true);
     }
     if (zeta_attachment && WalkAttachmentOverlaps(*zeta_attachment)) {
         texceptions[ZETA_TEXCEPTION_INDEX] = true;
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index cf6bd005a..d2d2846e6 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -143,7 +143,7 @@ public:
         }
 
         const auto params{SurfaceParams::CreateForTexture(format_lookup_table, tic, entry)};
-        const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, false);
+        const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, true, false);
         if (guard_samplers) {
             sampled_textures.push_back(surface);
         }
@@ -163,7 +163,7 @@ public:
             return GetNullSurface(SurfaceParams::ExpectedTarget(entry));
         }
         const auto params{SurfaceParams::CreateForImage(format_lookup_table, tic, entry)};
-        const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, false);
+        const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, true, false);
         if (guard_samplers) {
             sampled_textures.push_back(surface);
         }
@@ -178,7 +178,7 @@ public:
         return any_rt;
     }
 
-    TView GetDepthBufferSurface() {
+    TView GetDepthBufferSurface(bool preserve_contents) {
         std::lock_guard lock{mutex};
         auto& maxwell3d = system.GPU().Maxwell3D();
         if (!maxwell3d.dirty.flags[VideoCommon::Dirty::ZetaBuffer]) {
@@ -199,7 +199,7 @@ public:
             return {};
         }
         const auto depth_params{SurfaceParams::CreateForDepthBuffer(system)};
-        auto surface_view = GetSurface(gpu_addr, *cpu_addr, depth_params, true);
+        auto surface_view = GetSurface(gpu_addr, *cpu_addr, depth_params, preserve_contents, true);
         if (depth_buffer.target)
             depth_buffer.target->MarkAsRenderTarget(false, NO_RT);
         depth_buffer.target = surface_view.first;
@@ -209,7 +209,7 @@ public:
         return surface_view.second;
     }
 
-    TView GetColorBufferSurface(std::size_t index) {
+    TView GetColorBufferSurface(std::size_t index, bool preserve_contents) {
         std::lock_guard lock{mutex};
         ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets);
         auto& maxwell3d = system.GPU().Maxwell3D();
@@ -239,8 +239,9 @@ public:
             return {};
         }
 
-        auto surface_view = GetSurface(gpu_addr, *cpu_addr,
-                                       SurfaceParams::CreateForFramebuffer(system, index), true);
+        auto surface_view =
+            GetSurface(gpu_addr, *cpu_addr, SurfaceParams::CreateForFramebuffer(system, index),
+                       preserve_contents, true);
         if (render_targets[index].target) {
             auto& surface = render_targets[index].target;
             surface->MarkAsRenderTarget(false, NO_RT);
@@ -300,9 +301,9 @@ public:
         const std::optional<VAddr> src_cpu_addr =
             system.GPU().MemoryManager().GpuToCpuAddress(src_gpu_addr);
         std::pair<TSurface, TView> dst_surface =
-            GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, false);
+            GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, true, false);
         std::pair<TSurface, TView> src_surface =
-            GetSurface(src_gpu_addr, *src_cpu_addr, src_params, false);
+            GetSurface(src_gpu_addr, *src_cpu_addr, src_params, true, false);
         ImageBlit(src_surface.second, dst_surface.second, copy_config);
         dst_surface.first->MarkAsModified(true, Tick());
     }
@@ -532,18 +533,22 @@ private:
      * @param overlaps The overlapping surfaces registered in the cache.
      * @param params The parameters for the new surface.
      * @param gpu_addr The starting address of the new surface.
+     * @param preserve_contents Indicates that the new surface should be loaded from memory or left
+     *                          blank.
      * @param untopological Indicates to the recycler that the texture has no way to match the
      *                      overlaps due to topological reasons.
      **/
     std::pair<TSurface, TView> RecycleSurface(std::vector<TSurface>& overlaps,
                                               const SurfaceParams& params, const GPUVAddr gpu_addr,
+                                              const bool preserve_contents,
                                               const MatchTopologyResult untopological) {
+        const bool do_load = preserve_contents && Settings::IsGPULevelExtreme();
         for (auto& surface : overlaps) {
             Unregister(surface);
         }
         switch (PickStrategy(overlaps, params, gpu_addr, untopological)) {
         case RecycleStrategy::Ignore: {
-            return InitializeSurface(gpu_addr, params, Settings::IsGPULevelExtreme());
+            return InitializeSurface(gpu_addr, params, do_load);
         }
         case RecycleStrategy::Flush: {
             std::sort(overlaps.begin(), overlaps.end(),
@@ -553,7 +558,7 @@ private:
             for (auto& surface : overlaps) {
                 FlushSurface(surface);
             }
-            return InitializeSurface(gpu_addr, params);
+            return InitializeSurface(gpu_addr, params, preserve_contents);
         }
         case RecycleStrategy::BufferCopy: {
             auto new_surface = GetUncachedSurface(gpu_addr, params);
@@ -562,7 +567,7 @@ private:
         }
         default: {
             UNIMPLEMENTED_MSG("Unimplemented Texture Cache Recycling Strategy!");
-            return InitializeSurface(gpu_addr, params);
+            return InitializeSurface(gpu_addr, params, do_load);
         }
         }
     }
@@ -700,11 +705,14 @@ private:
      * @param params The parameters on the new surface.
      * @param gpu_addr The starting address of the new surface.
      * @param cpu_addr The starting address of the new surface on physical memory.
+     * @param preserve_contents Indicates that the new surface should be loaded from memory or
+     *                          left blank.
      */
     std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(std::vector<TSurface>& overlaps,
                                                                const SurfaceParams& params,
                                                                const GPUVAddr gpu_addr,
-                                                               const VAddr cpu_addr) {
+                                                               const VAddr cpu_addr,
+                                                               bool preserve_contents) {
         if (params.target == SurfaceTarget::Texture3D) {
             bool failed = false;
             if (params.num_levels > 1) {
@@ -754,7 +762,7 @@ private:
                 return std::nullopt;
             }
             Unregister(surface);
-            return InitializeSurface(gpu_addr, params);
+            return InitializeSurface(gpu_addr, params, preserve_contents);
         }
         return std::nullopt;
     }
@@ -765,7 +773,7 @@ private:
                     return {{surface, surface->GetMainView()}};
                 }
             }
-            return InitializeSurface(gpu_addr, params);
+            return InitializeSurface(gpu_addr, params, preserve_contents);
         }
     }
 
@@ -788,10 +796,13 @@ private:
      *
      * @param gpu_addr The starting address of the candidate surface.
      * @param params The parameters on the candidate surface.
+     * @param preserve_contents Indicates that the new surface should be loaded from memory or
+     *                          left blank.
      * @param is_render Whether or not the surface is a render target.
      **/
     std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const VAddr cpu_addr,
-                                          const SurfaceParams& params, bool is_render) {
+                                          const SurfaceParams& params, bool preserve_contents,
+                                          bool is_render) {
         // Step 1
         // Check Level 1 Cache for a fast structural match. If candidate surface
         // matches at certain level we are pretty much done.
@@ -800,7 +811,8 @@ private:
             const auto topological_result = current_surface->MatchesTopology(params);
             if (topological_result != MatchTopologyResult::FullMatch) {
                 std::vector<TSurface> overlaps{current_surface};
-                return RecycleSurface(overlaps, params, gpu_addr, topological_result);
+                return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+                                      topological_result);
             }
 
             const auto struct_result = current_surface->MatchesStructure(params);
@@ -825,7 +837,7 @@ private:
 
         // If none are found, we are done. we just load the surface and create it.
        if (overlaps.empty()) {
-            return InitializeSurface(gpu_addr, params);
+            return InitializeSurface(gpu_addr, params, preserve_contents);
        }
 
        // Step 3
@@ -835,13 +847,15 @@ private:
        for (const auto& surface : overlaps) {
            const auto topological_result = surface->MatchesTopology(params);
            if (topological_result != MatchTopologyResult::FullMatch) {
-                return RecycleSurface(overlaps, params, gpu_addr, topological_result);
+                return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+                                      topological_result);
            }
        }
 
        // Check if it's a 3D texture
        if (params.block_depth > 0) {
-            auto surface = Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr);
+            auto surface =
+                Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr, preserve_contents);
            if (surface) {
                return *surface;
            }
@@ -861,7 +875,8 @@ private:
                    return *view;
                }
            }
-            return RecycleSurface(overlaps, params, gpu_addr, MatchTopologyResult::FullMatch);
+            return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+                                  MatchTopologyResult::FullMatch);
        }
        // Now we check if the candidate is a mipmap/layer of the overlap
        std::optional<TView> view =
@@ -885,7 +900,7 @@ private:
                pair.first->EmplaceView(params, gpu_addr, candidate_size);
                if (mirage_view)
                    return {pair.first, *mirage_view};
-                return RecycleSurface(overlaps, params, gpu_addr,
+                return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
                                      MatchTopologyResult::FullMatch);
            }
            return {current_surface, *view};
@@ -901,7 +916,8 @@ private:
            }
        }
        // We failed all the tests, recycle the overlaps into a new texture.
-        return RecycleSurface(overlaps, params, gpu_addr, MatchTopologyResult::FullMatch);
+        return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
+                              MatchTopologyResult::FullMatch);
    }
 
    /**
@@ -1059,10 +1075,10 @@ private:
    }
 
    std::pair<TSurface, TView> InitializeSurface(GPUVAddr gpu_addr, const SurfaceParams& params,
-                                                 bool do_load = true) {
+                                                 bool preserve_contents) {
        auto new_surface{GetUncachedSurface(gpu_addr, params)};
        Register(new_surface);
-        if (do_load) {
+        if (preserve_contents) {
            LoadSurface(new_surface);
        }
        return {new_surface, new_surface->GetMainView()};
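To recap where the flag finally takes effect, here is a condensed, hypothetical model (simplified names, not the real TextureCache class) of the two load decisions introduced above in InitializeSurface and RecycleSurface:

// Hypothetical, condensed model of the flag propagation shown in the diff.
enum class GpuAccuracy { Normal, High, Extreme };

struct SurfaceSketch {
    bool loaded_from_memory = false;
};

// Mirrors InitializeSurface: the load only happens when contents must be preserved.
SurfaceSketch InitializeSurfaceSketch(bool preserve_contents) {
    SurfaceSketch surface;
    if (preserve_contents) {
        surface.loaded_from_memory = true; // stands in for LoadSurface(new_surface)
    }
    return surface;
}

// Mirrors the RecycleStrategy::Ignore and default paths: even if the caller wants the
// contents, they are only reloaded when the user runs with Extreme GPU accuracy.
SurfaceSketch RecycleSurfaceSketch(bool preserve_contents, GpuAccuracy accuracy) {
    const bool do_load = preserve_contents && accuracy == GpuAccuracy::Extreme;
    return InitializeSurfaceSketch(do_load);
}

In practice, a full-screen clear with all channel masks enabled reaches these paths with preserve_contents == false and skips the guest-memory read entirely, while regular draws (ConfigureFramebuffers and the Vulkan attachments) keep passing true and behave exactly as before.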