diff options
| author | 2020-04-11 01:50:58 -0300 | |
|---|---|---|
| committer | 2020-04-11 01:51:02 -0300 | |
| commit | 94b0e2e5dae4e0bd0021ac2d8fe1ff904a93ee69 (patch) | |
| tree | 4b5c1a289b4a871dcaec3863cf951388f9637189 /src | |
| parent | Merge pull request #3594 from ReinUsesLisp/vk-instance (diff) | |
| download | yuzu-94b0e2e5dae4e0bd0021ac2d8fe1ff904a93ee69.tar.gz yuzu-94b0e2e5dae4e0bd0021ac2d8fe1ff904a93ee69.tar.xz yuzu-94b0e2e5dae4e0bd0021ac2d8fe1ff904a93ee69.zip | |
texture_cache: Remove preserve_contents
preserve_contents was always true. We can't assume we don't have to
preserve clears, because scissored and color-masked clears exist.
This removes preserve_contents and assumes it to be true at all times.
Diffstat (limited to '')
| -rw-r--r-- | src/video_core/renderer_opengl/gl_rasterizer.cpp | 8 | ||||
| -rw-r--r-- | src/video_core/renderer_vulkan/vk_rasterizer.cpp | 4 | ||||
| -rw-r--r-- | src/video_core/texture_cache/texture_cache.h | 66 |
3 files changed, 31 insertions, 47 deletions
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index 368f399df..c6ff4e27f 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp | |||
| @@ -345,7 +345,7 @@ void RasterizerOpenGL::ConfigureFramebuffers() { | |||
| 345 | 345 | ||
| 346 | texture_cache.GuardRenderTargets(true); | 346 | texture_cache.GuardRenderTargets(true); |
| 347 | 347 | ||
| 348 | View depth_surface = texture_cache.GetDepthBufferSurface(true); | 348 | View depth_surface = texture_cache.GetDepthBufferSurface(); |
| 349 | 349 | ||
| 350 | const auto& regs = gpu.regs; | 350 | const auto& regs = gpu.regs; |
| 351 | UNIMPLEMENTED_IF(regs.rt_separate_frag_data == 0); | 351 | UNIMPLEMENTED_IF(regs.rt_separate_frag_data == 0); |
| @@ -354,7 +354,7 @@ void RasterizerOpenGL::ConfigureFramebuffers() { | |||
| 354 | FramebufferCacheKey key; | 354 | FramebufferCacheKey key; |
| 355 | const auto colors_count = static_cast<std::size_t>(regs.rt_control.count); | 355 | const auto colors_count = static_cast<std::size_t>(regs.rt_control.count); |
| 356 | for (std::size_t index = 0; index < colors_count; ++index) { | 356 | for (std::size_t index = 0; index < colors_count; ++index) { |
| 357 | View color_surface{texture_cache.GetColorBufferSurface(index, true)}; | 357 | View color_surface{texture_cache.GetColorBufferSurface(index)}; |
| 358 | if (!color_surface) { | 358 | if (!color_surface) { |
| 359 | continue; | 359 | continue; |
| 360 | } | 360 | } |
| @@ -387,12 +387,12 @@ void RasterizerOpenGL::ConfigureClearFramebuffer(bool using_color_fb, bool using | |||
| 387 | View color_surface; | 387 | View color_surface; |
| 388 | if (using_color_fb) { | 388 | if (using_color_fb) { |
| 389 | const std::size_t index = regs.clear_buffers.RT; | 389 | const std::size_t index = regs.clear_buffers.RT; |
| 390 | color_surface = texture_cache.GetColorBufferSurface(index, true); | 390 | color_surface = texture_cache.GetColorBufferSurface(index); |
| 391 | texture_cache.MarkColorBufferInUse(index); | 391 | texture_cache.MarkColorBufferInUse(index); |
| 392 | } | 392 | } |
| 393 | View depth_surface; | 393 | View depth_surface; |
| 394 | if (using_depth_fb || using_stencil_fb) { | 394 | if (using_depth_fb || using_stencil_fb) { |
| 395 | depth_surface = texture_cache.GetDepthBufferSurface(true); | 395 | depth_surface = texture_cache.GetDepthBufferSurface(); |
| 396 | texture_cache.MarkDepthBufferInUse(); | 396 | texture_cache.MarkDepthBufferInUse(); |
| 397 | } | 397 | } |
| 398 | texture_cache.GuardRenderTargets(false); | 398 | texture_cache.GuardRenderTargets(false); |
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 0a2ea4fd4..6b99cbbbc 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp | |||
| @@ -599,7 +599,7 @@ RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() { | |||
| 599 | Texceptions texceptions; | 599 | Texceptions texceptions; |
| 600 | for (std::size_t rt = 0; rt < Maxwell::NumRenderTargets; ++rt) { | 600 | for (std::size_t rt = 0; rt < Maxwell::NumRenderTargets; ++rt) { |
| 601 | if (update_rendertargets) { | 601 | if (update_rendertargets) { |
| 602 | color_attachments[rt] = texture_cache.GetColorBufferSurface(rt, true); | 602 | color_attachments[rt] = texture_cache.GetColorBufferSurface(rt); |
| 603 | } | 603 | } |
| 604 | if (color_attachments[rt] && WalkAttachmentOverlaps(*color_attachments[rt])) { | 604 | if (color_attachments[rt] && WalkAttachmentOverlaps(*color_attachments[rt])) { |
| 605 | texceptions[rt] = true; | 605 | texceptions[rt] = true; |
| @@ -607,7 +607,7 @@ RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() { | |||
| 607 | } | 607 | } |
| 608 | 608 | ||
| 609 | if (update_rendertargets) { | 609 | if (update_rendertargets) { |
| 610 | zeta_attachment = texture_cache.GetDepthBufferSurface(true); | 610 | zeta_attachment = texture_cache.GetDepthBufferSurface(); |
| 611 | } | 611 | } |
| 612 | if (zeta_attachment && WalkAttachmentOverlaps(*zeta_attachment)) { | 612 | if (zeta_attachment && WalkAttachmentOverlaps(*zeta_attachment)) { |
| 613 | texceptions[ZETA_TEXCEPTION_INDEX] = true; | 613 | texceptions[ZETA_TEXCEPTION_INDEX] = true; |
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h index 88fe3e25f..cfc7fe6e9 100644 --- a/src/video_core/texture_cache/texture_cache.h +++ b/src/video_core/texture_cache/texture_cache.h | |||
| @@ -108,7 +108,7 @@ public: | |||
| 108 | } | 108 | } |
| 109 | 109 | ||
| 110 | const auto params{SurfaceParams::CreateForTexture(format_lookup_table, tic, entry)}; | 110 | const auto params{SurfaceParams::CreateForTexture(format_lookup_table, tic, entry)}; |
| 111 | const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, true, false); | 111 | const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, false); |
| 112 | if (guard_samplers) { | 112 | if (guard_samplers) { |
| 113 | sampled_textures.push_back(surface); | 113 | sampled_textures.push_back(surface); |
| 114 | } | 114 | } |
| @@ -128,7 +128,7 @@ public: | |||
| 128 | return GetNullSurface(SurfaceParams::ExpectedTarget(entry)); | 128 | return GetNullSurface(SurfaceParams::ExpectedTarget(entry)); |
| 129 | } | 129 | } |
| 130 | const auto params{SurfaceParams::CreateForImage(format_lookup_table, tic, entry)}; | 130 | const auto params{SurfaceParams::CreateForImage(format_lookup_table, tic, entry)}; |
| 131 | const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, true, false); | 131 | const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, false); |
| 132 | if (guard_samplers) { | 132 | if (guard_samplers) { |
| 133 | sampled_textures.push_back(surface); | 133 | sampled_textures.push_back(surface); |
| 134 | } | 134 | } |
| @@ -143,7 +143,7 @@ public: | |||
| 143 | return any_rt; | 143 | return any_rt; |
| 144 | } | 144 | } |
| 145 | 145 | ||
| 146 | TView GetDepthBufferSurface(bool preserve_contents) { | 146 | TView GetDepthBufferSurface() { |
| 147 | std::lock_guard lock{mutex}; | 147 | std::lock_guard lock{mutex}; |
| 148 | auto& maxwell3d = system.GPU().Maxwell3D(); | 148 | auto& maxwell3d = system.GPU().Maxwell3D(); |
| 149 | if (!maxwell3d.dirty.flags[VideoCommon::Dirty::ZetaBuffer]) { | 149 | if (!maxwell3d.dirty.flags[VideoCommon::Dirty::ZetaBuffer]) { |
| @@ -164,7 +164,7 @@ public: | |||
| 164 | return {}; | 164 | return {}; |
| 165 | } | 165 | } |
| 166 | const auto depth_params{SurfaceParams::CreateForDepthBuffer(system)}; | 166 | const auto depth_params{SurfaceParams::CreateForDepthBuffer(system)}; |
| 167 | auto surface_view = GetSurface(gpu_addr, *cpu_addr, depth_params, preserve_contents, true); | 167 | auto surface_view = GetSurface(gpu_addr, *cpu_addr, depth_params, true); |
| 168 | if (depth_buffer.target) | 168 | if (depth_buffer.target) |
| 169 | depth_buffer.target->MarkAsRenderTarget(false, NO_RT); | 169 | depth_buffer.target->MarkAsRenderTarget(false, NO_RT); |
| 170 | depth_buffer.target = surface_view.first; | 170 | depth_buffer.target = surface_view.first; |
| @@ -174,7 +174,7 @@ public: | |||
| 174 | return surface_view.second; | 174 | return surface_view.second; |
| 175 | } | 175 | } |
| 176 | 176 | ||
| 177 | TView GetColorBufferSurface(std::size_t index, bool preserve_contents) { | 177 | TView GetColorBufferSurface(std::size_t index) { |
| 178 | std::lock_guard lock{mutex}; | 178 | std::lock_guard lock{mutex}; |
| 179 | ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets); | 179 | ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets); |
| 180 | auto& maxwell3d = system.GPU().Maxwell3D(); | 180 | auto& maxwell3d = system.GPU().Maxwell3D(); |
| @@ -204,9 +204,8 @@ public: | |||
| 204 | return {}; | 204 | return {}; |
| 205 | } | 205 | } |
| 206 | 206 | ||
| 207 | auto surface_view = | 207 | auto surface_view = GetSurface(gpu_addr, *cpu_addr, |
| 208 | GetSurface(gpu_addr, *cpu_addr, SurfaceParams::CreateForFramebuffer(system, index), | 208 | SurfaceParams::CreateForFramebuffer(system, index), true); |
| 209 | preserve_contents, true); | ||
| 210 | if (render_targets[index].target) | 209 | if (render_targets[index].target) |
| 211 | render_targets[index].target->MarkAsRenderTarget(false, NO_RT); | 210 | render_targets[index].target->MarkAsRenderTarget(false, NO_RT); |
| 212 | render_targets[index].target = surface_view.first; | 211 | render_targets[index].target = surface_view.first; |
| @@ -260,9 +259,9 @@ public: | |||
| 260 | const std::optional<VAddr> src_cpu_addr = | 259 | const std::optional<VAddr> src_cpu_addr = |
| 261 | system.GPU().MemoryManager().GpuToCpuAddress(src_gpu_addr); | 260 | system.GPU().MemoryManager().GpuToCpuAddress(src_gpu_addr); |
| 262 | std::pair<TSurface, TView> dst_surface = | 261 | std::pair<TSurface, TView> dst_surface = |
| 263 | GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, true, false); | 262 | GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, false); |
| 264 | std::pair<TSurface, TView> src_surface = | 263 | std::pair<TSurface, TView> src_surface = |
| 265 | GetSurface(src_gpu_addr, *src_cpu_addr, src_params, true, false); | 264 | GetSurface(src_gpu_addr, *src_cpu_addr, src_params, false); |
| 266 | ImageBlit(src_surface.second, dst_surface.second, copy_config); | 265 | ImageBlit(src_surface.second, dst_surface.second, copy_config); |
| 267 | dst_surface.first->MarkAsModified(true, Tick()); | 266 | dst_surface.first->MarkAsModified(true, Tick()); |
| 268 | } | 267 | } |
| @@ -451,22 +450,18 @@ private: | |||
| 451 | * @param overlaps The overlapping surfaces registered in the cache. | 450 | * @param overlaps The overlapping surfaces registered in the cache. |
| 452 | * @param params The parameters for the new surface. | 451 | * @param params The parameters for the new surface. |
| 453 | * @param gpu_addr The starting address of the new surface. | 452 | * @param gpu_addr The starting address of the new surface. |
| 454 | * @param preserve_contents Indicates that the new surface should be loaded from memory or left | ||
| 455 | * blank. | ||
| 456 | * @param untopological Indicates to the recycler that the texture has no way to match the | 453 | * @param untopological Indicates to the recycler that the texture has no way to match the |
| 457 | * overlaps due to topological reasons. | 454 | * overlaps due to topological reasons. |
| 458 | **/ | 455 | **/ |
| 459 | std::pair<TSurface, TView> RecycleSurface(std::vector<TSurface>& overlaps, | 456 | std::pair<TSurface, TView> RecycleSurface(std::vector<TSurface>& overlaps, |
| 460 | const SurfaceParams& params, const GPUVAddr gpu_addr, | 457 | const SurfaceParams& params, const GPUVAddr gpu_addr, |
| 461 | const bool preserve_contents, | ||
| 462 | const MatchTopologyResult untopological) { | 458 | const MatchTopologyResult untopological) { |
| 463 | const bool do_load = preserve_contents && Settings::values.use_accurate_gpu_emulation; | ||
| 464 | for (auto& surface : overlaps) { | 459 | for (auto& surface : overlaps) { |
| 465 | Unregister(surface); | 460 | Unregister(surface); |
| 466 | } | 461 | } |
| 467 | switch (PickStrategy(overlaps, params, gpu_addr, untopological)) { | 462 | switch (PickStrategy(overlaps, params, gpu_addr, untopological)) { |
| 468 | case RecycleStrategy::Ignore: { | 463 | case RecycleStrategy::Ignore: { |
| 469 | return InitializeSurface(gpu_addr, params, do_load); | 464 | return InitializeSurface(gpu_addr, params, Settings::values.use_accurate_gpu_emulation); |
| 470 | } | 465 | } |
| 471 | case RecycleStrategy::Flush: { | 466 | case RecycleStrategy::Flush: { |
| 472 | std::sort(overlaps.begin(), overlaps.end(), | 467 | std::sort(overlaps.begin(), overlaps.end(), |
| @@ -476,7 +471,7 @@ private: | |||
| 476 | for (auto& surface : overlaps) { | 471 | for (auto& surface : overlaps) { |
| 477 | FlushSurface(surface); | 472 | FlushSurface(surface); |
| 478 | } | 473 | } |
| 479 | return InitializeSurface(gpu_addr, params, preserve_contents); | 474 | return InitializeSurface(gpu_addr, params); |
| 480 | } | 475 | } |
| 481 | case RecycleStrategy::BufferCopy: { | 476 | case RecycleStrategy::BufferCopy: { |
| 482 | auto new_surface = GetUncachedSurface(gpu_addr, params); | 477 | auto new_surface = GetUncachedSurface(gpu_addr, params); |
| @@ -485,7 +480,7 @@ private: | |||
| 485 | } | 480 | } |
| 486 | default: { | 481 | default: { |
| 487 | UNIMPLEMENTED_MSG("Unimplemented Texture Cache Recycling Strategy!"); | 482 | UNIMPLEMENTED_MSG("Unimplemented Texture Cache Recycling Strategy!"); |
| 488 | return InitializeSurface(gpu_addr, params, do_load); | 483 | return InitializeSurface(gpu_addr, params); |
| 489 | } | 484 | } |
| 490 | } | 485 | } |
| 491 | } | 486 | } |
| @@ -621,14 +616,11 @@ private: | |||
| 621 | * @param params The parameters on the new surface. | 616 | * @param params The parameters on the new surface. |
| 622 | * @param gpu_addr The starting address of the new surface. | 617 | * @param gpu_addr The starting address of the new surface. |
| 623 | * @param cache_addr The starting address of the new surface on physical memory. | 618 | * @param cache_addr The starting address of the new surface on physical memory. |
| 624 | * @param preserve_contents Indicates that the new surface should be loaded from memory or | ||
| 625 | * left blank. | ||
| 626 | */ | 619 | */ |
| 627 | std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(std::vector<TSurface>& overlaps, | 620 | std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(std::vector<TSurface>& overlaps, |
| 628 | const SurfaceParams& params, | 621 | const SurfaceParams& params, |
| 629 | const GPUVAddr gpu_addr, | 622 | const GPUVAddr gpu_addr, |
| 630 | const VAddr cpu_addr, | 623 | const VAddr cpu_addr) { |
| 631 | bool preserve_contents) { | ||
| 632 | if (params.target == SurfaceTarget::Texture3D) { | 624 | if (params.target == SurfaceTarget::Texture3D) { |
| 633 | bool failed = false; | 625 | bool failed = false; |
| 634 | if (params.num_levels > 1) { | 626 | if (params.num_levels > 1) { |
| @@ -677,7 +669,7 @@ private: | |||
| 677 | return std::nullopt; | 669 | return std::nullopt; |
| 678 | } | 670 | } |
| 679 | Unregister(surface); | 671 | Unregister(surface); |
| 680 | return InitializeSurface(gpu_addr, params, preserve_contents); | 672 | return InitializeSurface(gpu_addr, params); |
| 681 | } | 673 | } |
| 682 | return std::nullopt; | 674 | return std::nullopt; |
| 683 | } | 675 | } |
| @@ -688,7 +680,7 @@ private: | |||
| 688 | return {{surface, surface->GetMainView()}}; | 680 | return {{surface, surface->GetMainView()}}; |
| 689 | } | 681 | } |
| 690 | } | 682 | } |
| 691 | return InitializeSurface(gpu_addr, params, preserve_contents); | 683 | return InitializeSurface(gpu_addr, params); |
| 692 | } | 684 | } |
| 693 | } | 685 | } |
| 694 | 686 | ||
| @@ -711,13 +703,10 @@ private: | |||
| 711 | * | 703 | * |
| 712 | * @param gpu_addr The starting address of the candidate surface. | 704 | * @param gpu_addr The starting address of the candidate surface. |
| 713 | * @param params The parameters on the candidate surface. | 705 | * @param params The parameters on the candidate surface. |
| 714 | * @param preserve_contents Indicates that the new surface should be loaded from memory or | ||
| 715 | * left blank. | ||
| 716 | * @param is_render Whether or not the surface is a render target. | 706 | * @param is_render Whether or not the surface is a render target. |
| 717 | **/ | 707 | **/ |
| 718 | std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const VAddr cpu_addr, | 708 | std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const VAddr cpu_addr, |
| 719 | const SurfaceParams& params, bool preserve_contents, | 709 | const SurfaceParams& params, bool is_render) { |
| 720 | bool is_render) { | ||
| 721 | // Step 1 | 710 | // Step 1 |
| 722 | // Check Level 1 Cache for a fast structural match. If candidate surface | 711 | // Check Level 1 Cache for a fast structural match. If candidate surface |
| 723 | // matches at certain level we are pretty much done. | 712 | // matches at certain level we are pretty much done. |
| @@ -726,8 +715,7 @@ private: | |||
| 726 | const auto topological_result = current_surface->MatchesTopology(params); | 715 | const auto topological_result = current_surface->MatchesTopology(params); |
| 727 | if (topological_result != MatchTopologyResult::FullMatch) { | 716 | if (topological_result != MatchTopologyResult::FullMatch) { |
| 728 | std::vector<TSurface> overlaps{current_surface}; | 717 | std::vector<TSurface> overlaps{current_surface}; |
| 729 | return RecycleSurface(overlaps, params, gpu_addr, preserve_contents, | 718 | return RecycleSurface(overlaps, params, gpu_addr, topological_result); |
| 730 | topological_result); | ||
| 731 | } | 719 | } |
| 732 | 720 | ||
| 733 | const auto struct_result = current_surface->MatchesStructure(params); | 721 | const auto struct_result = current_surface->MatchesStructure(params); |
| @@ -752,7 +740,7 @@ private: | |||
| 752 | 740 | ||
| 753 | // If none are found, we are done. we just load the surface and create it. | 741 | // If none are found, we are done. we just load the surface and create it. |
| 754 | if (overlaps.empty()) { | 742 | if (overlaps.empty()) { |
| 755 | return InitializeSurface(gpu_addr, params, preserve_contents); | 743 | return InitializeSurface(gpu_addr, params); |
| 756 | } | 744 | } |
| 757 | 745 | ||
| 758 | // Step 3 | 746 | // Step 3 |
| @@ -762,15 +750,13 @@ private: | |||
| 762 | for (const auto& surface : overlaps) { | 750 | for (const auto& surface : overlaps) { |
| 763 | const auto topological_result = surface->MatchesTopology(params); | 751 | const auto topological_result = surface->MatchesTopology(params); |
| 764 | if (topological_result != MatchTopologyResult::FullMatch) { | 752 | if (topological_result != MatchTopologyResult::FullMatch) { |
| 765 | return RecycleSurface(overlaps, params, gpu_addr, preserve_contents, | 753 | return RecycleSurface(overlaps, params, gpu_addr, topological_result); |
| 766 | topological_result); | ||
| 767 | } | 754 | } |
| 768 | } | 755 | } |
| 769 | 756 | ||
| 770 | // Check if it's a 3D texture | 757 | // Check if it's a 3D texture |
| 771 | if (params.block_depth > 0) { | 758 | if (params.block_depth > 0) { |
| 772 | auto surface = | 759 | auto surface = Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr); |
| 773 | Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr, preserve_contents); | ||
| 774 | if (surface) { | 760 | if (surface) { |
| 775 | return *surface; | 761 | return *surface; |
| 776 | } | 762 | } |
| @@ -790,8 +776,7 @@ private: | |||
| 790 | return *view; | 776 | return *view; |
| 791 | } | 777 | } |
| 792 | } | 778 | } |
| 793 | return RecycleSurface(overlaps, params, gpu_addr, preserve_contents, | 779 | return RecycleSurface(overlaps, params, gpu_addr, MatchTopologyResult::FullMatch); |
| 794 | MatchTopologyResult::FullMatch); | ||
| 795 | } | 780 | } |
| 796 | // Now we check if the candidate is a mipmap/layer of the overlap | 781 | // Now we check if the candidate is a mipmap/layer of the overlap |
| 797 | std::optional<TView> view = | 782 | std::optional<TView> view = |
| @@ -815,7 +800,7 @@ private: | |||
| 815 | pair.first->EmplaceView(params, gpu_addr, candidate_size); | 800 | pair.first->EmplaceView(params, gpu_addr, candidate_size); |
| 816 | if (mirage_view) | 801 | if (mirage_view) |
| 817 | return {pair.first, *mirage_view}; | 802 | return {pair.first, *mirage_view}; |
| 818 | return RecycleSurface(overlaps, params, gpu_addr, preserve_contents, | 803 | return RecycleSurface(overlaps, params, gpu_addr, |
| 819 | MatchTopologyResult::FullMatch); | 804 | MatchTopologyResult::FullMatch); |
| 820 | } | 805 | } |
| 821 | return {current_surface, *view}; | 806 | return {current_surface, *view}; |
| @@ -831,8 +816,7 @@ private: | |||
| 831 | } | 816 | } |
| 832 | } | 817 | } |
| 833 | // We failed all the tests, recycle the overlaps into a new texture. | 818 | // We failed all the tests, recycle the overlaps into a new texture. |
| 834 | return RecycleSurface(overlaps, params, gpu_addr, preserve_contents, | 819 | return RecycleSurface(overlaps, params, gpu_addr, MatchTopologyResult::FullMatch); |
| 835 | MatchTopologyResult::FullMatch); | ||
| 836 | } | 820 | } |
| 837 | 821 | ||
| 838 | /** | 822 | /** |
| @@ -990,10 +974,10 @@ private: | |||
| 990 | } | 974 | } |
| 991 | 975 | ||
| 992 | std::pair<TSurface, TView> InitializeSurface(GPUVAddr gpu_addr, const SurfaceParams& params, | 976 | std::pair<TSurface, TView> InitializeSurface(GPUVAddr gpu_addr, const SurfaceParams& params, |
| 993 | bool preserve_contents) { | 977 | bool do_load = true) { |
| 994 | auto new_surface{GetUncachedSurface(gpu_addr, params)}; | 978 | auto new_surface{GetUncachedSurface(gpu_addr, params)}; |
| 995 | Register(new_surface); | 979 | Register(new_surface); |
| 996 | if (preserve_contents) { | 980 | if (do_load) { |
| 997 | LoadSurface(new_surface); | 981 | LoadSurface(new_surface); |
| 998 | } | 982 | } |
| 999 | return {new_surface, new_surface->GetMainView()}; | 983 | return {new_surface, new_surface->GetMainView()}; |