author    Fernando Sahmkow    2019-06-25 18:36:19 -0400
committer Fernando Sahmkow    2019-06-25 18:36:19 -0400
commit    88bc39374fd7cffd2864229ae60bdab3aebb37ea (patch)
tree      cd0f94567c5921a210f302230644493b844adb16 /src
parent    surface_params: Corrections, asserts and documentation. (diff)
download  yuzu-88bc39374fd7cffd2864229ae60bdab3aebb37ea.tar.gz
          yuzu-88bc39374fd7cffd2864229ae60bdab3aebb37ea.tar.xz
          yuzu-88bc39374fd7cffd2864229ae60bdab3aebb37ea.zip
texture_cache: Corrections, documentation and asserts
Diffstat (limited to 'src')
-rw-r--r--  src/video_core/texture_cache/texture_cache.h  84
1 file changed, 42 insertions, 42 deletions
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 1516fcea3..fb6ca41ff 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -60,10 +60,10 @@ public:
         }
     }
 
-    /**
+    /***
      * `Guard` guarantees that rendertargets don't unregister themselves if the
      * collide. Protection is currently only done on 3D slices.
-     **/
+     ***/
     void GuardRenderTargets(bool new_guard) {
         guard_render_targets = new_guard;
     }
@@ -191,19 +191,21 @@ public:
     }
 
     void SetEmptyDepthBuffer() {
-        if (depth_buffer.target != nullptr) {
-            depth_buffer.target->MarkAsRenderTarget(false);
-            depth_buffer.target = nullptr;
-            depth_buffer.view = nullptr;
+        if (depth_buffer.target == nullptr) {
+            return;
         }
+        depth_buffer.target->MarkAsRenderTarget(false);
+        depth_buffer.target = nullptr;
+        depth_buffer.view = nullptr;
     }
 
     void SetEmptyColorBuffer(std::size_t index) {
-        if (render_targets[index].target != nullptr) {
-            render_targets[index].target->MarkAsRenderTarget(false);
-            render_targets[index].target = nullptr;
-            render_targets[index].view = nullptr;
+        if (render_targets[index].target == nullptr) {
+            return;
         }
+        render_targets[index].target->MarkAsRenderTarget(false);
+        render_targets[index].target = nullptr;
+        render_targets[index].view = nullptr;
     }
 
     void DoFermiCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src_config,
@@ -283,8 +285,8 @@ protected:
                          gpu_addr);
             return;
         }
-        bool continuouty = system.GPU().MemoryManager().IsBlockContinuous(gpu_addr, size);
-        surface->MarkAsContinuous(continuouty);
+        const bool continuous = system.GPU().MemoryManager().IsBlockContinuous(gpu_addr, size);
+        surface->MarkAsContinuous(continuous);
         surface->SetCacheAddr(cache_ptr);
         surface->SetCpuAddr(*cpu_addr);
         RegisterInnerCache(surface);
@@ -381,8 +383,8 @@ private:
                                               const SurfaceParams& params, const GPUVAddr gpu_addr,
                                               const bool preserve_contents,
                                               const MatchTopologyResult untopological) {
-        const bool do_load = Settings::values.use_accurate_gpu_emulation && preserve_contents;
-        for (auto surface : overlaps) {
+        const bool do_load = preserve_contents && Settings::values.use_accurate_gpu_emulation;
+        for (auto& surface : overlaps) {
             Unregister(surface);
         }
         switch (PickStrategy(overlaps, params, gpu_addr, untopological)) {
@@ -394,7 +396,7 @@ private:
                       [](const TSurface& a, const TSurface& b) -> bool {
                           return a->GetModificationTick() < b->GetModificationTick();
                       });
-            for (auto surface : overlaps) {
+            for (auto& surface : overlaps) {
                 FlushSurface(surface);
             }
             return InitializeSurface(gpu_addr, params, preserve_contents);
@@ -460,19 +462,19 @@ private:
                                                      const SurfaceParams& params, bool is_render) {
         const bool is_mirage = !current_surface->MatchFormat(params.pixel_format);
         const bool matches_target = current_surface->MatchTarget(params.target);
-        auto match_check = ([&]() -> std::pair<TSurface, TView> {
+        const auto match_check = ([&]() -> std::pair<TSurface, TView> {
             if (matches_target) {
                 return {current_surface, current_surface->GetMainView()};
             }
             return {current_surface, current_surface->EmplaceOverview(params)};
         });
-        if (is_mirage) {
-            if (!is_render && siblings_table[current_surface->GetFormat()] == params.pixel_format) {
-                return match_check();
-            }
-            return RebuildSurface(current_surface, params, is_render);
+        if (!is_mirage) {
+            return match_check();
         }
-        return match_check();
+        if (!is_render && siblings_table[current_surface->GetFormat()] == params.pixel_format) {
+            return match_check();
+        }
+        return RebuildSurface(current_surface, params, is_render);
     }
 
     /**
477 479
478 /** 480 /**
@@ -493,7 +495,7 @@ private:
         bool modified = false;
         TSurface new_surface = GetUncachedSurface(gpu_addr, params);
         u32 passed_tests = 0;
-        for (auto surface : overlaps) {
+        for (auto& surface : overlaps) {
             const SurfaceParams& src_params = surface->GetSurfaceParams();
             if (src_params.is_layered || src_params.num_levels > 1) {
                 // We send this cases to recycle as they are more complex to handle
@@ -504,8 +506,7 @@ private:
             if (!mipmap_layer) {
                 continue;
             }
-            const u32 layer{mipmap_layer->first};
-            const u32 mipmap{mipmap_layer->second};
+            const auto [layer, mipmap] = *mipmap_layer;
             if (new_surface->GetMipmapSize(mipmap) != candidate_size) {
                 continue;
             }
@@ -519,7 +520,7 @@ private:
         }
         if (passed_tests == 0) {
             return {};
-            // In Accurate GPU all test should pass, else we recycle
+            // In Accurate GPU all tests should pass, else we recycle
         } else if (Settings::values.use_accurate_gpu_emulation && passed_tests != overlaps.size()) {
             return {};
         }
@@ -548,7 +549,6 @@ private:
     **/
     std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const SurfaceParams& params,
                                           bool preserve_contents, bool is_render) {
-
         const auto host_ptr{system.GPU().MemoryManager().GetPointer(gpu_addr)};
         const auto cache_addr{ToCacheAddr(host_ptr)};
 
@@ -570,17 +570,17 @@ private:
         auto iter = l1_cache.find(cache_addr);
         if (iter != l1_cache.end()) {
             TSurface& current_surface = iter->second;
-            auto topological_result = current_surface->MatchesTopology(params);
+            const auto topological_result = current_surface->MatchesTopology(params);
             if (topological_result != MatchTopologyResult::FullMatch) {
                 std::vector<TSurface> overlaps{current_surface};
                 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
                                       topological_result);
             }
-            MatchStructureResult s_result = current_surface->MatchesStructure(params);
-            if (s_result != MatchStructureResult::None &&
+            const auto struct_result = current_surface->MatchesStructure(params);
+            if (struct_result != MatchStructureResult::None &&
                 (params.target != SurfaceTarget::Texture3D ||
                  current_surface->MatchTarget(params.target))) {
-                if (s_result == MatchStructureResult::FullMatch) {
+                if (struct_result == MatchStructureResult::FullMatch) {
                     return ManageStructuralMatch(current_surface, params, is_render);
                 } else {
                     return RebuildSurface(current_surface, params, is_render);
@@ -602,8 +602,8 @@ private:
         // Now we need to figure the relationship between the texture and its overlaps
         // we do a topological test to ensure we can find some relationship. If it fails
         // inmediatly recycle the texture
-        for (auto surface : overlaps) {
-            auto topological_result = surface->MatchesTopology(params);
+        for (const auto& surface : overlaps) {
+            const auto topological_result = surface->MatchesTopology(params);
             if (topological_result != MatchTopologyResult::FullMatch) {
                 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
                                       topological_result);
@@ -620,7 +620,7 @@ private:
             if (current_surface->GetGpuAddr() == gpu_addr) {
                 std::optional<std::pair<TSurface, TView>> view =
                     TryReconstructSurface(overlaps, params, gpu_addr);
-                if (view.has_value()) {
+                if (view) {
                     return *view;
                 }
             }
@@ -630,7 +630,7 @@ private:
             // Now we check if the candidate is a mipmap/layer of the overlap
             std::optional<TView> view =
                 current_surface->EmplaceView(params, gpu_addr, candidate_size);
-            if (view.has_value()) {
+            if (view) {
                 const bool is_mirage = !current_surface->MatchFormat(params.pixel_format);
                 if (is_mirage) {
                     // On a mirage view, we need to recreate the surface under this new view
@@ -669,7 +669,7 @@ private:
             // using the overlaps. If a single overlap fails, this will fail.
             std::optional<std::pair<TSurface, TView>> view =
                 TryReconstructSurface(overlaps, params, gpu_addr);
-            if (view.has_value()) {
+            if (view) {
                 return *view;
             }
         }
@@ -738,16 +738,16 @@ private:
         std::vector<TSurface> surfaces;
         while (start <= end) {
             std::vector<TSurface>& list = registry[start];
-            for (auto& s : list) {
-                if (!s->IsPicked() && s->Overlaps(cache_addr, cache_addr_end)) {
-                    s->MarkAsPicked(true);
-                    surfaces.push_back(s);
+            for (auto& surface : list) {
+                if (!surface->IsPicked() && surface->Overlaps(cache_addr, cache_addr_end)) {
+                    surface->MarkAsPicked(true);
+                    surfaces.push_back(surface);
                 }
             }
             start++;
         }
-        for (auto& s : surfaces) {
-            s->MarkAsPicked(false);
+        for (auto& surface : surfaces) {
+            surface->MarkAsPicked(false);
         }
         return surfaces;
     }
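
The reshaped SetEmptyDepthBuffer/SetEmptyColorBuffer bodies above switch from a nested null check to an early-return guard clause. As a rough, standalone C++ sketch of that pattern only (Surface, RenderTargetConfig and RenderTargetTable below are hypothetical stand-ins, not the yuzu types):

#include <array>
#include <cstddef>
#include <memory>

// Hypothetical stand-ins for illustration only; the real cache uses its own
// surface/view types and render-target bookkeeping.
struct Surface {
    void MarkAsRenderTarget(bool is_target) {
        is_render_target = is_target;
    }
    bool is_render_target = false;
};

struct RenderTargetConfig {
    std::shared_ptr<Surface> target;
    std::shared_ptr<Surface> view;
};

class RenderTargetTable {
public:
    void SetEmptyColorBuffer(std::size_t index) {
        // Guard clause: return early when there is nothing to clear, so the
        // actual work below stays at a single indentation level.
        if (render_targets[index].target == nullptr) {
            return;
        }
        render_targets[index].target->MarkAsRenderTarget(false);
        render_targets[index].target = nullptr;
        render_targets[index].view = nullptr;
    }

private:
    std::array<RenderTargetConfig, 8> render_targets{};
};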