 src/video_core/renderer_opengl/gl_device.cpp       | 2
 src/video_core/renderer_vulkan/vk_memory_manager.h | 2
 src/video_core/shader/control_flow.cpp             | 6
 src/video_core/texture_cache/surface_base.cpp      | 6
 src/video_core/texture_cache/surface_base.h        | 4
 src/video_core/texture_cache/surface_view.cpp      | 4
 src/video_core/texture_cache/surface_view.h        | 1
 src/video_core/texture_cache/texture_cache.h       | 8
 8 files changed, 22 insertions(+), 11 deletions(-)
diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index c286502ba..d83dca25a 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -87,7 +87,7 @@ u32 Extract(u32& base, u32& num, u32 amount, std::optional<GLenum> limit = {}) {
 std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> BuildBaseBindings() noexcept {
     std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> bindings;
 
-    static std::array<std::size_t, 5> stage_swizzle = {0, 1, 2, 3, 4};
+    static constexpr std::array<std::size_t, 5> stage_swizzle{0, 1, 2, 3, 4};
     const u32 total_ubos = GetInteger<u32>(GL_MAX_UNIFORM_BUFFER_BINDINGS);
     const u32 total_ssbos = GetInteger<u32>(GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS);
     const u32 total_samplers = GetInteger<u32>(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS);
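Note: a minimal standalone sketch of the pattern changed above; MapStage is a hypothetical function, not yuzu code.

// A lookup table that never changes is better declared `static constexpr`:
// it is immutable and guaranteed to be initialized at compile time.
#include <array>
#include <cstddef>

std::size_t MapStage(std::size_t stage) {
    // Before: `static std::array<...> stage_swizzle = {...};` was mutable.
    static constexpr std::array<std::size_t, 5> stage_swizzle{0, 1, 2, 3, 4};
    return stage_swizzle[stage]; // assumes stage < 5
}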
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.h b/src/video_core/renderer_vulkan/vk_memory_manager.h
index 35ee54d30..5b6858e9b 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.h
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.h
@@ -32,7 +32,7 @@ public:
      * memory. When passing false, it will try to allocate device local memory.
      * @returns A memory commit.
      */
-    VKMemoryCommit Commit(const VkMemoryRequirements& reqs, bool host_visible);
+    VKMemoryCommit Commit(const VkMemoryRequirements& requirements, bool host_visible);
 
     /// Commits memory required by the buffer and binds it.
     VKMemoryCommit Commit(const vk::Buffer& buffer, bool host_visible);
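Note: a minimal sketch (hypothetical types, not the yuzu API) of why this rename is declaration-side documentation only: a parameter name in a declaration does not have to match the definition, so callers are unaffected.

struct Requirements {};
struct Commitment {};

class MemoryManager {
public:
    // The renamed parameter only improves readability of the header.
    Commitment Commit(const Requirements& requirements, bool host_visible);
};

// The out-of-line definition may keep a different spelling and still match.
Commitment MemoryManager::Commit(const Requirements& reqs, bool host_visible) {
    (void)reqs;
    (void)host_visible;
    return {};
}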
diff --git a/src/video_core/shader/control_flow.cpp b/src/video_core/shader/control_flow.cpp
index 2e2711350..6d313963a 100644
--- a/src/video_core/shader/control_flow.cpp
+++ b/src/video_core/shader/control_flow.cpp
@@ -484,17 +484,17 @@ bool TryInspectAddress(CFGRebuildState& state) {
     }
     case BlockCollision::Inside: {
         // This case is the tricky one:
-        // We need to Split the block in 2 sepparate blocks
+        // We need to split the block into 2 separate blocks
         const u32 end = state.block_info[block_index].end;
         BlockInfo& new_block = CreateBlockInfo(state, address, end);
         BlockInfo& current_block = state.block_info[block_index];
         current_block.end = address - 1;
-        new_block.branch = current_block.branch;
+        new_block.branch = std::move(current_block.branch);
         BlockBranchInfo forward_branch = MakeBranchInfo<SingleBranch>();
         const auto branch = std::get_if<SingleBranch>(forward_branch.get());
         branch->address = address;
         branch->ignore = true;
-        current_block.branch = std::move(forward_branch);
+        current_block.branch = std::move(forward_branch);
         return true;
     }
     default:
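Note: a sketch of why the added std::move calls help, assuming BlockBranchInfo is a shared-ownership smart pointer (its use with .get() above suggests so); BranchHandle and SplitExample are stand-ins, not yuzu code.

#include <memory>
#include <utility>

struct BranchData { int address = 0; };
using BranchHandle = std::shared_ptr<BranchData>; // stand-in for BlockBranchInfo

void SplitExample() {
    BranchHandle current = std::make_shared<BranchData>();
    // Moving transfers the handle without touching the reference count; the
    // source is about to be overwritten or go out of scope anyway.
    BranchHandle successor = std::move(current);
    BranchHandle forward = std::make_shared<BranchData>();
    forward->address = 42;
    current = std::move(forward); // reuse the vacated slot
}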
diff --git a/src/video_core/texture_cache/surface_base.cpp b/src/video_core/texture_cache/surface_base.cpp
index 7af0e792c..715f39d0d 100644
--- a/src/video_core/texture_cache/surface_base.cpp
+++ b/src/video_core/texture_cache/surface_base.cpp
@@ -248,8 +248,14 @@ void SurfaceBaseImpl::FlushBuffer(Tegra::MemoryManager& memory_manager,
 
     // Use an extra temporal buffer
     auto& tmp_buffer = staging_cache.GetBuffer(1);
+    // Special case for 3D Texture Segments
+    const bool must_read_current_data =
+        params.block_depth > 0 && params.target == VideoCore::Surface::SurfaceTarget::Texture2D;
     tmp_buffer.resize(guest_memory_size);
     host_ptr = tmp_buffer.data();
+    if (must_read_current_data) {
+        memory_manager.ReadBlockUnsafe(gpu_addr, host_ptr, guest_memory_size);
+    }
 
     if (params.is_tiled) {
         ASSERT_MSG(params.block_width == 0, "Block width is defined as {}", params.block_width);
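Note: the intent of the added pre-read appears to be a read-modify-write, so that flushing a 2D segment of a block-linear 3D texture does not clobber neighbouring bytes in guest memory. The sketch below shows that pattern with a hypothetical FlushSlice helper, not the real SurfaceBaseImpl code.

#include <cstddef>
#include <cstring>
#include <vector>

// The caller guarantees slice_offset + slice_data.size() <= guest.size().
void FlushSlice(std::vector<unsigned char>& guest, std::size_t slice_offset,
                const std::vector<unsigned char>& slice_data) {
    std::vector<unsigned char> tmp(guest.size());
    std::memcpy(tmp.data(), guest.data(), guest.size());      // read current data first
    std::memcpy(tmp.data() + slice_offset, slice_data.data(), // overwrite only the slice
                slice_data.size());
    std::memcpy(guest.data(), tmp.data(), tmp.size());        // write the whole block back
}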
diff --git a/src/video_core/texture_cache/surface_base.h b/src/video_core/texture_cache/surface_base.h
index a39a8661b..c5ab21f56 100644
--- a/src/video_core/texture_cache/surface_base.h
+++ b/src/video_core/texture_cache/surface_base.h
@@ -72,9 +72,9 @@ public:
         return (cpu_addr < end) && (cpu_addr_end > start);
     }
 
-    bool IsInside(const GPUVAddr other_start, const GPUVAddr other_end) {
+    bool IsInside(const GPUVAddr other_start, const GPUVAddr other_end) const {
         const GPUVAddr gpu_addr_end = gpu_addr + guest_memory_size;
-        return (gpu_addr <= other_start && other_end <= gpu_addr_end);
+        return gpu_addr <= other_start && other_end <= gpu_addr_end;
     }
 
     // Use only when recycling a surface
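Note: a minimal sketch (hypothetical Range type) of what the added const qualifier buys: the predicate becomes callable on const references, which is how read-only lookups typically see a surface.

#include <cstdint>

struct Range {
    std::uint64_t start = 0;
    std::uint64_t size = 0;

    bool IsInside(std::uint64_t other_start, std::uint64_t other_end) const {
        const std::uint64_t end = start + size;
        return start <= other_start && other_end <= end;
    }
};

bool Covers(const Range& range) {      // const ref: requires the const member
    return range.IsInside(0x1000, 0x2000);
}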
diff --git a/src/video_core/texture_cache/surface_view.cpp b/src/video_core/texture_cache/surface_view.cpp
index 57a1f5803..6b5f5984b 100644
--- a/src/video_core/texture_cache/surface_view.cpp
+++ b/src/video_core/texture_cache/surface_view.cpp
@@ -20,4 +20,8 @@ bool ViewParams::operator==(const ViewParams& rhs) const {
            std::tie(rhs.base_layer, rhs.num_layers, rhs.base_level, rhs.num_levels, rhs.target);
 }
 
+bool ViewParams::operator!=(const ViewParams& rhs) const {
+    return !operator==(rhs);
+}
+
 } // namespace VideoCommon
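Note: a sketch of the idiom (hypothetical Params type): pre-C++20, inequality is usually hand-written as the negation of equality so the two can never disagree; under C++20 a defaulted operator== would generate the rewritten != automatically.

struct Params {
    int base_level = 0;
    int num_levels = 1;

    bool operator==(const Params& rhs) const {
        return base_level == rhs.base_level && num_levels == rhs.num_levels;
    }
    bool operator!=(const Params& rhs) const {
        return !operator==(rhs); // always the exact negation of ==
    }
};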
diff --git a/src/video_core/texture_cache/surface_view.h b/src/video_core/texture_cache/surface_view.h
index b17fd11a9..90a8bb0ae 100644
--- a/src/video_core/texture_cache/surface_view.h
+++ b/src/video_core/texture_cache/surface_view.h
@@ -21,6 +21,7 @@ struct ViewParams {
     std::size_t Hash() const;
 
     bool operator==(const ViewParams& rhs) const;
+    bool operator!=(const ViewParams& rhs) const;
 
     bool IsLayered() const {
         switch (target) {
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 4edd4313b..3e8663adf 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -614,10 +614,10 @@ private:
      * textures within the GPU if possible. Falls back to LLE when it isn't possible to use any of
      * the HLE methods.
      *
      * @param overlaps The overlapping surfaces registered in the cache.
      * @param params The parameters on the new surface.
      * @param gpu_addr The starting address of the new surface.
-     * @param cache_addr The starting address of the new surface on physical memory.
+     * @param cpu_addr The starting address of the new surface on physical memory.
      */
     std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(std::vector<TSurface>& overlaps,
                                                                const SurfaceParams& params,
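Note: a hypothetical caller-side sketch (not the real texture cache code) of how a std::optional<std::pair<...>> return like the one documented above is typically consumed: a value means the HLE path handled the surface, std::nullopt means fall back.

#include <optional>
#include <utility>

struct Surface {};
struct View {};

std::optional<std::pair<Surface, View>> TryManage3D() {
    return std::nullopt; // stand-in: pretend the HLE path could not handle it
}

std::pair<Surface, View> GetSurfaceView() {
    if (auto result = TryManage3D()) {
        return *result;             // HLE path succeeded
    }
    return {Surface{}, View{}};     // fall back to the generic path
}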