From c6cac2ffaad4ac27f35cea25022d9c59c7ecfbf4 Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Sun, 30 Apr 2023 17:14:06 +0200
Subject: GPU: Add Reactive flushing
---
src/video_core/buffer_cache/buffer_base.h | 9 ++++++
src/video_core/buffer_cache/buffer_cache.h | 35 +++++++++++++++++------
src/video_core/buffer_cache/buffer_cache_base.h | 5 ++--
src/video_core/buffer_cache/memory_tracker_base.h | 26 +++++++++++++++++
src/video_core/buffer_cache/word_manager.h | 14 +++++++--
src/video_core/engines/maxwell_dma.cpp | 8 ++----
src/video_core/fence_manager.h | 4 +--
src/video_core/gpu.cpp | 19 ++++++++++++
src/video_core/gpu.h | 4 +++
src/video_core/rasterizer_download_area.h | 13 +++++++++
src/video_core/rasterizer_interface.h | 3 ++
src/video_core/renderer_null/null_rasterizer.cpp | 10 +++++++
src/video_core/renderer_null/null_rasterizer.h | 1 +
src/video_core/renderer_opengl/gl_rasterizer.cpp | 23 +++++++++++++++
src/video_core/renderer_opengl/gl_rasterizer.h | 1 +
src/video_core/renderer_vulkan/vk_rasterizer.cpp | 23 +++++++++++++++
src/video_core/renderer_vulkan/vk_rasterizer.h | 1 +
src/video_core/texture_cache/image_info.h | 1 +
src/video_core/texture_cache/image_view_base.cpp | 3 +-
src/video_core/texture_cache/texture_cache.h | 32 ++++++++++++++++++++-
src/video_core/texture_cache/texture_cache_base.h | 2 ++
21 files changed, 213 insertions(+), 24 deletions(-)
create mode 100644 src/video_core/rasterizer_download_area.h
(limited to 'src/video_core')
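Reactive flushing replaces the old policy of eagerly writing back every GPU-modified range: guest memory is only synchronized when the CPU actually reads a region. The core memory system asks the GPU for a download area covering the read; if the rasterizer reports it as preemptive (already safe to read), the read proceeds immediately, otherwise a flush of that area is queued as a sync operation and the GPU thread is ticked before the read continues. Below is a minimal standalone sketch of that decision flow. RasterizerDownloadArea mirrors the struct added in this patch, while the Rasterizer stand-in, the inline flush, and the page rounding are simplifications for illustration, not yuzu's actual API.
    #include <cstdint>
    #include <iostream>

    using VAddr = std::uint64_t;

    struct RasterizerDownloadArea {
        VAddr start_address;
        VAddr end_address;
        bool preemtive; // spelled as in the patch
    };

    struct Rasterizer {
        // Pretend the caches report one GPU-modified, non-preflushable region.
        RasterizerDownloadArea GetFlushArea(VAddr addr, std::uint64_t size) const {
            const VAddr page_mask = ~VAddr{0xFFF};
            return {addr & page_mask, (addr + size + 0xFFF) & page_mask, false};
        }
        void FlushRegion(VAddr addr, std::uint64_t size) const {
            std::cout << "flush " << size << " bytes at 0x" << std::hex << addr << std::dec << '\n';
        }
    };

    // Models GPU::Impl::OnCPURead: only stall when the area is not already
    // safe (preemptive) to read back from guest memory.
    RasterizerDownloadArea OnCPURead(const Rasterizer& rasterizer, VAddr addr, std::uint64_t size) {
        RasterizerDownloadArea area = rasterizer.GetFlushArea(addr, size);
        if (area.preemtive) {
            return area; // nothing pending here, read guest memory directly
        }
        // yuzu queues this as a sync operation and ticks the GPU thread;
        // the sketch simply flushes inline.
        rasterizer.FlushRegion(area.start_address, area.end_address - area.start_address);
        area.preemtive = true;
        return area;
    }

    int main() {
        const Rasterizer rasterizer;
        const auto area = OnCPURead(rasterizer, 0x1234, 64);
        std::cout << "readable: [0x" << std::hex << area.start_address << ", 0x"
                  << area.end_address << ")\n";
    }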
diff --git a/src/video_core/buffer_cache/buffer_base.h b/src/video_core/buffer_cache/buffer_base.h
index 9cbd95c4b..0bb3bf8ae 100644
--- a/src/video_core/buffer_cache/buffer_base.h
+++ b/src/video_core/buffer_cache/buffer_base.h
@@ -18,6 +18,7 @@ namespace VideoCommon {
enum class BufferFlagBits {
Picked = 1 << 0,
CachedWrites = 1 << 1,
+ PreemtiveDownload = 1 << 2,
};
DECLARE_ENUM_FLAG_OPERATORS(BufferFlagBits)
@@ -54,6 +55,10 @@ public:
flags |= BufferFlagBits::Picked;
}
+ void MarkPreemtiveDownload() noexcept {
+ flags |= BufferFlagBits::PreemtiveDownload;
+ }
+
/// Unmark buffer as picked
void Unpick() noexcept {
flags &= ~BufferFlagBits::Picked;
@@ -84,6 +89,10 @@ public:
return True(flags & BufferFlagBits::CachedWrites);
}
+ bool IsPreemtiveDownload() const noexcept {
+ return True(flags & BufferFlagBits::PreemtiveDownload);
+ }
+
/// Returns the base CPU address of the buffer
[[nodiscard]] VAddr CpuAddr() const noexcept {
return cpu_addr;
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index e534e1e9c..479a1a508 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -111,9 +111,24 @@ void BufferCache<P>::WriteMemory(VAddr cpu_addr, u64 size) {
template <class P>
void BufferCache<P>::CachedWriteMemory(VAddr cpu_addr, u64 size) {
memory_tracker.CachedCpuWrite(cpu_addr, size);
- const IntervalType add_interval{Common::AlignDown(cpu_addr, YUZU_PAGESIZE),
- Common::AlignUp(cpu_addr + size, YUZU_PAGESIZE)};
- cached_ranges.add(add_interval);
+}
+
+template <class P>
+std::optional<VideoCore::RasterizerDownloadArea> BufferCache<P>::GetFlushArea(VAddr cpu_addr,
+ u64 size) {
+ std::optional<VideoCore::RasterizerDownloadArea> area{};
+ area.emplace();
+ VAddr cpu_addr_start_aligned = Common::AlignDown(cpu_addr, Core::Memory::YUZU_PAGESIZE);
+ VAddr cpu_addr_end_aligned = Common::AlignUp(cpu_addr + size, Core::Memory::YUZU_PAGESIZE);
+ area->start_address = cpu_addr_start_aligned;
+ area->end_address = cpu_addr_end_aligned;
+ if (memory_tracker.IsRegionPreflushable(cpu_addr, size)) {
+ area->preemtive = true;
+ return area;
+ };
+ memory_tracker.MarkRegionAsPreflushable(cpu_addr_start_aligned, cpu_addr_end_aligned - cpu_addr_start_aligned);
+ area->preemtive = !IsRegionGpuModified(cpu_addr, size);
+ return area;
}
template <class P>
@@ -191,8 +206,10 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
const VAddr new_base_address = *cpu_dest_address + diff;
const IntervalType add_interval{new_base_address, new_base_address + size};
tmp_intervals.push_back(add_interval);
- uncommitted_ranges.add(add_interval);
- pending_ranges.add(add_interval);
+ if (memory_tracker.IsRegionPreflushable(new_base_address, new_base_address + size)) {
+ uncommitted_ranges.add(add_interval);
+ pending_ranges.add(add_interval);
+ }
};
ForEachInRangeSet(common_ranges, *cpu_src_address, amount, mirror);
// This subtraction in this order is important for overlapping copies.
@@ -205,7 +222,7 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
if (has_new_downloads) {
memory_tracker.MarkRegionAsGpuModified(*cpu_dest_address, amount);
}
- std::vector<u8> tmp_buffer(amount);
+ tmp_buffer.resize(amount);
cpu_memory.ReadBlockUnsafe(*cpu_src_address, tmp_buffer.data(), amount);
cpu_memory.WriteBlockUnsafe(*cpu_dest_address, tmp_buffer.data(), amount);
return true;
@@ -441,9 +458,7 @@ void BufferCache<P>::BindComputeTextureBuffer(size_t tbo_index, GPUVAddr gpu_add
template <class P>
void BufferCache<P>::FlushCachedWrites() {
- cached_write_buffer_ids.clear();
memory_tracker.FlushCachedWrites();
- cached_ranges.clear();
}
template <class P>
@@ -1221,6 +1236,9 @@ void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 s
const IntervalType base_interval{cpu_addr, cpu_addr + size};
common_ranges.add(base_interval);
+ if (!memory_tracker.IsRegionPreflushable(cpu_addr, cpu_addr + size)) {
+ return;
+ }
uncommitted_ranges.add(base_interval);
pending_ranges.add(base_interval);
}
@@ -1629,7 +1647,6 @@ void BufferCache<P>::DeleteBuffer(BufferId buffer_id, bool do_not_mark) {
replace(transform_feedback_buffers);
replace(compute_uniform_buffers);
replace(compute_storage_buffers);
- std::erase(cached_write_buffer_ids, buffer_id);
// Mark the whole buffer as CPU written to stop tracking CPU writes
if (!do_not_mark) {
diff --git a/src/video_core/buffer_cache/buffer_cache_base.h b/src/video_core/buffer_cache/buffer_cache_base.h
index 656baa550..e3914a53a 100644
--- a/src/video_core/buffer_cache/buffer_cache_base.h
+++ b/src/video_core/buffer_cache/buffer_cache_base.h
@@ -188,6 +188,8 @@ public:
void DownloadMemory(VAddr cpu_addr, u64 size);
+ std::optional<VideoCore::RasterizerDownloadArea> GetFlushArea(VAddr cpu_addr, u64 size);
+
bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<const u8> inlined_buffer);
void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size);
@@ -541,8 +543,6 @@ private:
std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES>, Empty>
uniform_buffer_binding_sizes{};
- std::vector<BufferId> cached_write_buffer_ids;
-
MemoryTracker memory_tracker;
IntervalSet uncommitted_ranges;
IntervalSet common_ranges;
@@ -575,6 +575,7 @@ private:
bool active_async_buffers = false;
std::array<BufferId, ((1ULL << 39) >> CACHING_PAGEBITS)> page_table;
+ std::vector<u8> tmp_buffer;
};
} // namespace VideoCommon
diff --git a/src/video_core/buffer_cache/memory_tracker_base.h b/src/video_core/buffer_cache/memory_tracker_base.h
index dc4ebfcaa..6036b21c9 100644
--- a/src/video_core/buffer_cache/memory_tracker_base.h
+++ b/src/video_core/buffer_cache/memory_tracker_base.h
@@ -66,6 +66,14 @@ public:
});
}
+ /// Returns true if a region has been marked as Preflushable
+ [[nodiscard]] bool IsRegionPreflushable(VAddr query_cpu_addr, u64 query_size) noexcept {
+ return IteratePages<false>(
+ query_cpu_addr, query_size, [](Manager* manager, u64 offset, size_t size) {
+ return manager->template IsRegionModified<Type::Preflushable>(offset, size);
+ });
+ }
+
/// Mark region as CPU modified, notifying the rasterizer about this change
void MarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 query_size) {
IteratePages<true>(dirty_cpu_addr, query_size,
@@ -93,6 +101,15 @@ public:
});
}
+ /// Mark region as preflushable
+ void MarkRegionAsPreflushable(VAddr dirty_cpu_addr, u64 query_size) noexcept {
+ IteratePages<false>(dirty_cpu_addr, query_size,
+ [](Manager* manager, u64 offset, size_t size) {
+ manager->template ChangeRegionState<Type::Preflushable, true>(
+ manager->GetCpuAddr() + offset, size);
+ });
+ }
+
/// Unmark region as modified from the host GPU
void UnmarkRegionAsGpuModified(VAddr dirty_cpu_addr, u64 query_size) noexcept {
IteratePages<false>(dirty_cpu_addr, query_size,
@@ -102,6 +119,15 @@ public:
});
}
+ /// Unmark region as preflushable
+ void UnmarkRegionAsPreflushable(VAddr dirty_cpu_addr, u64 query_size) noexcept {
+ IteratePages<false>(dirty_cpu_addr, query_size,
+ [](Manager* manager, u64 offset, size_t size) {
+ manager->template ChangeRegionState<Type::Preflushable, false>(
+ manager->GetCpuAddr() + offset, size);
+ });
+ }
+
/// Mark region as modified from the CPU
/// but don't mark it as modified until FlusHCachedWrites is called.
void CachedCpuWrite(VAddr dirty_cpu_addr, u64 query_size) {
diff --git a/src/video_core/buffer_cache/word_manager.h b/src/video_core/buffer_cache/word_manager.h
index a42455045..0fb199a54 100644
--- a/src/video_core/buffer_cache/word_manager.h
+++ b/src/video_core/buffer_cache/word_manager.h
@@ -26,6 +26,7 @@ enum class Type {
GPU,
CachedCPU,
Untracked,
+ Preflushable,
};
/// Vector tracking modified pages tightly packed with small vector optimization
@@ -55,17 +56,20 @@ struct Words {
gpu.stack.fill(0);
cached_cpu.stack.fill(0);
untracked.stack.fill(~u64{0});
+ preflushable.stack.fill(0);
} else {
// Share allocation between CPU and GPU pages and set their default values
- u64* const alloc = new u64[num_words * 4];
+ u64* const alloc = new u64[num_words * 5];
cpu.heap = alloc;
gpu.heap = alloc + num_words;
cached_cpu.heap = alloc + num_words * 2;
untracked.heap = alloc + num_words * 3;
+ preflushable.heap = alloc + num_words * 4;
std::fill_n(cpu.heap, num_words, ~u64{0});
std::fill_n(gpu.heap, num_words, 0);
std::fill_n(cached_cpu.heap, num_words, 0);
std::fill_n(untracked.heap, num_words, ~u64{0});
+ std::fill_n(preflushable.heap, num_words, 0);
}
// Clean up tailing bits
const u64 last_word_size = size_bytes % BYTES_PER_WORD;
@@ -88,13 +92,14 @@ struct Words {
gpu = rhs.gpu;
cached_cpu = rhs.cached_cpu;
untracked = rhs.untracked;
+ preflushable = rhs.preflushable;
rhs.cpu.heap = nullptr;
return *this;
}
Words(Words&& rhs) noexcept
: size_bytes{rhs.size_bytes}, num_words{rhs.num_words}, cpu{rhs.cpu}, gpu{rhs.gpu},
- cached_cpu{rhs.cached_cpu}, untracked{rhs.untracked} {
+ cached_cpu{rhs.cached_cpu}, untracked{rhs.untracked}, preflushable{rhs.preflushable} {
rhs.cpu.heap = nullptr;
}
@@ -129,6 +134,8 @@ struct Words {
return std::span(cached_cpu.Pointer(IsShort()), num_words);
} else if constexpr (type == Type::Untracked) {
return std::span(untracked.Pointer(IsShort()), num_words);
+ } else if constexpr (type == Type::Preflushable) {
+ return std::span(preflushable.Pointer(IsShort()), num_words);
}
}
@@ -142,6 +149,8 @@ struct Words {
return std::span(cached_cpu.Pointer(IsShort()), num_words);
} else if constexpr (type == Type::Untracked) {
return std::span(untracked.Pointer(IsShort()), num_words);
+ } else if constexpr (type == Type::Preflushable) {
+ return std::span(preflushable.Pointer(IsShort()), num_words);
}
}
@@ -151,6 +160,7 @@ struct Words {
WordsArray<stack_words> gpu;
WordsArray<stack_words> cached_cpu;
WordsArray<stack_words> untracked;
+ WordsArray<stack_words> preflushable;
};
template <class RasterizerInterface, size_t stack_words = 1>
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index e68850dc5..f7400aac8 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -223,7 +223,7 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
write_buffer.resize_destructive(dst_size);
memory_manager.ReadBlock(src_operand.address, read_buffer.data(), src_size);
- memory_manager.ReadBlockUnsafe(dst_operand.address, write_buffer.data(), dst_size);
+ memory_manager.ReadBlock(dst_operand.address, write_buffer.data(), dst_size);
UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
src_params.origin.y, x_elements, regs.line_count, block_height, block_depth,
@@ -288,11 +288,7 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
write_buffer.resize_destructive(dst_size);
memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
- if (Settings::IsGPULevelExtreme()) {
- memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
- } else {
- memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
- }
+ memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
// If the input is linear and the output is tiled, swizzle the input and copy it over.
SwizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h
index 3b2f6aab6..850d6f27d 100644
--- a/src/video_core/fence_manager.h
+++ b/src/video_core/fence_manager.h
@@ -55,8 +55,8 @@ public:
// Unlike other fences, this one doesn't
void SignalOrdering() {
- std::scoped_lock lock{buffer_cache.mutex};
- buffer_cache.AccumulateFlushes();
+ std::function<void()> do_nothing([]{});
+ SignalFence(std::move(do_nothing));
}
void SyncOperation(std::function<void()>&& func) {
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 2e7f9c5ed..295a416a8 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -283,6 +283,21 @@ struct GPU::Impl {
gpu_thread.FlushRegion(addr, size);
}
+ VideoCore::RasterizerDownloadArea OnCPURead(VAddr addr, u64 size) {
+ auto raster_area = rasterizer->GetFlushArea(addr, size);
+ if (raster_area.preemtive) {
+ return raster_area;
+ }
+ raster_area.preemtive = true;
+ const u64 fence = RequestSyncOperation([this, &raster_area]() {
+ rasterizer->FlushRegion(raster_area.start_address,
+ raster_area.end_address - raster_area.start_address);
+ });
+ gpu_thread.TickGPU();
+ WaitForSyncOperation(fence);
+ return raster_area;
+ }
+
/// Notify rasterizer that any caches of the specified region should be invalidated
void InvalidateRegion(VAddr addr, u64 size) {
gpu_thread.InvalidateRegion(addr, size);
@@ -538,6 +553,10 @@ void GPU::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
impl->SwapBuffers(framebuffer);
}
+VideoCore::RasterizerDownloadArea GPU::OnCPURead(VAddr addr, u64 size) {
+ return impl->OnCPURead(addr, size);
+}
+
void GPU::FlushRegion(VAddr addr, u64 size) {
impl->FlushRegion(addr, size);
}
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index 8a871593a..e49c40cf2 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -10,6 +10,7 @@
#include "core/hle/service/nvdrv/nvdata.h"
#include "video_core/cdma_pusher.h"
#include "video_core/framebuffer_config.h"
+#include "video_core/rasterizer_download_area.h"
namespace Core {
class System;
@@ -240,6 +241,9 @@ public:
/// Swap buffers (render frame)
void SwapBuffers(const Tegra::FramebufferConfig* framebuffer);
+ /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
+ [[nodiscard]] VideoCore::RasterizerDownloadArea OnCPURead(VAddr addr, u64 size);
+
/// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
void FlushRegion(VAddr addr, u64 size);
diff --git a/src/video_core/rasterizer_download_area.h b/src/video_core/rasterizer_download_area.h
new file mode 100644
index 000000000..771ce903d
--- /dev/null
+++ b/src/video_core/rasterizer_download_area.h
@@ -0,0 +1,13 @@
+#pragma once
+
+#include "common/common_types.h"
+
+namespace VideoCore {
+
+struct RasterizerDownloadArea {
+ VAddr start_address;
+ VAddr end_address;
+ bool preemtive;
+};
+
+} // namespace VideoCore
\ No newline at end of file
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index 33e2610bc..7566a8c4e 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -12,6 +12,7 @@
#include "video_core/cache_types.h"
#include "video_core/engines/fermi_2d.h"
#include "video_core/gpu.h"
+#include "video_core/rasterizer_download_area.h"
namespace Tegra {
class MemoryManager;
@@ -95,6 +96,8 @@ public:
virtual bool MustFlushRegion(VAddr addr, u64 size,
VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0;
+ virtual RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) = 0;
+
/// Notify rasterizer that any caches of the specified region should be invalidated
virtual void InvalidateRegion(VAddr addr, u64 size,
VideoCommon::CacheType which = VideoCommon::CacheType::All) = 0;
diff --git a/src/video_core/renderer_null/null_rasterizer.cpp b/src/video_core/renderer_null/null_rasterizer.cpp
index 2b5c7defa..bf2ce4c49 100644
--- a/src/video_core/renderer_null/null_rasterizer.cpp
+++ b/src/video_core/renderer_null/null_rasterizer.cpp
@@ -1,6 +1,8 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
+#include "common/alignment.h"
+#include "core/memory.h"
#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"
#include "video_core/renderer_null/null_rasterizer.h"
@@ -46,6 +48,14 @@ bool RasterizerNull::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheTyp
}
void RasterizerNull::InvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType) {}
void RasterizerNull::OnCPUWrite(VAddr addr, u64 size) {}
+VideoCore::RasterizerDownloadArea RasterizerNull::GetFlushArea(VAddr addr, u64 size) {
+ VideoCore::RasterizerDownloadArea new_area{
+ .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE),
+ .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE),
+ .preemtive = true,
+ };
+ return new_area;
+}
void RasterizerNull::InvalidateGPUCache() {}
void RasterizerNull::UnmapMemory(VAddr addr, u64 size) {}
void RasterizerNull::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) {}
diff --git a/src/video_core/renderer_null/null_rasterizer.h b/src/video_core/renderer_null/null_rasterizer.h
index 0c59e6a1f..a8d35d2c1 100644
--- a/src/video_core/renderer_null/null_rasterizer.h
+++ b/src/video_core/renderer_null/null_rasterizer.h
@@ -54,6 +54,7 @@ public:
void InvalidateRegion(VAddr addr, u64 size,
VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
void OnCPUWrite(VAddr addr, u64 size) override;
+ VideoCore::RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) override;
void InvalidateGPUCache() override;
void UnmapMemory(VAddr addr, u64 size) override;
void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override;
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 0089b4b27..3f07fe8bb 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -433,6 +433,29 @@ bool RasterizerOpenGL::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheT
return false;
}
+VideoCore::RasterizerDownloadArea RasterizerOpenGL::GetFlushArea(VAddr addr, u64 size) {
+ {
+ std::scoped_lock lock{texture_cache.mutex};
+ auto area = texture_cache.GetFlushArea(addr, size);
+ if (area) {
+ return *area;
+ }
+ }
+ {
+ std::scoped_lock lock{buffer_cache.mutex};
+ auto area = buffer_cache.GetFlushArea(addr, size);
+ if (area) {
+ return *area;
+ }
+ }
+ VideoCore::RasterizerDownloadArea new_area{
+ .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE),
+ .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE),
+ .preemtive = true,
+ };
+ return new_area;
+}
+
void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType which) {
MICROPROFILE_SCOPE(OpenGL_CacheManagement);
if (addr == 0 || size == 0) {
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index ad6978bd0..410d8ffc5 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -95,6 +95,7 @@ public:
VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
bool MustFlushRegion(VAddr addr, u64 size,
VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
+ VideoCore::RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) override;
void InvalidateRegion(VAddr addr, u64 size,
VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
void OnCPUWrite(VAddr addr, u64 size) override;
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index d1489fc95..bae4aa611 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -502,6 +502,29 @@ bool RasterizerVulkan::MustFlushRegion(VAddr addr, u64 size, VideoCommon::CacheT
return false;
}
+VideoCore::RasterizerDownloadArea RasterizerVulkan::GetFlushArea(VAddr addr, u64 size) {
+ {
+ std::scoped_lock lock{texture_cache.mutex};
+ auto area = texture_cache.GetFlushArea(addr, size);
+ if (area) {
+ return *area;
+ }
+ }
+ {
+ std::scoped_lock lock{buffer_cache.mutex};
+ auto area = buffer_cache.GetFlushArea(addr, size);
+ if (area) {
+ return *area;
+ }
+ }
+ VideoCore::RasterizerDownloadArea new_area{
+ .start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE),
+ .end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE),
+ .preemtive = true,
+ };
+ return new_area;
+}
+
void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType which) {
if (addr == 0 || size == 0) {
return;
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 1659fbc13..9bd422850 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -92,6 +92,7 @@ public:
VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
bool MustFlushRegion(VAddr addr, u64 size,
VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
+ VideoCore::RasterizerDownloadArea GetFlushArea(VAddr addr, u64 size) override;
void InvalidateRegion(VAddr addr, u64 size,
VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
void InnerInvalidation(std::span<const std::pair<VAddr, std::size_t>> sequences) override;
diff --git a/src/video_core/texture_cache/image_info.h b/src/video_core/texture_cache/image_info.h
index 4b7dfa315..cfb85a3dc 100644
--- a/src/video_core/texture_cache/image_info.h
+++ b/src/video_core/texture_cache/image_info.h
@@ -39,6 +39,7 @@ struct ImageInfo {
u32 tile_width_spacing = 0;
bool rescaleable = false;
bool downscaleable = false;
+ bool forced_flushed = false;
};
} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/image_view_base.cpp b/src/video_core/texture_cache/image_view_base.cpp
index bcad40353..8f28342d5 100644
--- a/src/video_core/texture_cache/image_view_base.cpp
+++ b/src/video_core/texture_cache/image_view_base.cpp
@@ -26,8 +26,7 @@ ImageViewBase::ImageViewBase(const ImageViewInfo& info, const ImageInfo& image_i
ASSERT_MSG(VideoCore::Surface::IsViewCompatible(image_info.format, info.format, false, true),
"Image view format {} is incompatible with image format {}", info.format,
image_info.format);
- const bool is_async = Settings::values.use_asynchronous_gpu_emulation.GetValue();
- if (image_info.type == ImageType::Linear && is_async) {
+ if (image_info.forced_flushed) {
flags |= ImageViewFlagBits::PreemtiveDownload;
}
if (image_info.type == ImageType::e3D && info.type != ImageViewType::e3D) {
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index b5297e76b..fb8ffc002 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -490,6 +490,32 @@ void TextureCache<P>::DownloadMemory(VAddr cpu_addr, size_t size) {
}
}
+template <class P>
+std::optional<VideoCore::RasterizerDownloadArea> TextureCache<P>::GetFlushArea(VAddr cpu_addr,
+ u64 size) {
+ std::optional<VideoCore::RasterizerDownloadArea> area{};
+ ForEachImageInRegion(cpu_addr, size, [&](ImageId, ImageBase& image) {
+ if (False(image.flags & ImageFlagBits::GpuModified)) {
+ return;
+ }
+ if (!area) {
+ area.emplace();
+ area->start_address = cpu_addr;
+ area->end_address = cpu_addr + size;
+ area->preemtive = true;
+ }
+ area->start_address = std::min(area->start_address, image.cpu_addr);
+ area->end_address = std::max(area->end_address, image.cpu_addr_end);
+ for (auto image_view_id : image.image_view_ids) {
+ auto& image_view = slot_image_views[image_view_id];
+ image_view.flags |= ImageViewFlagBits::PreemtiveDownload;
+ }
+ area->preemtive &= image.info.forced_flushed;
+ image.info.forced_flushed = true;
+ });
+ return area;
+}
+
template <class P>
void TextureCache<P>::UnmapMemory(VAddr cpu_addr, size_t size) {
std::vector<ImageId> deleted_images;
@@ -789,11 +815,15 @@ ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand) {
if (!dst_id) {
return NULL_IMAGE_ID;
}
- const auto& image = slot_images[dst_id];
+ auto& image = slot_images[dst_id];
if (False(image.flags & ImageFlagBits::GpuModified)) {
// No need to waste time on an image that's synced with guest
return NULL_IMAGE_ID;
}
+ if (!image.info.forced_flushed) {
+ image.info.forced_flushed = true;
+ return NULL_IMAGE_ID;
+ }
const auto base = image.TryFindBase(operand.address);
if (!base) {
return NULL_IMAGE_ID;
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index 758b7e212..01f5ac588 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -179,6 +179,8 @@ public:
/// Download contents of host images to guest memory in a region
void DownloadMemory(VAddr cpu_addr, size_t size);
+ std::optional<VideoCore::RasterizerDownloadArea> GetFlushArea(VAddr cpu_addr, u64 size);
+
/// Remove images in a region
void UnmapMemory(VAddr cpu_addr, size_t size);
--
cgit v1.2.3
From 0f4f18265f4f45ae1314b0fef10c4bdf9b870b8b Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Mon, 1 May 2023 17:34:03 +0200
Subject: Texture cache: sync the first flush.
---
src/video_core/texture_cache/image_info.h | 1 +
src/video_core/texture_cache/texture_cache.h | 32 +++++++++++++++++++++++++---
2 files changed, 30 insertions(+), 3 deletions(-)
(limited to 'src/video_core')
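Until now the texture cache handed swizzle downloads to the async buffer machinery unconditionally, so guest memory stayed stale until the async buffer was later popped. This patch tracks per image whether a DMA download has completed before: the first commit that touches a never-downloaded image finishes the GPU work and swizzles the data back synchronously, and only later commits of the same images stay asynchronous. A standalone model of that rule follows; the Image struct and the two download helpers are simplified stand-ins, not the texture cache's real types.
    #include <iostream>
    #include <vector>

    struct Image {
        int id;
        bool dma_downloaded = false; // mirrors ImageInfo::dma_downloaded
    };

    void QueueAsyncDownload(const Image& image) {
        std::cout << "image " << image.id << ": queued async download\n";
    }

    void DownloadAndWait(const Image& image) {
        std::cout << "image " << image.id << ": synchronous download (runtime.Finish())\n";
    }

    // Models the CommitAsyncFlushes change: if any image in the batch has never
    // been downloaded before, the whole batch is written back synchronously so
    // guest memory is valid immediately; afterwards the async path is used.
    void CommitFlushes(std::vector<Image>& images) {
        bool all_pre_sync = true;
        for (Image& image : images) {
            all_pre_sync &= image.dma_downloaded;
            image.dma_downloaded = true;
        }
        for (const Image& image : images) {
            if (all_pre_sync) {
                QueueAsyncDownload(image);
            } else {
                DownloadAndWait(image);
            }
        }
    }

    int main() {
        std::vector<Image> images{{1}, {2}};
        CommitFlushes(images); // first commit: synchronous
        CommitFlushes(images); // later commits: asynchronous
    }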
diff --git a/src/video_core/texture_cache/image_info.h b/src/video_core/texture_cache/image_info.h
index cfb85a3dc..8a4cb0cbd 100644
--- a/src/video_core/texture_cache/image_info.h
+++ b/src/video_core/texture_cache/image_info.h
@@ -40,6 +40,7 @@ struct ImageInfo {
bool rescaleable = false;
bool downscaleable = false;
bool forced_flushed = false;
+ bool dma_downloaded = false;
};
} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index fb8ffc002..762e8a52f 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -709,18 +709,43 @@ void TextureCache<P>::CommitAsyncFlushes() {
download_info.async_buffer_id = last_async_buffer_id;
}
}
+
if (any_none_dma) {
+ bool all_pre_sync = true;
auto download_map = runtime.DownloadStagingBuffer(total_size_bytes, true);
for (const PendingDownload& download_info : download_ids) {
if (download_info.is_swizzle) {
Image& image = slot_images[download_info.object_id];
+ all_pre_sync &= image.info.dma_downloaded;
+ image.info.dma_downloaded = true;
const auto copies = FullDownloadCopies(image.info);
image.DownloadMemory(download_map, copies);
download_map.offset += Common::AlignUp(image.unswizzled_size_bytes, 64);
}
}
- uncommitted_async_buffers.emplace_back(download_map);
+ if (!all_pre_sync) {
+ runtime.Finish();
+ auto it = download_ids.begin();
+ while (it != download_ids.end()) {
+ const PendingDownload& download_info = *it;
+ if (download_info.is_swizzle) {
+ const ImageBase& image = slot_images[download_info.object_id];
+ const auto copies = FullDownloadCopies(image.info);
+ download_map.offset -= Common::AlignUp(image.unswizzled_size_bytes, 64);
+ std::span<u8> download_span =
+ download_map.mapped_span.subspan(download_map.offset);
+ SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, download_span,
+ swizzle_data_buffer);
+ it = download_ids.erase(it);
+ } else {
+ it++;
+ }
+ }
+ } else {
+ uncommitted_async_buffers.emplace_back(download_map);
+ }
}
+
async_buffers.emplace_back(std::move(uncommitted_async_buffers));
uncommitted_async_buffers.clear();
}
@@ -820,8 +845,9 @@ ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand) {
// No need to waste time on an image that's synced with guest
return NULL_IMAGE_ID;
}
- if (!image.info.forced_flushed) {
- image.info.forced_flushed = true;
+ if (!image.info.dma_downloaded) {
+ // Force a full sync.
+ image.info.dma_downloaded = true;
return NULL_IMAGE_ID;
}
const auto base = image.TryFindBase(operand.address);
--
cgit v1.2.3
From 92da86290c5ea657ae918bfe36071bdf7ac15075 Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Thu, 4 May 2023 02:34:49 +0200
Subject: Settings: add option to enable / disable reactive flushing
---
src/video_core/buffer_cache/buffer_cache.h | 9 ++++++---
src/video_core/texture_cache/image_view_base.cpp | 3 ++-
2 files changed, 8 insertions(+), 4 deletions(-)
(limited to 'src/video_core')
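With the toggle in place, both the buffer cache and the image-view preemptive-download heuristic consult Settings::values.use_reactive_flushing, and disabling it restores the previous eager behaviour. The sketch below illustrates the guard pattern used in MarkWrittenBuffer; the local settings struct and the stubbed preflushable query are assumptions standing in for yuzu's settings object and memory tracker.
    #include <iostream>

    // Stand-in for Settings::values.use_reactive_flushing (assumption for the sketch).
    struct {
        bool use_reactive_flushing = true;
    } settings;

    // Stand-in for memory_tracker.IsRegionPreflushable(): pretend the CPU never
    // read this region back, so it was not marked preflushable.
    bool IsRegionPreflushable() {
        return false;
    }

    // Mirrors the MarkWrittenBuffer guard: with reactive flushing enabled, only
    // regions the CPU has actually read (preflushable) are queued for download;
    // with it disabled, every GPU-written range is queued as before.
    bool ShouldQueueDownload() {
        if (settings.use_reactive_flushing && !IsRegionPreflushable()) {
            return false;
        }
        return true;
    }

    int main() {
        std::cout << std::boolalpha;
        std::cout << ShouldQueueDownload() << '\n'; // false: deferred until a CPU read
        settings.use_reactive_flushing = false;
        std::cout << ShouldQueueDownload() << '\n'; // true: legacy eager download
    }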
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 479a1a508..474822354 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -126,7 +126,8 @@ std::optional<VideoCore::RasterizerDownloadArea> BufferCache<P>::GetFlushArea(VA
area->preemtive = true;
return area;
};
- memory_tracker.MarkRegionAsPreflushable(cpu_addr_start_aligned, cpu_addr_end_aligned - cpu_addr_start_aligned);
+ memory_tracker.MarkRegionAsPreflushable(cpu_addr_start_aligned,
+ cpu_addr_end_aligned - cpu_addr_start_aligned);
area->preemtive = !IsRegionGpuModified(cpu_addr, size);
return area;
}
@@ -206,7 +207,8 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
const VAddr new_base_address = *cpu_dest_address + diff;
const IntervalType add_interval{new_base_address, new_base_address + size};
tmp_intervals.push_back(add_interval);
- if (memory_tracker.IsRegionPreflushable(new_base_address, new_base_address + size)) {
+ if (!Settings::values.use_reactive_flushing.GetValue() ||
+ memory_tracker.IsRegionPreflushable(new_base_address, new_base_address + size)) {
uncommitted_ranges.add(add_interval);
pending_ranges.add(add_interval);
}
@@ -1236,7 +1238,8 @@ void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 s
const IntervalType base_interval{cpu_addr, cpu_addr + size};
common_ranges.add(base_interval);
- if (!memory_tracker.IsRegionPreflushable(cpu_addr, cpu_addr + size)) {
+ if (Settings::values.use_reactive_flushing.GetValue() &&
+ !memory_tracker.IsRegionPreflushable(cpu_addr, cpu_addr + size)) {
return;
}
uncommitted_ranges.add(base_interval);
diff --git a/src/video_core/texture_cache/image_view_base.cpp b/src/video_core/texture_cache/image_view_base.cpp
index 8f28342d5..30a7c11f5 100644
--- a/src/video_core/texture_cache/image_view_base.cpp
+++ b/src/video_core/texture_cache/image_view_base.cpp
@@ -26,7 +26,8 @@ ImageViewBase::ImageViewBase(const ImageViewInfo& info, const ImageInfo& image_i
ASSERT_MSG(VideoCore::Surface::IsViewCompatible(image_info.format, info.format, false, true),
"Image view format {} is incompatible with image format {}", info.format,
image_info.format);
- if (image_info.forced_flushed) {
+ const bool preemptive = !Settings::values.use_reactive_flushing.GetValue() && image_info.type == ImageType::Linear;
+ if (image_info.forced_flushed || preemptive) {
flags |= ImageViewFlagBits::PreemtiveDownload;
}
if (image_info.type == ImageType::e3D && info.type != ImageViewType::e3D) {
--
cgit v1.2.3
From ab0c0a469c29900314847520f30d6edb914fa028 Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Thu, 4 May 2023 02:36:18 +0200
Subject: Query cache: stop updating pages as it's not affected by cpu writes
---
src/video_core/query_cache.h | 2 --
1 file changed, 2 deletions(-)
(limited to 'src/video_core')
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h
index 941de95c1..1528cc1dd 100644
--- a/src/video_core/query_cache.h
+++ b/src/video_core/query_cache.h
@@ -255,7 +255,6 @@ private:
if (!in_range(query)) {
continue;
}
- rasterizer.UpdatePagesCachedCount(query.GetCpuAddr(), query.SizeInBytes(), -1);
AsyncJobId async_job_id = query.GetAsyncJob();
auto flush_result = query.Flush(async);
if (async_job_id == NULL_ASYNC_JOB_ID) {
@@ -273,7 +272,6 @@ private:
/// Registers the passed parameters as cached and returns a pointer to the stored cached query.
CachedQuery* Register(VideoCore::QueryType type, VAddr cpu_addr, u8* host_ptr, bool timestamp) {
- rasterizer.UpdatePagesCachedCount(cpu_addr, CachedQuery::SizeInBytes(timestamp), 1);
const u64 page = static_cast<u64>(cpu_addr) >> YUZU_PAGEBITS;
return &cached_queries[page].emplace_back(static_cast<QueryCache&>(*this), type, cpu_addr,
host_ptr);
--
cgit v1.2.3
From 6f90dff2938b5bd5e9311e924e8a29945f16ac18 Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Thu, 4 May 2023 03:16:57 +0200
Subject: Address feedback, add CR notice, etc
---
src/video_core/fence_manager.h | 7 ++++++-
src/video_core/rasterizer_download_area.h | 3 +++
src/video_core/renderer_vulkan/vk_rasterizer.cpp | 4 ++--
src/video_core/texture_cache/image_view_base.cpp | 3 ++-
4 files changed, 13 insertions(+), 4 deletions(-)
(limited to 'src/video_core')
diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h
index 850d6f27d..35d699bbf 100644
--- a/src/video_core/fence_manager.h
+++ b/src/video_core/fence_manager.h
@@ -55,7 +55,12 @@ public:
// Unlike other fences, this one doesn't
void SignalOrdering() {
- std::function<void()> do_nothing([]{});
+ std::scoped_lock lock{buffer_cache.mutex};
+ buffer_cache.AccumulateFlushes();
+ }
+
+ void SignalReference() {
+ std::function<void()> do_nothing([] {});
SignalFence(std::move(do_nothing));
}
diff --git a/src/video_core/rasterizer_download_area.h b/src/video_core/rasterizer_download_area.h
index 771ce903d..2d7425c79 100644
--- a/src/video_core/rasterizer_download_area.h
+++ b/src/video_core/rasterizer_download_area.h
@@ -1,3 +1,6 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
#pragma once
#include "common/common_types.h"
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index bae4aa611..2beb0efea 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -621,7 +621,7 @@ void RasterizerVulkan::SignalSyncPoint(u32 value) {
}
void RasterizerVulkan::SignalReference() {
- fence_manager.SignalOrdering();
+ fence_manager.SignalReference();
}
void RasterizerVulkan::ReleaseFences() {
@@ -654,7 +654,7 @@ void RasterizerVulkan::WaitForIdle() {
cmdbuf.SetEvent(event, flags);
cmdbuf.WaitEvents(event, flags, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, {}, {}, {});
});
- SignalReference();
+ fence_manager.SignalOrdering();
}
void RasterizerVulkan::FragmentBarrier() {
diff --git a/src/video_core/texture_cache/image_view_base.cpp b/src/video_core/texture_cache/image_view_base.cpp
index 30a7c11f5..c3b2b196d 100644
--- a/src/video_core/texture_cache/image_view_base.cpp
+++ b/src/video_core/texture_cache/image_view_base.cpp
@@ -26,7 +26,8 @@ ImageViewBase::ImageViewBase(const ImageViewInfo& info, const ImageInfo& image_i
ASSERT_MSG(VideoCore::Surface::IsViewCompatible(image_info.format, info.format, false, true),
"Image view format {} is incompatible with image format {}", info.format,
image_info.format);
- const bool preemptive = !Settings::values.use_reactive_flushing.GetValue() && image_info.type == ImageType::Linear;
+ const bool preemptive =
+ !Settings::values.use_reactive_flushing.GetValue() && image_info.type == ImageType::Linear;
if (image_info.forced_flushed || preemptive) {
flags |= ImageViewFlagBits::PreemtiveDownload;
}
--
cgit v1.2.3
From 36c302fa32b475abb1b211934eab14fe0cad936a Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Thu, 4 May 2023 13:23:36 +0200
Subject: Buffer cache: always use async buffer downloads and fix regression.
---
src/video_core/buffer_cache/buffer_cache.h | 114 ++++++++++++------------
src/video_core/buffer_cache/buffer_cache_base.h | 2 -
src/video_core/buffer_cache/word_manager.h | 13 +++
3 files changed, 68 insertions(+), 61 deletions(-)
(limited to 'src/video_core')
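Besides dropping the active_async_buffers toggle so staging-buffer downloads are always asynchronous, this patch fixes the GPU dirty-word queries in word_manager.h: pages currently marked untracked are masked out, so regions the rasterizer no longer tracks are not reported as GPU-modified and flushed by mistake. A standalone model of that masking, using plain 64-bit bitmaps in place of yuzu's word manager:
    #include <cstdint>
    #include <iostream>

    std::uint64_t GpuModifiedPages(std::uint64_t gpu_words, std::uint64_t untracked_words) {
        // Equivalent of: mask &= ~untracked_words[index];
        return gpu_words & ~untracked_words;
    }

    int main() {
        const std::uint64_t gpu = 0b1111'0000;       // pages the GPU wrote
        const std::uint64_t untracked = 0b0011'0000; // pages no longer tracked
        std::cout << std::hex << GpuModifiedPages(gpu, untracked) << '\n'; // prints c0
    }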
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 474822354..0b15944d6 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -23,8 +23,6 @@ BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
common_ranges.clear();
inline_buffer_id = NULL_BUFFER_ID;
- active_async_buffers = !Settings::IsGPULevelHigh();
-
if (!runtime.CanReportMemoryUsage()) {
minimum_memory = DEFAULT_EXPECTED_MEMORY;
critical_memory = DEFAULT_CRITICAL_MEMORY;
@@ -75,8 +73,6 @@ void BufferCache<P>::TickFrame() {
uniform_cache_hits[0] = 0;
uniform_cache_shots[0] = 0;
- active_async_buffers = !Settings::IsGPULevelHigh();
-
const bool skip_preferred = hits * 256 < shots * 251;
uniform_buffer_skip_cache_size = skip_preferred ? DEFAULT_SKIP_CACHE_SIZE : 0;
@@ -491,9 +487,8 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
if (committed_ranges.empty()) {
if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
- if (active_async_buffers) {
- async_buffers.emplace_back(std::optional<Async_Buffer>{});
- }
+
+ async_buffers.emplace_back(std::optional<Async_Buffer>{});
}
return;
}
@@ -554,64 +549,65 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
committed_ranges.clear();
if (downloads.empty()) {
if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
- if (active_async_buffers) {
- async_buffers.emplace_back(std::optional<Async_Buffer>{});
- }
+
+ async_buffers.emplace_back(std::optional<Async_Buffer>{});
}
return;
}
- if (active_async_buffers) {
- if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
- auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes, true);
- boost::container::small_vector<BufferCopy, 4> normalized_copies;
- IntervalSet new_async_range{};
- runtime.PreCopyBarrier();
- for (auto& [copy, buffer_id] : downloads) {
- copy.dst_offset += download_staging.offset;
- const std::array copies{copy};
- BufferCopy second_copy{copy};
- Buffer& buffer = slot_buffers[buffer_id];
- second_copy.src_offset = static_cast<u64>(buffer.CpuAddr()) + copy.src_offset;
- VAddr orig_cpu_addr = static_cast<VAddr>(second_copy.src_offset);
- const IntervalType base_interval{orig_cpu_addr, orig_cpu_addr + copy.size};
- async_downloads += std::make_pair(base_interval, 1);
- runtime.CopyBuffer(download_staging.buffer, buffer, copies, false);
- normalized_copies.push_back(second_copy);
- }
- runtime.PostCopyBarrier();
- pending_downloads.emplace_back(std::move(normalized_copies));
- async_buffers.emplace_back(download_staging);
- } else {
- committed_ranges.clear();
- uncommitted_ranges.clear();
+ if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
+ auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes, true);
+ boost::container::small_vector<BufferCopy, 4> normalized_copies;
+ IntervalSet new_async_range{};
+ runtime.PreCopyBarrier();
+ for (auto& [copy, buffer_id] : downloads) {
+ copy.dst_offset += download_staging.offset;
+ const std::array copies{copy};
+ BufferCopy second_copy{copy};
+ Buffer& buffer = slot_buffers[buffer_id];
+ second_copy.src_offset = static_cast<u64>(buffer.CpuAddr()) + copy.src_offset;
+ VAddr orig_cpu_addr = static_cast<VAddr>(second_copy.src_offset);
+ const IntervalType base_interval{orig_cpu_addr, orig_cpu_addr + copy.size};
+ async_downloads += std::make_pair(base_interval, 1);
+ runtime.CopyBuffer(download_staging.buffer, buffer, copies, false);
+ normalized_copies.push_back(second_copy);
}
+ runtime.PostCopyBarrier();
+ pending_downloads.emplace_back(std::move(normalized_copies));
+ async_buffers.emplace_back(download_staging);
} else {
- if constexpr (USE_MEMORY_MAPS) {
- auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes);
- runtime.PreCopyBarrier();
- for (auto& [copy, buffer_id] : downloads) {
- // Have in mind the staging buffer offset for the copy
- copy.dst_offset += download_staging.offset;
- const std::array copies{copy};
- runtime.CopyBuffer(download_staging.buffer, slot_buffers[buffer_id], copies, false);
- }
- runtime.PostCopyBarrier();
- runtime.Finish();
- for (const auto& [copy, buffer_id] : downloads) {
- const Buffer& buffer = slot_buffers[buffer_id];
- const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
- // Undo the modified offset
- const u64 dst_offset = copy.dst_offset - download_staging.offset;
- const u8* read_mapped_memory = download_staging.mapped_span.data() + dst_offset;
- cpu_memory.WriteBlockUnsafe(cpu_addr, read_mapped_memory, copy.size);
- }
+ if (!Settings::IsGPULevelHigh()) {
+ committed_ranges.clear();
+ uncommitted_ranges.clear();
} else {
- const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
- for (const auto& [copy, buffer_id] : downloads) {
- Buffer& buffer = slot_buffers[buffer_id];
- buffer.ImmediateDownload(copy.src_offset, immediate_buffer.subspan(0, copy.size));
- const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
- cpu_memory.WriteBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size);
+ if constexpr (USE_MEMORY_MAPS) {
+ auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes);
+ runtime.PreCopyBarrier();
+ for (auto& [copy, buffer_id] : downloads) {
+ // Have in mind the staging buffer offset for the copy
+ copy.dst_offset += download_staging.offset;
+ const std::array copies{copy};
+ runtime.CopyBuffer(download_staging.buffer, slot_buffers[buffer_id], copies,
+ false);
+ }
+ runtime.PostCopyBarrier();
+ runtime.Finish();
+ for (const auto& [copy, buffer_id] : downloads) {
+ const Buffer& buffer = slot_buffers[buffer_id];
+ const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
+ // Undo the modified offset
+ const u64 dst_offset = copy.dst_offset - download_staging.offset;
+ const u8* read_mapped_memory = download_staging.mapped_span.data() + dst_offset;
+ cpu_memory.WriteBlockUnsafe(cpu_addr, read_mapped_memory, copy.size);
+ }
+ } else {
+ const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
+ for (const auto& [copy, buffer_id] : downloads) {
+ Buffer& buffer = slot_buffers[buffer_id];
+ buffer.ImmediateDownload(copy.src_offset,
+ immediate_buffer.subspan(0, copy.size));
+ const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
+ cpu_memory.WriteBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size);
+ }
}
}
}
diff --git a/src/video_core/buffer_cache/buffer_cache_base.h b/src/video_core/buffer_cache/buffer_cache_base.h
index e3914a53a..0445ec47f 100644
--- a/src/video_core/buffer_cache/buffer_cache_base.h
+++ b/src/video_core/buffer_cache/buffer_cache_base.h
@@ -572,8 +572,6 @@ private:
u64 critical_memory = 0;
BufferId inline_buffer_id;
- bool active_async_buffers = false;
-
std::array<BufferId, ((1ULL << 39) >> CACHING_PAGEBITS)> page_table;
std::vector<u8> tmp_buffer;
};
diff --git a/src/video_core/buffer_cache/word_manager.h b/src/video_core/buffer_cache/word_manager.h
index 0fb199a54..a336bde41 100644
--- a/src/video_core/buffer_cache/word_manager.h
+++ b/src/video_core/buffer_cache/word_manager.h
@@ -302,6 +302,9 @@ public:
(pending_pointer - pending_offset) * BYTES_PER_PAGE);
};
IterateWords(offset, size, [&](size_t index, u64 mask) {
+ if constexpr (type == Type::GPU) {
+ mask &= ~untracked_words[index];
+ }
const u64 word = state_words[index] & mask;
if constexpr (clear) {
if constexpr (type == Type::CPU || type == Type::CachedCPU) {
@@ -350,8 +353,13 @@ public:
static_assert(type != Type::Untracked);
const std::span<const u64> state_words = words.template Span<type>();
+ [[maybe_unused]] const std::span<const u64> untracked_words =
+ words.template Span<Type::Untracked>();
bool result = false;
IterateWords(offset, size, [&](size_t index, u64 mask) {
+ if constexpr (type == Type::GPU) {
+ mask &= ~untracked_words[index];
+ }
const u64 word = state_words[index] & mask;
if (word != 0) {
result = true;
@@ -372,9 +380,14 @@ public:
[[nodiscard]] std::pair<u64, u64> ModifiedRegion(u64 offset, u64 size) const noexcept {
static_assert(type != Type::Untracked);
const std::span<const u64> state_words = words.template Span<type>();
+ [[maybe_unused]] const std::span<const u64> untracked_words =
+ words.template Span<Type::Untracked>();
u64 begin = std::numeric_limits<u64>::max();
u64 end = 0;
IterateWords(offset, size, [&](size_t index, u64 mask) {
+ if constexpr (type == Type::GPU) {
+ mask &= ~untracked_words[index];
+ }
const u64 word = state_words[index] & mask;
if (word == 0) {
return;
--
cgit v1.2.3
From 016c6feb49255792e4093be381e20509eb94e6d1 Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Thu, 4 May 2023 17:44:49 +0200
Subject: Texture cache: reverse immediate flush changes
---
src/video_core/texture_cache/image_info.cpp | 12 ++++++++++++
src/video_core/texture_cache/image_view_base.cpp | 5 +----
src/video_core/texture_cache/texture_cache.h | 25 +-----------------------
3 files changed, 14 insertions(+), 28 deletions(-)
(limited to 'src/video_core')
diff --git a/src/video_core/texture_cache/image_info.cpp b/src/video_core/texture_cache/image_info.cpp
index 11f3f78a1..e8ddde691 100644
--- a/src/video_core/texture_cache/image_info.cpp
+++ b/src/video_core/texture_cache/image_info.cpp
@@ -4,6 +4,7 @@
#include <fmt/format.h>
#include "common/assert.h"
+#include "common/settings.h"
#include "video_core/surface.h"
#include "video_core/texture_cache/format_lookup_table.h"
#include "video_core/texture_cache/image_info.h"
@@ -22,6 +23,8 @@ using VideoCore::Surface::PixelFormat;
using VideoCore::Surface::SurfaceType;
ImageInfo::ImageInfo(const TICEntry& config) noexcept {
+ forced_flushed = config.IsPitchLinear() && !Settings::values.use_reactive_flushing.GetValue();
+ dma_downloaded = forced_flushed;
format = PixelFormatFromTextureInfo(config.format, config.r_type, config.g_type, config.b_type,
config.a_type, config.srgb_conversion);
num_samples = NumSamples(config.msaa_mode);
@@ -117,6 +120,9 @@ ImageInfo::ImageInfo(const TICEntry& config) noexcept {
ImageInfo::ImageInfo(const Maxwell3D::Regs::RenderTargetConfig& ct,
Tegra::Texture::MsaaMode msaa_mode) noexcept {
+ forced_flushed =
+ ct.tile_mode.is_pitch_linear && !Settings::values.use_reactive_flushing.GetValue();
+ dma_downloaded = forced_flushed;
format = VideoCore::Surface::PixelFormatFromRenderTargetFormat(ct.format);
rescaleable = false;
if (ct.tile_mode.is_pitch_linear) {
@@ -155,6 +161,9 @@ ImageInfo::ImageInfo(const Maxwell3D::Regs::RenderTargetConfig& ct,
ImageInfo::ImageInfo(const Maxwell3D::Regs::Zeta& zt, const Maxwell3D::Regs::ZetaSize& zt_size,
Tegra::Texture::MsaaMode msaa_mode) noexcept {
+ forced_flushed =
+ zt.tile_mode.is_pitch_linear && !Settings::values.use_reactive_flushing.GetValue();
+ dma_downloaded = forced_flushed;
format = VideoCore::Surface::PixelFormatFromDepthFormat(zt.format);
size.width = zt_size.width;
size.height = zt_size.height;
@@ -195,6 +204,9 @@ ImageInfo::ImageInfo(const Maxwell3D::Regs::Zeta& zt, const Maxwell3D::Regs::Zet
ImageInfo::ImageInfo(const Fermi2D::Surface& config) noexcept {
UNIMPLEMENTED_IF_MSG(config.layer != 0, "Surface layer is not zero");
+ forced_flushed = config.linear == Fermi2D::MemoryLayout::Pitch &&
+ !Settings::values.use_reactive_flushing.GetValue();
+ dma_downloaded = forced_flushed;
format = VideoCore::Surface::PixelFormatFromRenderTargetFormat(config.format);
rescaleable = false;
if (config.linear == Fermi2D::MemoryLayout::Pitch) {
diff --git a/src/video_core/texture_cache/image_view_base.cpp b/src/video_core/texture_cache/image_view_base.cpp
index c3b2b196d..d134b6738 100644
--- a/src/video_core/texture_cache/image_view_base.cpp
+++ b/src/video_core/texture_cache/image_view_base.cpp
@@ -4,7 +4,6 @@
#include <algorithm>
#include "common/assert.h"
-#include "common/settings.h"
#include "video_core/compatible_formats.h"
#include "video_core/surface.h"
#include "video_core/texture_cache/formatter.h"
@@ -26,9 +25,7 @@ ImageViewBase::ImageViewBase(const ImageViewInfo& info, const ImageInfo& image_i
ASSERT_MSG(VideoCore::Surface::IsViewCompatible(image_info.format, info.format, false, true),
"Image view format {} is incompatible with image format {}", info.format,
image_info.format);
- const bool preemptive =
- !Settings::values.use_reactive_flushing.GetValue() && image_info.type == ImageType::Linear;
- if (image_info.forced_flushed || preemptive) {
+ if (image_info.forced_flushed) {
flags |= ImageViewFlagBits::PreemtiveDownload;
}
if (image_info.type == ImageType::e3D && info.type != ImageViewType::e3D) {
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 762e8a52f..29ac01eb4 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -711,39 +711,16 @@ void TextureCache<P>::CommitAsyncFlushes() {
}
if (any_none_dma) {
- bool all_pre_sync = true;
auto download_map = runtime.DownloadStagingBuffer(total_size_bytes, true);
for (const PendingDownload& download_info : download_ids) {
if (download_info.is_swizzle) {
Image& image = slot_images[download_info.object_id];
- all_pre_sync &= image.info.dma_downloaded;
- image.info.dma_downloaded = true;
const auto copies = FullDownloadCopies(image.info);
image.DownloadMemory(download_map, copies);
download_map.offset += Common::AlignUp(image.unswizzled_size_bytes, 64);
}
}
- if (!all_pre_sync) {
- runtime.Finish();
- auto it = download_ids.begin();
- while (it != download_ids.end()) {
- const PendingDownload& download_info = *it;
- if (download_info.is_swizzle) {
- const ImageBase& image = slot_images[download_info.object_id];
- const auto copies = FullDownloadCopies(image.info);
- download_map.offset -= Common::AlignUp(image.unswizzled_size_bytes, 64);
- std::span<u8> download_span =
- download_map.mapped_span.subspan(download_map.offset);
- SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, download_span,
- swizzle_data_buffer);
- it = download_ids.erase(it);
- } else {
- it++;
- }
- }
- } else {
- uncommitted_async_buffers.emplace_back(download_map);
- }
+ uncommitted_async_buffers.emplace_back(download_map);
}
async_buffers.emplace_back(std::move(uncommitted_async_buffers));
--
cgit v1.2.3
From 2df19ef0fd5a91ca87e2c2cf201166a40c9d44dc Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Sun, 7 May 2023 23:25:34 +0200
Subject: Buffer Cache: disable reactive flushing in it.
---
src/video_core/buffer_cache/buffer_cache.h | 11 ++---------
src/video_core/renderer_vulkan/vk_rasterizer.cpp | 7 -------
src/video_core/texture_cache/texture_cache.h | 8 ++++++--
3 files changed, 8 insertions(+), 18 deletions(-)
(limited to 'src/video_core')
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 0b15944d6..6624919a4 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -203,11 +203,8 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
const VAddr new_base_address = *cpu_dest_address + diff;
const IntervalType add_interval{new_base_address, new_base_address + size};
tmp_intervals.push_back(add_interval);
- if (!Settings::values.use_reactive_flushing.GetValue() ||
- memory_tracker.IsRegionPreflushable(new_base_address, new_base_address + size)) {
- uncommitted_ranges.add(add_interval);
- pending_ranges.add(add_interval);
- }
+ uncommitted_ranges.add(add_interval);
+ pending_ranges.add(add_interval);
};
ForEachInRangeSet(common_ranges, *cpu_src_address, amount, mirror);
// This subtraction in this order is important for overlapping copies.
@@ -1234,10 +1231,6 @@ void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 s
const IntervalType base_interval{cpu_addr, cpu_addr + size};
common_ranges.add(base_interval);
- if (Settings::values.use_reactive_flushing.GetValue() &&
- !memory_tracker.IsRegionPreflushable(cpu_addr, cpu_addr + size)) {
- return;
- }
uncommitted_ranges.add(base_interval);
pending_ranges.add(base_interval);
}
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 2beb0efea..0bcc20544 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -510,13 +510,6 @@ VideoCore::RasterizerDownloadArea RasterizerVulkan::GetFlushArea(VAddr addr, u64
return *area;
}
}
- {
- std::scoped_lock lock{buffer_cache.mutex};
- auto area = buffer_cache.GetFlushArea(addr, size);
- if (area) {
- return *area;
- }
- }
VideoCore::RasterizerDownloadArea new_area{
.start_address = Common::AlignDown(addr, Core::Memory::YUZU_PAGESIZE),
.end_address = Common::AlignUp(addr + size, Core::Memory::YUZU_PAGESIZE),
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 29ac01eb4..d49f3a7a0 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -1323,7 +1323,6 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
all_siblings.push_back(overlap_id);
} else {
bad_overlap_ids.push_back(overlap_id);
- overlap.flags |= ImageFlagBits::BadOverlap;
}
};
ForEachImageInRegion(cpu_addr, size_bytes, region_check);
@@ -1434,7 +1433,12 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
ImageBase& aliased = slot_images[aliased_id];
aliased.overlapping_images.push_back(new_image_id);
new_image.overlapping_images.push_back(aliased_id);
- new_image.flags |= ImageFlagBits::BadOverlap;
+ if (aliased.info.resources.levels == 1 && aliased.overlapping_images.size() > 1) {
+ aliased.flags |= ImageFlagBits::BadOverlap;
+ }
+ if (new_image.info.resources.levels == 1 && new_image.overlapping_images.size() > 1) {
+ new_image.flags |= ImageFlagBits::BadOverlap;
+ }
}
RegisterImage(new_image_id);
return new_image_id;
--
cgit v1.2.3
From 8014dd82594dbb40e13749203e67b21e8447733c Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Sun, 7 May 2023 23:34:52 +0200
Subject: Texture cache: Only force flush the dma downloads
---
src/video_core/engines/maxwell_dma.cpp | 2 +-
src/video_core/renderer_opengl/gl_rasterizer.cpp | 2 +-
src/video_core/renderer_vulkan/vk_rasterizer.cpp | 2 +-
src/video_core/texture_cache/texture_cache.h | 4 ++--
src/video_core/texture_cache/texture_cache_base.h | 2 +-
5 files changed, 6 insertions(+), 6 deletions(-)
(limited to 'src/video_core')
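DmaImageId now receives the copy direction: the force-one-full-sync fallback introduced earlier only applies to DMA downloads (image to buffer), while uploads into a GPU-modified image keep being accelerated against the cached copy. A standalone sketch of that rule, with plain types standing in for the texture cache:
    #include <iostream>

    struct ImageInfo {
        bool gpu_modified = true;     // stand-in for ImageFlagBits::GpuModified
        bool dma_downloaded = false;  // mirrors ImageInfo::dma_downloaded
    };

    constexpr int NULL_IMAGE_ID = 0;

    int DmaImageId(ImageInfo& info, int image_id, bool is_upload) {
        if (!info.gpu_modified) {
            return NULL_IMAGE_ID; // image already synced with guest memory
        }
        if (!is_upload && !info.dma_downloaded) {
            info.dma_downloaded = true; // first download: fall back and force a full sync
            return NULL_IMAGE_ID;
        }
        return image_id; // accelerate the DMA copy against the cached image
    }

    int main() {
        ImageInfo info;
        std::cout << DmaImageId(info, 42, /*is_upload=*/false) << '\n'; // 0: forced sync once
        std::cout << DmaImageId(info, 42, /*is_upload=*/false) << '\n'; // 42: accelerated
    }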
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index f7400aac8..ebe5536de 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -288,7 +288,7 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
write_buffer.resize_destructive(dst_size);
memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
- memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
+ memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
// If the input is linear and the output is tiled, swizzle the input and copy it over.
SwizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 3f07fe8bb..f5baa0f3c 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -1304,7 +1304,7 @@ bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
const Tegra::DMA::BufferOperand& buffer_operand,
const Tegra::DMA::ImageOperand& image_operand) {
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
- const auto image_id = texture_cache.DmaImageId(image_operand);
+ const auto image_id = texture_cache.DmaImageId(image_operand, IS_IMAGE_UPLOAD);
if (image_id == VideoCommon::NULL_IMAGE_ID) {
return false;
}
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 0bcc20544..628e1376f 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -793,7 +793,7 @@ bool AccelerateDMA::DmaBufferImageCopy(const Tegra::DMA::ImageCopy& copy_info,
const Tegra::DMA::BufferOperand& buffer_operand,
const Tegra::DMA::ImageOperand& image_operand) {
std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
- const auto image_id = texture_cache.DmaImageId(image_operand);
+ const auto image_id = texture_cache.DmaImageId(image_operand, IS_IMAGE_UPLOAD);
if (image_id == VideoCommon::NULL_IMAGE_ID) {
return false;
}
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index d49f3a7a0..e1198dcf8 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -811,7 +811,7 @@ void TextureCache<P>::PopAsyncFlushes() {
}
template <class P>
-ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand) {
+ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand, bool is_upload) {
const ImageInfo dst_info(operand);
const ImageId dst_id = FindDMAImage(dst_info, operand.address);
if (!dst_id) {
@@ -822,7 +822,7 @@ ImageId TextureCache<P>::DmaImageId(const Tegra::DMA::ImageOperand& operand) {
// No need to waste time on an image that's synced with guest
return NULL_IMAGE_ID;
}
- if (!image.info.dma_downloaded) {
+ if (!is_upload && !image.info.dma_downloaded) {
// Force a full sync.
image.info.dma_downloaded = true;
return NULL_IMAGE_ID;
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index 01f5ac588..0720494e5 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -207,7 +207,7 @@ public:
/// Pop asynchronous downloads
void PopAsyncFlushes();
- [[nodiscard]] ImageId DmaImageId(const Tegra::DMA::ImageOperand& operand);
+ [[nodiscard]] ImageId DmaImageId(const Tegra::DMA::ImageOperand& operand, bool is_upload);
[[nodiscard]] std::pair DmaBufferImageCopy(
const Tegra::DMA::ImageCopy& copy_info, const Tegra::DMA::BufferOperand& buffer_operand,
--
cgit v1.2.3