From c6cac2ffaad4ac27f35cea25022d9c59c7ecfbf4 Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Sun, 30 Apr 2023 17:14:06 +0200
Subject: GPU: Add Reactive flushing
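Reactive flushing defers buffer downloads until the guest CPU actually asks for
the data. The first time a range is queried through GetFlushArea(), the memory
tracker marks it as Preflushable and the returned area reports whether the GPU
has already modified it; from then on, GPU writes that touch preflushable
ranges are queued into uncommitted_ranges/pending_ranges for download, while
ranges the CPU never reads back are no longer flushed speculatively.

The call site below is only an illustration of how a rasterizer could consume
the new interface (the hookup outside the buffer cache is not part of this
diff); it uses GetFlushArea() and DownloadMemory() as declared here and keeps
the patch's spelling of the "preemtive" field:

    // Hypothetical caller, e.g. when guest CPU code reads GPU-visible memory.
    if (auto area = buffer_cache.GetFlushArea(cpu_addr, size)) {
        if (!area->preemtive) {
            // The range is GPU-modified and was not yet tracked for preemptive
            // download, so fetch it synchronously this one time.
            buffer_cache.DownloadMemory(area->start_address,
                                        area->end_address - area->start_address);
        }
    }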
---
src/video_core/buffer_cache/buffer_base.h | 9 ++++++
src/video_core/buffer_cache/buffer_cache.h | 35 +++++++++++++++++------
src/video_core/buffer_cache/buffer_cache_base.h | 5 ++--
src/video_core/buffer_cache/memory_tracker_base.h | 26 +++++++++++++++++
src/video_core/buffer_cache/word_manager.h | 14 +++++++--
5 files changed, 76 insertions(+), 13 deletions(-)
(limited to 'src/video_core/buffer_cache')
diff --git a/src/video_core/buffer_cache/buffer_base.h b/src/video_core/buffer_cache/buffer_base.h
index 9cbd95c4b..0bb3bf8ae 100644
--- a/src/video_core/buffer_cache/buffer_base.h
+++ b/src/video_core/buffer_cache/buffer_base.h
@@ -18,6 +18,7 @@ namespace VideoCommon {
enum class BufferFlagBits {
Picked = 1 << 0,
CachedWrites = 1 << 1,
+ PreemtiveDownload = 1 << 2,
};
DECLARE_ENUM_FLAG_OPERATORS(BufferFlagBits)
@@ -54,6 +55,10 @@ public:
flags |= BufferFlagBits::Picked;
}
+ void MarkPreemtiveDownload() noexcept {
+ flags |= BufferFlagBits::PreemtiveDownload;
+ }
+
/// Unmark buffer as picked
void Unpick() noexcept {
flags &= ~BufferFlagBits::Picked;
@@ -84,6 +89,10 @@ public:
return True(flags & BufferFlagBits::CachedWrites);
}
+ bool IsPreemtiveDownload() const noexcept {
+ return True(flags & BufferFlagBits::PreemtiveDownload);
+ }
+
/// Returns the base CPU address of the buffer
[[nodiscard]] VAddr CpuAddr() const noexcept {
return cpu_addr;
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index e534e1e9c..479a1a508 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -111,9 +111,24 @@ void BufferCache<P>::WriteMemory(VAddr cpu_addr, u64 size) {
template <class P>
void BufferCache<P>::CachedWriteMemory(VAddr cpu_addr, u64 size) {
memory_tracker.CachedCpuWrite(cpu_addr, size);
- const IntervalType add_interval{Common::AlignDown(cpu_addr, YUZU_PAGESIZE),
- Common::AlignUp(cpu_addr + size, YUZU_PAGESIZE)};
- cached_ranges.add(add_interval);
+}
+
+template <class P>
+std::optional<VideoCore::RasterizerDownloadArea> BufferCache<P>::GetFlushArea(VAddr cpu_addr,
+ u64 size) {
+ std::optional<VideoCore::RasterizerDownloadArea> area{};
+ area.emplace();
+ VAddr cpu_addr_start_aligned = Common::AlignDown(cpu_addr, Core::Memory::YUZU_PAGESIZE);
+ VAddr cpu_addr_end_aligned = Common::AlignUp(cpu_addr + size, Core::Memory::YUZU_PAGESIZE);
+ area->start_address = cpu_addr_start_aligned;
+ area->end_address = cpu_addr_end_aligned;
+ if (memory_tracker.IsRegionPreflushable(cpu_addr, size)) {
+ area->preemtive = true;
+ return area;
+ };
+ memory_tracker.MarkRegionAsPreflushable(cpu_addr_start_aligned, cpu_addr_end_aligned - cpu_addr_start_aligned);
+ area->preemtive = !IsRegionGpuModified(cpu_addr, size);
+ return area;
}
template <class P>
@@ -191,8 +206,10 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
const VAddr new_base_address = *cpu_dest_address + diff;
const IntervalType add_interval{new_base_address, new_base_address + size};
tmp_intervals.push_back(add_interval);
- uncommitted_ranges.add(add_interval);
- pending_ranges.add(add_interval);
+ if (memory_tracker.IsRegionPreflushable(new_base_address, new_base_address + size)) {
+ uncommitted_ranges.add(add_interval);
+ pending_ranges.add(add_interval);
+ }
};
ForEachInRangeSet(common_ranges, *cpu_src_address, amount, mirror);
// This subtraction in this order is important for overlapping copies.
@@ -205,7 +222,7 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
if (has_new_downloads) {
memory_tracker.MarkRegionAsGpuModified(*cpu_dest_address, amount);
}
- std::vector<u8> tmp_buffer(amount);
+ tmp_buffer.resize(amount);
cpu_memory.ReadBlockUnsafe(*cpu_src_address, tmp_buffer.data(), amount);
cpu_memory.WriteBlockUnsafe(*cpu_dest_address, tmp_buffer.data(), amount);
return true;
@@ -441,9 +458,7 @@ void BufferCache::BindComputeTextureBuffer(size_t tbo_index, GPUVAddr gpu_add
template <class P>
void BufferCache::FlushCachedWrites() {
- cached_write_buffer_ids.clear();
memory_tracker.FlushCachedWrites();
- cached_ranges.clear();
}
template <class P>
@@ -1221,6 +1236,9 @@ void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 s
const IntervalType base_interval{cpu_addr, cpu_addr + size};
common_ranges.add(base_interval);
+ if (!memory_tracker.IsRegionPreflushable(cpu_addr, cpu_addr + size)) {
+ return;
+ }
uncommitted_ranges.add(base_interval);
pending_ranges.add(base_interval);
}
@@ -1629,7 +1647,6 @@ void BufferCache<P>::DeleteBuffer(BufferId buffer_id, bool do_not_mark) {
replace(transform_feedback_buffers);
replace(compute_uniform_buffers);
replace(compute_storage_buffers);
- std::erase(cached_write_buffer_ids, buffer_id);
// Mark the whole buffer as CPU written to stop tracking CPU writes
if (!do_not_mark) {
diff --git a/src/video_core/buffer_cache/buffer_cache_base.h b/src/video_core/buffer_cache/buffer_cache_base.h
index 656baa550..e3914a53a 100644
--- a/src/video_core/buffer_cache/buffer_cache_base.h
+++ b/src/video_core/buffer_cache/buffer_cache_base.h
@@ -188,6 +188,8 @@ public:
void DownloadMemory(VAddr cpu_addr, u64 size);
+ std::optional<VideoCore::RasterizerDownloadArea> GetFlushArea(VAddr cpu_addr, u64 size);
+
bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<const u8> inlined_buffer);
void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size);
@@ -541,8 +543,6 @@ private:
std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES>, Empty>
uniform_buffer_binding_sizes{};
- std::vector<BufferId> cached_write_buffer_ids;
-
MemoryTracker memory_tracker;
IntervalSet uncommitted_ranges;
IntervalSet common_ranges;
@@ -575,6 +575,7 @@ private:
bool active_async_buffers = false;
std::array<BufferId, ((1ULL << 39) >> CACHING_PAGEBITS)> page_table;
+ std::vector<u8> tmp_buffer;
};
} // namespace VideoCommon
diff --git a/src/video_core/buffer_cache/memory_tracker_base.h b/src/video_core/buffer_cache/memory_tracker_base.h
index dc4ebfcaa..6036b21c9 100644
--- a/src/video_core/buffer_cache/memory_tracker_base.h
+++ b/src/video_core/buffer_cache/memory_tracker_base.h
@@ -66,6 +66,14 @@ public:
});
}
+ /// Returns true if a region has been marked as Preflushable
+ [[nodiscard]] bool IsRegionPreflushable(VAddr query_cpu_addr, u64 query_size) noexcept {
+ return IteratePages(
+ query_cpu_addr, query_size, [](Manager* manager, u64 offset, size_t size) {
+ return manager->template IsRegionModified<Type::Preflushable>(offset, size);
+ });
+ }
+
/// Mark region as CPU modified, notifying the rasterizer about this change
void MarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 query_size) {
IteratePages(dirty_cpu_addr, query_size,
@@ -93,6 +101,15 @@ public:
});
}
+ /// Mark region as Preflushable
+ void MarkRegionAsPreflushable(VAddr dirty_cpu_addr, u64 query_size) noexcept {
+ IteratePages(dirty_cpu_addr, query_size,
+ [](Manager* manager, u64 offset, size_t size) {
+ manager->template ChangeRegionState<Type::Preflushable, true>(
+ manager->GetCpuAddr() + offset, size);
+ });
+ }
+
/// Unmark region as modified from the host GPU
void UnmarkRegionAsGpuModified(VAddr dirty_cpu_addr, u64 query_size) noexcept {
IteratePages(dirty_cpu_addr, query_size,
@@ -102,6 +119,15 @@ public:
});
}
+ /// Unmark region as Preflushable
+ void UnmarkRegionAsPreflushable(VAddr dirty_cpu_addr, u64 query_size) noexcept {
+ IteratePages(dirty_cpu_addr, query_size,
+ [](Manager* manager, u64 offset, size_t size) {
+ manager->template ChangeRegionState<Type::Preflushable, false>(
+ manager->GetCpuAddr() + offset, size);
+ });
+ }
+
/// Mark region as modified from the CPU
/// but don't mark it as modified until FlusHCachedWrites is called.
void CachedCpuWrite(VAddr dirty_cpu_addr, u64 query_size) {
diff --git a/src/video_core/buffer_cache/word_manager.h b/src/video_core/buffer_cache/word_manager.h
index a42455045..0fb199a54 100644
--- a/src/video_core/buffer_cache/word_manager.h
+++ b/src/video_core/buffer_cache/word_manager.h
@@ -26,6 +26,7 @@ enum class Type {
GPU,
CachedCPU,
Untracked,
+ Preflushable,
};
/// Vector tracking modified pages tightly packed with small vector optimization
@@ -55,17 +56,20 @@ struct Words {
gpu.stack.fill(0);
cached_cpu.stack.fill(0);
untracked.stack.fill(~u64{0});
+ preflushable.stack.fill(0);
} else {
// Share allocation between CPU and GPU pages and set their default values
- u64* const alloc = new u64[num_words * 4];
+ u64* const alloc = new u64[num_words * 5];
cpu.heap = alloc;
gpu.heap = alloc + num_words;
cached_cpu.heap = alloc + num_words * 2;
untracked.heap = alloc + num_words * 3;
+ preflushable.heap = alloc + num_words * 4;
std::fill_n(cpu.heap, num_words, ~u64{0});
std::fill_n(gpu.heap, num_words, 0);
std::fill_n(cached_cpu.heap, num_words, 0);
std::fill_n(untracked.heap, num_words, ~u64{0});
+ std::fill_n(preflushable.heap, num_words, 0);
}
// Clean up tailing bits
const u64 last_word_size = size_bytes % BYTES_PER_WORD;
@@ -88,13 +92,14 @@ struct Words {
gpu = rhs.gpu;
cached_cpu = rhs.cached_cpu;
untracked = rhs.untracked;
+ preflushable = rhs.preflushable;
rhs.cpu.heap = nullptr;
return *this;
}
Words(Words&& rhs) noexcept
: size_bytes{rhs.size_bytes}, num_words{rhs.num_words}, cpu{rhs.cpu}, gpu{rhs.gpu},
- cached_cpu{rhs.cached_cpu}, untracked{rhs.untracked} {
+ cached_cpu{rhs.cached_cpu}, untracked{rhs.untracked}, preflushable{rhs.preflushable} {
rhs.cpu.heap = nullptr;
}
@@ -129,6 +134,8 @@ struct Words {
return std::span(cached_cpu.Pointer(IsShort()), num_words);
} else if constexpr (type == Type::Untracked) {
return std::span(untracked.Pointer(IsShort()), num_words);
+ } else if constexpr (type == Type::Preflushable) {
+ return std::span(preflushable.Pointer(IsShort()), num_words);
}
}
@@ -142,6 +149,8 @@ struct Words {
return std::span(cached_cpu.Pointer(IsShort()), num_words);
} else if constexpr (type == Type::Untracked) {
return std::span(untracked.Pointer(IsShort()), num_words);
+ } else if constexpr (type == Type::Preflushable) {
+ return std::span(preflushable.Pointer(IsShort()), num_words);
}
}
@@ -151,6 +160,7 @@ struct Words {
WordsArray<stack_words> gpu;
WordsArray<stack_words> cached_cpu;
WordsArray<stack_words> untracked;
+ WordsArray<stack_words> preflushable;
};
template
--
cgit v1.2.3
From 92da86290c5ea657ae918bfe36071bdf7ac15075 Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Thu, 4 May 2023 02:34:49 +0200
Subject: Settings: add option to enable / disable reactive flushing
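This makes the behaviour introduced by the previous commit opt-in. With the new
use_reactive_flushing setting disabled, DMACopy and MarkWrittenBuffer queue
every GPU-written range for download exactly as before; with it enabled, only
ranges the CPU has previously requested (and which the tracker therefore marks
as Preflushable) are queued. A minimal model of the predicate follows; apart
from the Settings values referenced in the diff, the helper below is purely
illustrative:

    // Returns true when a GPU-written range should be queued for download.
    bool ShouldQueueForDownload(bool use_reactive_flushing, bool region_is_preflushable) {
        // Reactive flushing off -> old behaviour, always queue.
        // Reactive flushing on  -> queue only ranges the CPU has asked about before.
        return !use_reactive_flushing || region_is_preflushable;
    }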
---
src/video_core/buffer_cache/buffer_cache.h | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
(limited to 'src/video_core/buffer_cache')
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 479a1a508..474822354 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -126,7 +126,8 @@ std::optional BufferCache::GetFlushArea(VA
area->preemtive = true;
return area;
};
- memory_tracker.MarkRegionAsPreflushable(cpu_addr_start_aligned, cpu_addr_end_aligned - cpu_addr_start_aligned);
+ memory_tracker.MarkRegionAsPreflushable(cpu_addr_start_aligned,
+ cpu_addr_end_aligned - cpu_addr_start_aligned);
area->preemtive = !IsRegionGpuModified(cpu_addr, size);
return area;
}
@@ -206,7 +207,8 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
const VAddr new_base_address = *cpu_dest_address + diff;
const IntervalType add_interval{new_base_address, new_base_address + size};
tmp_intervals.push_back(add_interval);
- if (memory_tracker.IsRegionPreflushable(new_base_address, new_base_address + size)) {
+ if (!Settings::values.use_reactive_flushing.GetValue() ||
+ memory_tracker.IsRegionPreflushable(new_base_address, new_base_address + size)) {
uncommitted_ranges.add(add_interval);
pending_ranges.add(add_interval);
}
@@ -1236,7 +1238,8 @@ void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 s
const IntervalType base_interval{cpu_addr, cpu_addr + size};
common_ranges.add(base_interval);
- if (!memory_tracker.IsRegionPreflushable(cpu_addr, cpu_addr + size)) {
+ if (Settings::values.use_reactive_flushing.GetValue() &&
+ !memory_tracker.IsRegionPreflushable(cpu_addr, cpu_addr + size)) {
return;
}
uncommitted_ranges.add(base_interval);
--
cgit v1.2.3
From 36c302fa32b475abb1b211934eab14fe0cad936a Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Thu, 4 May 2023 13:23:36 +0200
Subject: Buffer cache: always use async buffer downloads and fix regression.
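Backends that implement async downloads now always take the staging-buffer
path; the per-frame active_async_buffers toggle derived from the GPU accuracy
setting is removed. The synchronous fallback remains only for backends without
async downloads, where non-High accuracy simply discards the pending ranges.
The regression fix lives in word_manager: GPU-modified queries now mask out
pages that are currently untracked. A standalone sketch of that masking, with
made-up bit patterns for illustration only:

    #include <cstdint>

    int main() {
        // One bit per page, packed into 64-bit words, as in word_manager.
        std::uint64_t gpu_modified = 0b1111'0000; // pages written by the GPU
        std::uint64_t untracked    = 0b0011'0000; // pages currently untracked
        std::uint64_t query_mask   = ~std::uint64_t{0};
        // With this patch, GPU-state queries ignore untracked pages:
        std::uint64_t word = gpu_modified & query_mask & ~untracked;
        return word == 0b1100'0000 ? 0 : 1; // only tracked, GPU-written pages remain
    }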
---
src/video_core/buffer_cache/buffer_cache.h | 114 ++++++++++++------------
src/video_core/buffer_cache/buffer_cache_base.h | 2 -
src/video_core/buffer_cache/word_manager.h | 13 +++
3 files changed, 68 insertions(+), 61 deletions(-)
(limited to 'src/video_core/buffer_cache')
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 474822354..0b15944d6 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -23,8 +23,6 @@ BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
common_ranges.clear();
inline_buffer_id = NULL_BUFFER_ID;
- active_async_buffers = !Settings::IsGPULevelHigh();
-
if (!runtime.CanReportMemoryUsage()) {
minimum_memory = DEFAULT_EXPECTED_MEMORY;
critical_memory = DEFAULT_CRITICAL_MEMORY;
@@ -75,8 +73,6 @@ void BufferCache<P>::TickFrame() {
uniform_cache_hits[0] = 0;
uniform_cache_shots[0] = 0;
- active_async_buffers = !Settings::IsGPULevelHigh();
-
const bool skip_preferred = hits * 256 < shots * 251;
uniform_buffer_skip_cache_size = skip_preferred ? DEFAULT_SKIP_CACHE_SIZE : 0;
@@ -491,9 +487,8 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
if (committed_ranges.empty()) {
if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
- if (active_async_buffers) {
- async_buffers.emplace_back(std::optional<Async_Buffer>{});
- }
+
+ async_buffers.emplace_back(std::optional<Async_Buffer>{});
}
return;
}
@@ -554,64 +549,65 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
committed_ranges.clear();
if (downloads.empty()) {
if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
- if (active_async_buffers) {
- async_buffers.emplace_back(std::optional<Async_Buffer>{});
- }
+
+ async_buffers.emplace_back(std::optional<Async_Buffer>{});
}
return;
}
- if (active_async_buffers) {
- if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
- auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes, true);
- boost::container::small_vector normalized_copies;
- IntervalSet new_async_range{};
- runtime.PreCopyBarrier();
- for (auto& [copy, buffer_id] : downloads) {
- copy.dst_offset += download_staging.offset;
- const std::array copies{copy};
- BufferCopy second_copy{copy};
- Buffer& buffer = slot_buffers[buffer_id];
- second_copy.src_offset = static_cast<size_t>(buffer.CpuAddr()) + copy.src_offset;
- VAddr orig_cpu_addr = static_cast<VAddr>(second_copy.src_offset);
- const IntervalType base_interval{orig_cpu_addr, orig_cpu_addr + copy.size};
- async_downloads += std::make_pair(base_interval, 1);
- runtime.CopyBuffer(download_staging.buffer, buffer, copies, false);
- normalized_copies.push_back(second_copy);
- }
- runtime.PostCopyBarrier();
- pending_downloads.emplace_back(std::move(normalized_copies));
- async_buffers.emplace_back(download_staging);
- } else {
- committed_ranges.clear();
- uncommitted_ranges.clear();
+ if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
+ auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes, true);
+ boost::container::small_vector normalized_copies;
+ IntervalSet new_async_range{};
+ runtime.PreCopyBarrier();
+ for (auto& [copy, buffer_id] : downloads) {
+ copy.dst_offset += download_staging.offset;
+ const std::array copies{copy};
+ BufferCopy second_copy{copy};
+ Buffer& buffer = slot_buffers[buffer_id];
+ second_copy.src_offset = static_cast<size_t>(buffer.CpuAddr()) + copy.src_offset;
+ VAddr orig_cpu_addr = static_cast<VAddr>(second_copy.src_offset);
+ const IntervalType base_interval{orig_cpu_addr, orig_cpu_addr + copy.size};
+ async_downloads += std::make_pair(base_interval, 1);
+ runtime.CopyBuffer(download_staging.buffer, buffer, copies, false);
+ normalized_copies.push_back(second_copy);
}
+ runtime.PostCopyBarrier();
+ pending_downloads.emplace_back(std::move(normalized_copies));
+ async_buffers.emplace_back(download_staging);
} else {
- if constexpr (USE_MEMORY_MAPS) {
- auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes);
- runtime.PreCopyBarrier();
- for (auto& [copy, buffer_id] : downloads) {
- // Have in mind the staging buffer offset for the copy
- copy.dst_offset += download_staging.offset;
- const std::array copies{copy};
- runtime.CopyBuffer(download_staging.buffer, slot_buffers[buffer_id], copies, false);
- }
- runtime.PostCopyBarrier();
- runtime.Finish();
- for (const auto& [copy, buffer_id] : downloads) {
- const Buffer& buffer = slot_buffers[buffer_id];
- const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
- // Undo the modified offset
- const u64 dst_offset = copy.dst_offset - download_staging.offset;
- const u8* read_mapped_memory = download_staging.mapped_span.data() + dst_offset;
- cpu_memory.WriteBlockUnsafe(cpu_addr, read_mapped_memory, copy.size);
- }
+ if (!Settings::IsGPULevelHigh()) {
+ committed_ranges.clear();
+ uncommitted_ranges.clear();
} else {
- const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
- for (const auto& [copy, buffer_id] : downloads) {
- Buffer& buffer = slot_buffers[buffer_id];
- buffer.ImmediateDownload(copy.src_offset, immediate_buffer.subspan(0, copy.size));
- const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
- cpu_memory.WriteBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size);
+ if constexpr (USE_MEMORY_MAPS) {
+ auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes);
+ runtime.PreCopyBarrier();
+ for (auto& [copy, buffer_id] : downloads) {
+ // Have in mind the staging buffer offset for the copy
+ copy.dst_offset += download_staging.offset;
+ const std::array copies{copy};
+ runtime.CopyBuffer(download_staging.buffer, slot_buffers[buffer_id], copies,
+ false);
+ }
+ runtime.PostCopyBarrier();
+ runtime.Finish();
+ for (const auto& [copy, buffer_id] : downloads) {
+ const Buffer& buffer = slot_buffers[buffer_id];
+ const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
+ // Undo the modified offset
+ const u64 dst_offset = copy.dst_offset - download_staging.offset;
+ const u8* read_mapped_memory = download_staging.mapped_span.data() + dst_offset;
+ cpu_memory.WriteBlockUnsafe(cpu_addr, read_mapped_memory, copy.size);
+ }
+ } else {
+ const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
+ for (const auto& [copy, buffer_id] : downloads) {
+ Buffer& buffer = slot_buffers[buffer_id];
+ buffer.ImmediateDownload(copy.src_offset,
+ immediate_buffer.subspan(0, copy.size));
+ const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
+ cpu_memory.WriteBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size);
+ }
}
}
}
diff --git a/src/video_core/buffer_cache/buffer_cache_base.h b/src/video_core/buffer_cache/buffer_cache_base.h
index e3914a53a..0445ec47f 100644
--- a/src/video_core/buffer_cache/buffer_cache_base.h
+++ b/src/video_core/buffer_cache/buffer_cache_base.h
@@ -572,8 +572,6 @@ private:
u64 critical_memory = 0;
BufferId inline_buffer_id;
- bool active_async_buffers = false;
-
std::array<BufferId, ((1ULL << 39) >> CACHING_PAGEBITS)> page_table;
std::vector<u8> tmp_buffer;
};
diff --git a/src/video_core/buffer_cache/word_manager.h b/src/video_core/buffer_cache/word_manager.h
index 0fb199a54..a336bde41 100644
--- a/src/video_core/buffer_cache/word_manager.h
+++ b/src/video_core/buffer_cache/word_manager.h
@@ -302,6 +302,9 @@ public:
(pending_pointer - pending_offset) * BYTES_PER_PAGE);
};
IterateWords(offset, size, [&](size_t index, u64 mask) {
+ if constexpr (type == Type::GPU) {
+ mask &= ~untracked_words[index];
+ }
const u64 word = state_words[index] & mask;
if constexpr (clear) {
if constexpr (type == Type::CPU || type == Type::CachedCPU) {
@@ -350,8 +353,13 @@ public:
static_assert(type != Type::Untracked);
const std::span<const u64> state_words = words.template Span<type>();
+ [[maybe_unused]] const std::span<const u64> untracked_words =
+ words.template Span<Type::Untracked>();
bool result = false;
IterateWords(offset, size, [&](size_t index, u64 mask) {
+ if constexpr (type == Type::GPU) {
+ mask &= ~untracked_words[index];
+ }
const u64 word = state_words[index] & mask;
if (word != 0) {
result = true;
@@ -372,9 +380,14 @@ public:
[[nodiscard]] std::pair<u64, u64> ModifiedRegion(u64 offset, u64 size) const noexcept {
static_assert(type != Type::Untracked);
const std::span<const u64> state_words = words.template Span<type>();
+ [[maybe_unused]] const std::span<const u64> untracked_words =
+ words.template Span<Type::Untracked>();
u64 begin = std::numeric_limits::max();
u64 end = 0;
IterateWords(offset, size, [&](size_t index, u64 mask) {
+ if constexpr (type == Type::GPU) {
+ mask &= ~untracked_words[index];
+ }
const u64 word = state_words[index] & mask;
if (word == 0) {
return;
--
cgit v1.2.3
From 2df19ef0fd5a91ca87e2c2cf201166a40c9d44dc Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow
Date: Sun, 7 May 2023 23:25:34 +0200
Subject: Buffer Cache: disable reactive flushing in it.
---
src/video_core/buffer_cache/buffer_cache.h | 11 ++---------
1 file changed, 2 insertions(+), 9 deletions(-)
(limited to 'src/video_core/buffer_cache')
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 0b15944d6..6624919a4 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -203,11 +203,8 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
const VAddr new_base_address = *cpu_dest_address + diff;
const IntervalType add_interval{new_base_address, new_base_address + size};
tmp_intervals.push_back(add_interval);
- if (!Settings::values.use_reactive_flushing.GetValue() ||
- memory_tracker.IsRegionPreflushable(new_base_address, new_base_address + size)) {
- uncommitted_ranges.add(add_interval);
- pending_ranges.add(add_interval);
- }
+ uncommitted_ranges.add(add_interval);
+ pending_ranges.add(add_interval);
};
ForEachInRangeSet(common_ranges, *cpu_src_address, amount, mirror);
// This subtraction in this order is important for overlapping copies.
@@ -1234,10 +1231,6 @@ void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 s
const IntervalType base_interval{cpu_addr, cpu_addr + size};
common_ranges.add(base_interval);
- if (Settings::values.use_reactive_flushing.GetValue() &&
- !memory_tracker.IsRegionPreflushable(cpu_addr, cpu_addr + size)) {
- return;
- }
uncommitted_ranges.add(base_interval);
pending_ranges.add(base_interval);
}
--
cgit v1.2.3