author    liamwhite 2023-05-07 19:05:56 -0400
committer GitHub    2023-05-07 19:05:56 -0400
commit    2688fb1aa2d80fc4edac87ff93365f3570cd3af8 (patch)
tree      b874061d30aa6a03fd3c92116df92ed6d3b91a19 /src/video_core/buffer_cache
parent    Merge pull request #10097 from german77/nfp_full (diff)
parent    Texture cache: Only force flush the dma downloads (diff)
Merge pull request #10155 from FernandoS27/reactive-flushing-new
Y.F.C. bring back Reactive Flushing
Diffstat (limited to 'src/video_core/buffer_cache')
-rw-r--r--  src/video_core/buffer_cache/buffer_base.h            9
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h         141
-rw-r--r--  src/video_core/buffer_cache/buffer_cache_base.h      7
-rw-r--r--  src/video_core/buffer_cache/memory_tracker_base.h   26
-rw-r--r--  src/video_core/buffer_cache/word_manager.h          27
5 files changed, 138 insertions, 72 deletions
diff --git a/src/video_core/buffer_cache/buffer_base.h b/src/video_core/buffer_cache/buffer_base.h
index 9cbd95c4b..0bb3bf8ae 100644
--- a/src/video_core/buffer_cache/buffer_base.h
+++ b/src/video_core/buffer_cache/buffer_base.h
@@ -18,6 +18,7 @@ namespace VideoCommon {
 enum class BufferFlagBits {
     Picked = 1 << 0,
     CachedWrites = 1 << 1,
+    PreemtiveDownload = 1 << 2,
 };
 DECLARE_ENUM_FLAG_OPERATORS(BufferFlagBits)
 
@@ -54,6 +55,10 @@ public:
         flags |= BufferFlagBits::Picked;
     }
 
+    void MarkPreemtiveDownload() noexcept {
+        flags |= BufferFlagBits::PreemtiveDownload;
+    }
+
     /// Unmark buffer as picked
     void Unpick() noexcept {
         flags &= ~BufferFlagBits::Picked;
@@ -84,6 +89,10 @@ public:
         return True(flags & BufferFlagBits::CachedWrites);
     }
 
+    bool IsPreemtiveDownload() const noexcept {
+        return True(flags & BufferFlagBits::PreemtiveDownload);
+    }
+
     /// Returns the base CPU address of the buffer
     [[nodiscard]] VAddr CpuAddr() const noexcept {
         return cpu_addr;
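The new flag slots into the existing BufferFlagBits bit-set pattern: MarkPreemtiveDownload() sets a dedicated bit and IsPreemtiveDownload() tests it. A minimal standalone sketch of those mechanics follows; the hand-written operators and True() helper are simplified stand-ins for yuzu's DECLARE_ENUM_FLAG_OPERATORS machinery, not the real definitions.

#include <cstdio>

enum class BufferFlagBits : unsigned {
    Picked = 1 << 0,
    CachedWrites = 1 << 1,
    PreemtiveDownload = 1 << 2, // spelling kept exactly as committed
};

// Stand-ins for DECLARE_ENUM_FLAG_OPERATORS(BufferFlagBits) and True().
constexpr BufferFlagBits operator|(BufferFlagBits a, BufferFlagBits b) {
    return static_cast<BufferFlagBits>(static_cast<unsigned>(a) | static_cast<unsigned>(b));
}
constexpr BufferFlagBits operator&(BufferFlagBits a, BufferFlagBits b) {
    return static_cast<BufferFlagBits>(static_cast<unsigned>(a) & static_cast<unsigned>(b));
}
constexpr BufferFlagBits& operator|=(BufferFlagBits& a, BufferFlagBits b) {
    return a = a | b;
}
constexpr bool True(BufferFlagBits f) {
    return static_cast<unsigned>(f) != 0;
}

int main() {
    BufferFlagBits flags{};
    flags |= BufferFlagBits::PreemtiveDownload; // what MarkPreemtiveDownload() does
    std::printf("preemtive: %d\n", True(flags & BufferFlagBits::PreemtiveDownload) ? 1 : 0);
}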
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index e534e1e9c..6624919a4 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -23,8 +23,6 @@ BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
     common_ranges.clear();
     inline_buffer_id = NULL_BUFFER_ID;
 
-    active_async_buffers = !Settings::IsGPULevelHigh();
-
     if (!runtime.CanReportMemoryUsage()) {
         minimum_memory = DEFAULT_EXPECTED_MEMORY;
         critical_memory = DEFAULT_CRITICAL_MEMORY;
@@ -75,8 +73,6 @@ void BufferCache<P>::TickFrame() {
     uniform_cache_hits[0] = 0;
     uniform_cache_shots[0] = 0;
 
-    active_async_buffers = !Settings::IsGPULevelHigh();
-
     const bool skip_preferred = hits * 256 < shots * 251;
     uniform_buffer_skip_cache_size = skip_preferred ? DEFAULT_SKIP_CACHE_SIZE : 0;
 
@@ -111,9 +107,25 @@ void BufferCache<P>::WriteMemory(VAddr cpu_addr, u64 size) {
 template <class P>
 void BufferCache<P>::CachedWriteMemory(VAddr cpu_addr, u64 size) {
     memory_tracker.CachedCpuWrite(cpu_addr, size);
-    const IntervalType add_interval{Common::AlignDown(cpu_addr, YUZU_PAGESIZE),
-                                    Common::AlignUp(cpu_addr + size, YUZU_PAGESIZE)};
-    cached_ranges.add(add_interval);
+}
+
+template <class P>
+std::optional<VideoCore::RasterizerDownloadArea> BufferCache<P>::GetFlushArea(VAddr cpu_addr,
+                                                                              u64 size) {
+    std::optional<VideoCore::RasterizerDownloadArea> area{};
+    area.emplace();
+    VAddr cpu_addr_start_aligned = Common::AlignDown(cpu_addr, Core::Memory::YUZU_PAGESIZE);
+    VAddr cpu_addr_end_aligned = Common::AlignUp(cpu_addr + size, Core::Memory::YUZU_PAGESIZE);
+    area->start_address = cpu_addr_start_aligned;
+    area->end_address = cpu_addr_end_aligned;
+    if (memory_tracker.IsRegionPreflushable(cpu_addr, size)) {
+        area->preemtive = true;
+        return area;
+    };
+    memory_tracker.MarkRegionAsPreflushable(cpu_addr_start_aligned,
+                                            cpu_addr_end_aligned - cpu_addr_start_aligned);
+    area->preemtive = !IsRegionGpuModified(cpu_addr, size);
+    return area;
 }
 
 template <class P>
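GetFlushArea is the core of the reactive scheme: the first CPU read over a range returns a page-aligned download area and marks the pages preflushable, so later GPU writes there are downloaded eagerly and subsequent reads need no stall. A condensed, self-contained restatement of that decision flow follows; DownloadArea and the stubbed tracker calls are simplified stand-ins for the real RasterizerDownloadArea/MemoryTracker API, not yuzu code.

#include <cstdint>
#include <cstdio>
#include <optional>

struct DownloadArea {
    uint64_t start_address;
    uint64_t end_address;
    bool preemtive; // spelling mirrors the committed field name
};

// Stand-ins for the MemoryTracker/IsRegionGpuModified queries; a single
// global bool models the preflushable state of one region.
static bool region_preflushable = false;
static bool IsRegionPreflushable(uint64_t, uint64_t) { return region_preflushable; }
static void MarkRegionAsPreflushable(uint64_t, uint64_t) { region_preflushable = true; }
static bool IsRegionGpuModified(uint64_t, uint64_t) { return true; }

static std::optional<DownloadArea> GetFlushArea(uint64_t addr, uint64_t size) {
    constexpr uint64_t PAGE = 0x1000;
    DownloadArea area{addr & ~(PAGE - 1), (addr + size + PAGE - 1) & ~(PAGE - 1), false};
    if (IsRegionPreflushable(addr, size)) {
        area.preemtive = true; // already downloaded eagerly; nothing to wait on
        return area;
    }
    // First read of this range: remember it so future GPU writes flush early.
    MarkRegionAsPreflushable(area.start_address, area.end_address - area.start_address);
    area.preemtive = !IsRegionGpuModified(addr, size); // stale GPU data -> must flush
    return area;
}

int main() {
    const auto first = GetFlushArea(0x12345, 0x100);  // preemtive == 0: flush now
    const auto second = GetFlushArea(0x12345, 0x100); // preemtive == 1: no stall
    std::printf("%d %d\n", first->preemtive, second->preemtive);
}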
@@ -205,7 +217,7 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
     if (has_new_downloads) {
         memory_tracker.MarkRegionAsGpuModified(*cpu_dest_address, amount);
     }
-    std::vector<u8> tmp_buffer(amount);
+    tmp_buffer.resize(amount);
     cpu_memory.ReadBlockUnsafe(*cpu_src_address, tmp_buffer.data(), amount);
     cpu_memory.WriteBlockUnsafe(*cpu_dest_address, tmp_buffer.data(), amount);
     return true;
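Hoisting tmp_buffer out of DMACopy into a member (declared in buffer_cache_base.h further down) turns a heap allocation per copy into an occasional capacity growth. A minimal sketch of why the reuse pays off; DmaCopier is a hypothetical stand-in for the cache:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct DmaCopier {
    std::vector<uint8_t> tmp_buffer; // lives as long as the cache, as in the diff

    void Copy(std::size_t amount) {
        tmp_buffer.resize(amount); // reallocates only when amount exceeds capacity
        // ... ReadBlockUnsafe / WriteBlockUnsafe through tmp_buffer.data() ...
    }
};

int main() {
    DmaCopier copier;
    copier.Copy(64 * 1024);
    const void* first = copier.tmp_buffer.data();
    copier.Copy(32 * 1024); // smaller request: the same storage is reused
    std::printf("reused: %d\n", first == copier.tmp_buffer.data());
}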
@@ -441,9 +453,7 @@ void BufferCache<P>::BindComputeTextureBuffer(size_t tbo_index, GPUVAddr gpu_add
 
 template <class P>
 void BufferCache<P>::FlushCachedWrites() {
-    cached_write_buffer_ids.clear();
     memory_tracker.FlushCachedWrites();
-    cached_ranges.clear();
 }
 
 template <class P>
@@ -474,9 +484,8 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
 
     if (committed_ranges.empty()) {
         if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
-            if (active_async_buffers) {
-                async_buffers.emplace_back(std::optional<Async_Buffer>{});
-            }
+
+            async_buffers.emplace_back(std::optional<Async_Buffer>{});
         }
         return;
     }
@@ -537,64 +546,65 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
     committed_ranges.clear();
     if (downloads.empty()) {
         if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
-            if (active_async_buffers) {
-                async_buffers.emplace_back(std::optional<Async_Buffer>{});
-            }
+
+            async_buffers.emplace_back(std::optional<Async_Buffer>{});
         }
         return;
     }
-    if (active_async_buffers) {
-        if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
-            auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes, true);
-            boost::container::small_vector<BufferCopy, 4> normalized_copies;
-            IntervalSet new_async_range{};
-            runtime.PreCopyBarrier();
-            for (auto& [copy, buffer_id] : downloads) {
-                copy.dst_offset += download_staging.offset;
-                const std::array copies{copy};
-                BufferCopy second_copy{copy};
-                Buffer& buffer = slot_buffers[buffer_id];
-                second_copy.src_offset = static_cast<size_t>(buffer.CpuAddr()) + copy.src_offset;
-                VAddr orig_cpu_addr = static_cast<VAddr>(second_copy.src_offset);
-                const IntervalType base_interval{orig_cpu_addr, orig_cpu_addr + copy.size};
-                async_downloads += std::make_pair(base_interval, 1);
-                runtime.CopyBuffer(download_staging.buffer, buffer, copies, false);
-                normalized_copies.push_back(second_copy);
-            }
-            runtime.PostCopyBarrier();
-            pending_downloads.emplace_back(std::move(normalized_copies));
-            async_buffers.emplace_back(download_staging);
-        } else {
-            committed_ranges.clear();
-            uncommitted_ranges.clear();
+    if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
+        auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes, true);
+        boost::container::small_vector<BufferCopy, 4> normalized_copies;
+        IntervalSet new_async_range{};
+        runtime.PreCopyBarrier();
+        for (auto& [copy, buffer_id] : downloads) {
+            copy.dst_offset += download_staging.offset;
+            const std::array copies{copy};
+            BufferCopy second_copy{copy};
+            Buffer& buffer = slot_buffers[buffer_id];
+            second_copy.src_offset = static_cast<size_t>(buffer.CpuAddr()) + copy.src_offset;
+            VAddr orig_cpu_addr = static_cast<VAddr>(second_copy.src_offset);
+            const IntervalType base_interval{orig_cpu_addr, orig_cpu_addr + copy.size};
+            async_downloads += std::make_pair(base_interval, 1);
+            runtime.CopyBuffer(download_staging.buffer, buffer, copies, false);
+            normalized_copies.push_back(second_copy);
         }
+        runtime.PostCopyBarrier();
+        pending_downloads.emplace_back(std::move(normalized_copies));
+        async_buffers.emplace_back(download_staging);
     } else {
-        if constexpr (USE_MEMORY_MAPS) {
-            auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes);
-            runtime.PreCopyBarrier();
-            for (auto& [copy, buffer_id] : downloads) {
-                // Have in mind the staging buffer offset for the copy
-                copy.dst_offset += download_staging.offset;
-                const std::array copies{copy};
-                runtime.CopyBuffer(download_staging.buffer, slot_buffers[buffer_id], copies, false);
-            }
-            runtime.PostCopyBarrier();
-            runtime.Finish();
-            for (const auto& [copy, buffer_id] : downloads) {
-                const Buffer& buffer = slot_buffers[buffer_id];
-                const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
-                // Undo the modified offset
-                const u64 dst_offset = copy.dst_offset - download_staging.offset;
-                const u8* read_mapped_memory = download_staging.mapped_span.data() + dst_offset;
-                cpu_memory.WriteBlockUnsafe(cpu_addr, read_mapped_memory, copy.size);
-            }
+        if (!Settings::IsGPULevelHigh()) {
+            committed_ranges.clear();
+            uncommitted_ranges.clear();
         } else {
-            const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
-            for (const auto& [copy, buffer_id] : downloads) {
-                Buffer& buffer = slot_buffers[buffer_id];
-                buffer.ImmediateDownload(copy.src_offset, immediate_buffer.subspan(0, copy.size));
-                const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
-                cpu_memory.WriteBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size);
+            if constexpr (USE_MEMORY_MAPS) {
+                auto download_staging = runtime.DownloadStagingBuffer(total_size_bytes);
+                runtime.PreCopyBarrier();
+                for (auto& [copy, buffer_id] : downloads) {
+                    // Have in mind the staging buffer offset for the copy
+                    copy.dst_offset += download_staging.offset;
+                    const std::array copies{copy};
+                    runtime.CopyBuffer(download_staging.buffer, slot_buffers[buffer_id], copies,
+                                       false);
+                }
+                runtime.PostCopyBarrier();
+                runtime.Finish();
+                for (const auto& [copy, buffer_id] : downloads) {
+                    const Buffer& buffer = slot_buffers[buffer_id];
+                    const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
+                    // Undo the modified offset
+                    const u64 dst_offset = copy.dst_offset - download_staging.offset;
+                    const u8* read_mapped_memory = download_staging.mapped_span.data() + dst_offset;
+                    cpu_memory.WriteBlockUnsafe(cpu_addr, read_mapped_memory, copy.size);
+                }
+            } else {
+                const std::span<u8> immediate_buffer = ImmediateBuffer(largest_copy);
+                for (const auto& [copy, buffer_id] : downloads) {
+                    Buffer& buffer = slot_buffers[buffer_id];
+                    buffer.ImmediateDownload(copy.src_offset,
+                                             immediate_buffer.subspan(0, copy.size));
+                    const VAddr cpu_addr = buffer.CpuAddr() + copy.src_offset;
+                    cpu_memory.WriteBlockUnsafe(cpu_addr, immediate_buffer.data(), copy.size);
+                }
             }
         }
     }
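The restructure above changes who decides between async and blocking downloads: previously the per-frame active_async_buffers flag (derived from the GPU accuracy setting) gated the async path; now backends that implement async downloads always stage asynchronously, and only the non-async fallback consults Settings::IsGPULevelHigh(). A compact, standalone restatement of the new selection, where the printed strings are hypothetical stand-ins for the staged copy sequences in the diff:

#include <cstdio>

constexpr bool kImplementsAsyncDownloads = true; // IMPLEMENTS_ASYNC_DOWNLOADS
constexpr bool kUseMemoryMaps = true;            // USE_MEMORY_MAPS
static bool IsGPULevelHigh() { return false; }   // Settings::IsGPULevelHigh()

void CommitDownloads() {
    if constexpr (kImplementsAsyncDownloads) {
        // Always stage asynchronously: PreCopyBarrier -> CopyBuffer per download
        // -> PostCopyBarrier, then queue the normalized copies for completion.
        std::puts("async staged download");
    } else {
        if (!IsGPULevelHigh()) {
            // Reactive flushing only: drop committed/uncommitted ranges instead.
            std::puts("drop pending ranges");
        } else if constexpr (kUseMemoryMaps) {
            // Blocking path: copy to staging, runtime.Finish(), write back to CPU.
            std::puts("blocking staged download");
        } else {
            // Last resort: per-copy ImmediateDownload into a scratch buffer.
            std::puts("immediate download");
        }
    }
}

int main() { CommitDownloads(); }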
@@ -1629,7 +1639,6 @@ void BufferCache<P>::DeleteBuffer(BufferId buffer_id, bool do_not_mark) {
     replace(transform_feedback_buffers);
     replace(compute_uniform_buffers);
     replace(compute_storage_buffers);
-    std::erase(cached_write_buffer_ids, buffer_id);
 
     // Mark the whole buffer as CPU written to stop tracking CPU writes
     if (!do_not_mark) {
diff --git a/src/video_core/buffer_cache/buffer_cache_base.h b/src/video_core/buffer_cache/buffer_cache_base.h
index 656baa550..0445ec47f 100644
--- a/src/video_core/buffer_cache/buffer_cache_base.h
+++ b/src/video_core/buffer_cache/buffer_cache_base.h
@@ -188,6 +188,8 @@ public:
 
     void DownloadMemory(VAddr cpu_addr, u64 size);
 
+    std::optional<VideoCore::RasterizerDownloadArea> GetFlushArea(VAddr cpu_addr, u64 size);
+
     bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<const u8> inlined_buffer);
 
     void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size);
@@ -541,8 +543,6 @@ private:
     std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES>, Empty>
         uniform_buffer_binding_sizes{};
 
-    std::vector<BufferId> cached_write_buffer_ids;
-
     MemoryTracker memory_tracker;
     IntervalSet uncommitted_ranges;
     IntervalSet common_ranges;
@@ -572,9 +572,8 @@ private:
     u64 critical_memory = 0;
     BufferId inline_buffer_id;
 
-    bool active_async_buffers = false;
-
     std::array<BufferId, ((1ULL << 39) >> CACHING_PAGEBITS)> page_table;
+    std::vector<u8> tmp_buffer;
 };
 
 } // namespace VideoCommon
diff --git a/src/video_core/buffer_cache/memory_tracker_base.h b/src/video_core/buffer_cache/memory_tracker_base.h
index dc4ebfcaa..6036b21c9 100644
--- a/src/video_core/buffer_cache/memory_tracker_base.h
+++ b/src/video_core/buffer_cache/memory_tracker_base.h
@@ -66,6 +66,14 @@ public:
         });
     }
 
+    /// Returns true if a region has been marked as Preflushable
+    [[nodiscard]] bool IsRegionPreflushable(VAddr query_cpu_addr, u64 query_size) noexcept {
+        return IteratePages<false>(
+            query_cpu_addr, query_size, [](Manager* manager, u64 offset, size_t size) {
+                return manager->template IsRegionModified<Type::Preflushable>(offset, size);
+            });
+    }
+
     /// Mark region as CPU modified, notifying the rasterizer about this change
     void MarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 query_size) {
         IteratePages<true>(dirty_cpu_addr, query_size,
@@ -93,6 +101,15 @@ public:
         });
     }
 
+    /// Mark region as preflushable
+    void MarkRegionAsPreflushable(VAddr dirty_cpu_addr, u64 query_size) noexcept {
+        IteratePages<true>(dirty_cpu_addr, query_size,
+                           [](Manager* manager, u64 offset, size_t size) {
+                               manager->template ChangeRegionState<Type::Preflushable, true>(
+                                   manager->GetCpuAddr() + offset, size);
+                           });
+    }
+
     /// Unmark region as modified from the host GPU
     void UnmarkRegionAsGpuModified(VAddr dirty_cpu_addr, u64 query_size) noexcept {
         IteratePages<true>(dirty_cpu_addr, query_size,
@@ -102,6 +119,15 @@ public:
         });
     }
 
+    /// Unmark region as preflushable
+    void UnmarkRegionAsPreflushable(VAddr dirty_cpu_addr, u64 query_size) noexcept {
+        IteratePages<true>(dirty_cpu_addr, query_size,
+                           [](Manager* manager, u64 offset, size_t size) {
+                               manager->template ChangeRegionState<Type::Preflushable, false>(
+                                   manager->GetCpuAddr() + offset, size);
+                           });
+    }
+
     /// Mark region as modified from the CPU
     /// but don't mark it as modified until FlushCachedWrites is called.
     void CachedCpuWrite(VAddr dirty_cpu_addr, u64 query_size) {
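The tracker additions give Preflushable the same page-granular semantics as the other states: marking sets bits across the covered pages, the query returns true if any page in the region has the bit, and unmarking clears it. A toy, self-contained model of that lifecycle follows; a plain vector<bool> replaces the real per-manager 64-page word iteration.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

class ToyTracker {
public:
    explicit ToyTracker(std::size_t pages) : preflushable(pages, false) {}

    void MarkRegionAsPreflushable(uint64_t addr, uint64_t size) { Set(addr, size, true); }
    void UnmarkRegionAsPreflushable(uint64_t addr, uint64_t size) { Set(addr, size, false); }

    bool IsRegionPreflushable(uint64_t addr, uint64_t size) const {
        for (uint64_t page = addr / PAGE; page <= (addr + size - 1) / PAGE; ++page) {
            if (preflushable[page]) {
                return true; // any marked page makes the whole region preflushable
            }
        }
        return false;
    }

private:
    static constexpr uint64_t PAGE = 0x1000;
    void Set(uint64_t addr, uint64_t size, bool value) {
        for (uint64_t page = addr / PAGE; page <= (addr + size - 1) / PAGE; ++page) {
            preflushable[page] = value;
        }
    }
    std::vector<bool> preflushable;
};

int main() {
    ToyTracker tracker(256);
    tracker.MarkRegionAsPreflushable(0x2000, 0x2000); // pages 2-3
    std::printf("%d\n", tracker.IsRegionPreflushable(0x3000, 0x100)); // 1
    tracker.UnmarkRegionAsPreflushable(0x2000, 0x2000);
    std::printf("%d\n", tracker.IsRegionPreflushable(0x3000, 0x100)); // 0
}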
diff --git a/src/video_core/buffer_cache/word_manager.h b/src/video_core/buffer_cache/word_manager.h
index a42455045..a336bde41 100644
--- a/src/video_core/buffer_cache/word_manager.h
+++ b/src/video_core/buffer_cache/word_manager.h
@@ -26,6 +26,7 @@ enum class Type {
     GPU,
     CachedCPU,
     Untracked,
+    Preflushable,
 };
 
 /// Vector tracking modified pages tightly packed with small vector optimization
@@ -55,17 +56,20 @@ struct Words {
             gpu.stack.fill(0);
             cached_cpu.stack.fill(0);
             untracked.stack.fill(~u64{0});
+            preflushable.stack.fill(0);
         } else {
             // Share allocation between CPU and GPU pages and set their default values
-            u64* const alloc = new u64[num_words * 4];
+            u64* const alloc = new u64[num_words * 5];
             cpu.heap = alloc;
             gpu.heap = alloc + num_words;
             cached_cpu.heap = alloc + num_words * 2;
             untracked.heap = alloc + num_words * 3;
+            preflushable.heap = alloc + num_words * 4;
             std::fill_n(cpu.heap, num_words, ~u64{0});
             std::fill_n(gpu.heap, num_words, 0);
             std::fill_n(cached_cpu.heap, num_words, 0);
             std::fill_n(untracked.heap, num_words, ~u64{0});
+            std::fill_n(preflushable.heap, num_words, 0);
         }
         // Clean up trailing bits
         const u64 last_word_size = size_bytes % BYTES_PER_WORD;
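Growing the shared heap allocation from four to five bitmap planes keeps the new preflushable words contiguous with the existing ones. A standalone sketch of the carve-up and the default values; num_words is an arbitrary size chosen for the demo.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
    const std::size_t num_words = 8; // one u64 word tracks 64 pages
    uint64_t* const alloc = new uint64_t[num_words * 5]; // one block, five planes
    uint64_t* const cpu = alloc;
    uint64_t* const gpu = alloc + num_words;
    uint64_t* const cached_cpu = alloc + num_words * 2;
    uint64_t* const untracked = alloc + num_words * 3;
    uint64_t* const preflushable = alloc + num_words * 4;
    std::fill_n(cpu, num_words, ~uint64_t{0});       // everything CPU-modified
    std::fill_n(gpu, num_words, uint64_t{0});        // nothing GPU-modified
    std::fill_n(cached_cpu, num_words, uint64_t{0});
    std::fill_n(untracked, num_words, ~uint64_t{0}); // everything untracked
    std::fill_n(preflushable, num_words, uint64_t{0});
    std::printf("preflushable plane starts %zu words in\n",
                static_cast<std::size_t>(preflushable - alloc));
    delete[] alloc;
}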
@@ -88,13 +92,14 @@ struct Words {
         gpu = rhs.gpu;
         cached_cpu = rhs.cached_cpu;
         untracked = rhs.untracked;
+        preflushable = rhs.preflushable;
         rhs.cpu.heap = nullptr;
         return *this;
     }
 
     Words(Words&& rhs) noexcept
         : size_bytes{rhs.size_bytes}, num_words{rhs.num_words}, cpu{rhs.cpu}, gpu{rhs.gpu},
-          cached_cpu{rhs.cached_cpu}, untracked{rhs.untracked} {
+          cached_cpu{rhs.cached_cpu}, untracked{rhs.untracked}, preflushable{rhs.preflushable} {
         rhs.cpu.heap = nullptr;
     }
 
@@ -129,6 +134,8 @@ struct Words {
             return std::span<u64>(cached_cpu.Pointer(IsShort()), num_words);
         } else if constexpr (type == Type::Untracked) {
             return std::span<u64>(untracked.Pointer(IsShort()), num_words);
+        } else if constexpr (type == Type::Preflushable) {
+            return std::span<u64>(preflushable.Pointer(IsShort()), num_words);
         }
     }
 
@@ -142,6 +149,8 @@ struct Words {
             return std::span<const u64>(cached_cpu.Pointer(IsShort()), num_words);
         } else if constexpr (type == Type::Untracked) {
             return std::span<const u64>(untracked.Pointer(IsShort()), num_words);
+        } else if constexpr (type == Type::Preflushable) {
+            return std::span<const u64>(preflushable.Pointer(IsShort()), num_words);
         }
     }
 
@@ -151,6 +160,7 @@ struct Words {
     WordsArray<stack_words> gpu;
     WordsArray<stack_words> cached_cpu;
     WordsArray<stack_words> untracked;
+    WordsArray<stack_words> preflushable;
 };
 
 template <class RasterizerInterface, size_t stack_words = 1>
@@ -292,6 +302,9 @@ public:
                              (pending_pointer - pending_offset) * BYTES_PER_PAGE);
         };
         IterateWords(offset, size, [&](size_t index, u64 mask) {
+            if constexpr (type == Type::GPU) {
+                mask &= ~untracked_words[index];
+            }
             const u64 word = state_words[index] & mask;
             if constexpr (clear) {
                 if constexpr (type == Type::CPU || type == Type::CachedCPU) {
@@ -340,8 +353,13 @@ public:
         static_assert(type != Type::Untracked);
 
         const std::span<const u64> state_words = words.template Span<type>();
+        [[maybe_unused]] const std::span<const u64> untracked_words =
+            words.template Span<Type::Untracked>();
         bool result = false;
         IterateWords(offset, size, [&](size_t index, u64 mask) {
+            if constexpr (type == Type::GPU) {
+                mask &= ~untracked_words[index];
+            }
             const u64 word = state_words[index] & mask;
             if (word != 0) {
                 result = true;
@@ -362,9 +380,14 @@ public:
     [[nodiscard]] std::pair<u64, u64> ModifiedRegion(u64 offset, u64 size) const noexcept {
         static_assert(type != Type::Untracked);
         const std::span<const u64> state_words = words.template Span<type>();
+        [[maybe_unused]] const std::span<const u64> untracked_words =
+            words.template Span<Type::Untracked>();
         u64 begin = std::numeric_limits<u64>::max();
         u64 end = 0;
         IterateWords(offset, size, [&](size_t index, u64 mask) {
+            if constexpr (type == Type::GPU) {
+                mask &= ~untracked_words[index];
+            }
             const u64 word = state_words[index] & mask;
             if (word == 0) {
                 return;
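The recurring constexpr filter added to these three word-manager loops is the other half of reactive flushing: when querying GPU-modified state, pages still flagged Untracked are masked out first, so a region only reports as GPU-modified once the CPU actually tracks it. A worked example on a single 8-page word:

#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t gpu_words = 0b1111'0000;       // GPU wrote pages 4-7
    const uint64_t untracked_words = 0b1100'0000; // pages 6-7 are untracked
    uint64_t mask = ~uint64_t{0};                 // query covers every page
    mask &= ~untracked_words;                     // the new constexpr GPU filter
    const uint64_t word = gpu_words & mask;       // only tracked GPU pages remain
    std::printf("reported pages: 0x%llx\n",
                static_cast<unsigned long long>(word)); // 0x30 -> pages 4-5
}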