path: root/src/video_core/rasterizer_accelerated.cpp
author    liamwhite 2023-12-16 11:47:21 -0500
committer GitHub 2023-12-16 11:47:21 -0500
commit 3bc7575c47f2460b249702c2844687b6c0106cc8 (patch)
tree   540ca2ced701b86da44fbd674dedbdcf81b83117 /src/video_core/rasterizer_accelerated.cpp
parent Merge pull request #12358 from liamwhite/optimized-alloc (diff)
parent video_core: lock interval map update (diff)
Merge pull request #12344 from liamwhite/its-free-real-estate
video_core: use interval map for page count tracking
Diffstat (limited to 'src/video_core/rasterizer_accelerated.cpp')
-rw-r--r-- src/video_core/rasterizer_accelerated.cpp | 99
1 file changed, 52 insertions(+), 47 deletions(-)
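
For context on what the diff below does: the commit drops the fixed array of per-page atomic counters and instead tracks one reference count per contiguous address range. As a rough sketch of the underlying technique, the following stand-alone program uses a plain boost::icl::interval_map with default traits; yuzu's real IntervalType and map member are declared in rasterizer_accelerated.h with custom combine traits and are not shown on this page, so the exact types here are an assumption, not the project's code. Overlapping additions split ranges and sum their counts:

// Minimal sketch of interval-map reference counting (assumed types, not yuzu's).
#include <cstdint>
#include <iostream>
#include <boost/icl/interval_map.hpp>

int main() {
    using IntervalMap = boost::icl::interval_map<std::uint64_t, std::uint16_t>;
    using Interval = boost::icl::interval<std::uint64_t>::type;

    IntervalMap counts;

    // Two callers pin overlapping ranges; the map splits them into
    // disjoint segments and sums the counts where they overlap.
    counts.add({Interval::right_open(0x0000, 0x3000), std::uint16_t{1}});
    counts.add({Interval::right_open(0x2000, 0x5000), std::uint16_t{1}});

    // Releasing a range subtracts; segments that drop to zero disappear.
    counts.subtract({Interval::right_open(0x0000, 0x1000), std::uint16_t{1}});

    // Remaining segments:
    //   [0x1000,0x2000) -> 1, [0x2000,0x3000) -> 2, [0x3000,0x5000) -> 1
    for (const auto& [interval, count] : counts) {
        std::cout << interval << " -> " << count << '\n';
    }
}

Because adjacent segments with equal counts are joined, a large cached region costs a handful of map nodes rather than one counter per page, which is the point of the change.
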
diff --git a/src/video_core/rasterizer_accelerated.cpp b/src/video_core/rasterizer_accelerated.cpp
index f200a650f..3c9477f6e 100644
--- a/src/video_core/rasterizer_accelerated.cpp
+++ b/src/video_core/rasterizer_accelerated.cpp
@@ -3,6 +3,7 @@
 
 #include <atomic>
 
+#include "common/alignment.h"
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/div_ceil.h"
@@ -11,61 +12,65 @@
 
 namespace VideoCore {
 
+static constexpr u16 IdentityValue = 1;
+
 using namespace Core::Memory;
 
-RasterizerAccelerated::RasterizerAccelerated(Memory& cpu_memory_)
-    : cached_pages(std::make_unique<CachedPages>()), cpu_memory{cpu_memory_} {}
+RasterizerAccelerated::RasterizerAccelerated(Memory& cpu_memory_) : map{}, cpu_memory{cpu_memory_} {
+    // We are tracking CPU memory, which cannot map more than 39 bits.
+    const VAddr start_address = 0;
+    const VAddr end_address = (1ULL << 39);
+    const IntervalType address_space_interval(start_address, end_address);
+    const auto value = std::make_pair(address_space_interval, IdentityValue);
+
+    map.add(value);
+}
 
 RasterizerAccelerated::~RasterizerAccelerated() = default;
 
-void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
-    u64 uncache_begin = 0;
-    u64 cache_begin = 0;
-    u64 uncache_bytes = 0;
-    u64 cache_bytes = 0;
-
-    std::atomic_thread_fence(std::memory_order_acquire);
-    const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
-    for (u64 page = addr >> YUZU_PAGEBITS; page != page_end; ++page) {
-        std::atomic_uint16_t& count = cached_pages->at(page >> 2).Count(page);
-
-        if (delta > 0) {
-            ASSERT_MSG(count.load(std::memory_order::relaxed) < UINT16_MAX, "Count may overflow!");
-        } else if (delta < 0) {
-            ASSERT_MSG(count.load(std::memory_order::relaxed) > 0, "Count may underflow!");
-        } else {
-            ASSERT_MSG(false, "Delta must be non-zero!");
-        }
-
-        // Adds or subtracts 1, as count is a unsigned 8-bit value
-        count.fetch_add(static_cast<u16>(delta), std::memory_order_release);
-
-        // Assume delta is either -1 or 1
-        if (count.load(std::memory_order::relaxed) == 0) {
-            if (uncache_bytes == 0) {
-                uncache_begin = page;
-            }
-            uncache_bytes += YUZU_PAGESIZE;
-        } else if (uncache_bytes > 0) {
-            cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes,
-                                                  false);
-            uncache_bytes = 0;
-        }
-        if (count.load(std::memory_order::relaxed) == 1 && delta > 0) {
-            if (cache_bytes == 0) {
-                cache_begin = page;
-            }
-            cache_bytes += YUZU_PAGESIZE;
-        } else if (cache_bytes > 0) {
-            cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true);
-            cache_bytes = 0;
-        }
-    }
-    if (uncache_bytes > 0) {
-        cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes, false);
-    }
-    if (cache_bytes > 0) {
-        cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true);
-    }
-}
+void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, bool cache) {
+    std::scoped_lock lk{map_lock};
+
+    // Align sizes.
+    addr = Common::AlignDown(addr, YUZU_PAGESIZE);
+    size = Common::AlignUp(size, YUZU_PAGESIZE);
+
+    // Declare the overall interval we are going to operate on.
+    const VAddr start_address = addr;
+    const VAddr end_address = addr + size;
+    const IntervalType modification_range(start_address, end_address);
+
+    // Find the boundaries of where to iterate.
+    const auto lower = map.lower_bound(modification_range);
+    const auto upper = map.upper_bound(modification_range);
+
+    // Iterate over the contained intervals.
+    for (auto it = lower; it != upper; it++) {
+        // Intersect interval range with modification range.
+        const auto current_range = modification_range & it->first;
+
+        // Calculate the address and size to operate over.
+        const auto current_addr = current_range.lower();
+        const auto current_size = current_range.upper() - current_addr;
+
+        // Get the current value of the range.
+        const auto value = it->second;
+
+        if (cache && value == IdentityValue) {
+            // If we are going to cache, and the value is not yet referenced, then cache this range.
+            cpu_memory.RasterizerMarkRegionCached(current_addr, current_size, true);
+        } else if (!cache && value == IdentityValue + 1) {
+            // If we are going to uncache, and this is the last reference, then uncache this range.
+            cpu_memory.RasterizerMarkRegionCached(current_addr, current_size, false);
+        }
+    }
+
+    // Update the set.
+    const auto value = std::make_pair(modification_range, IdentityValue);
+    if (cache) {
+        map.add(value);
+    } else {
+        map.subtract(value);
+    }
+}
 
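
To make the identity-element trick concrete: the constructor seeds the whole 39-bit CPU address space with IdentityValue = 1, so a segment is uncached exactly when its count is 1, and RasterizerMarkRegionCached only needs to fire on the 1 -> 2 and 2 -> 1 transitions. The model below reproduces that logic as a self-contained program under the same boost::icl assumption as the earlier sketch; MarkRegion, g_map, and kIdentity are illustrative stand-ins, not yuzu's names.

// Stand-alone model of the new UpdatePagesCachedCount logic (assumed types).
#include <cstdint>
#include <iostream>
#include <mutex>
#include <boost/icl/interval_map.hpp>

using VAddr = std::uint64_t;
using IntervalMap = boost::icl::interval_map<VAddr, std::uint16_t>;
using Interval = boost::icl::interval<VAddr>::type;

constexpr std::uint16_t kIdentity = 1;
constexpr VAddr kPageSize = 0x1000;

IntervalMap g_map;
std::mutex g_lock;

// Stand-in for cpu_memory.RasterizerMarkRegionCached.
void MarkRegion(VAddr addr, VAddr size, bool cached) {
    std::cout << (cached ? "cache   [" : "uncache [") << std::hex << addr
              << ", " << (addr + size) << ")\n";
}

void UpdatePagesCachedCount(VAddr addr, VAddr size, bool cache) {
    std::scoped_lock lk{g_lock};
    const Interval range = Interval::right_open(addr, addr + size);

    // Only segments crossing the identity boundary change cached status.
    const auto lower = g_map.lower_bound(range);
    const auto upper = g_map.upper_bound(range);
    for (auto it = lower; it != upper; ++it) {
        const auto current = range & it->first;
        const VAddr lo = current.lower();
        const VAddr len = current.upper() - lo;
        if (cache && it->second == kIdentity) {
            MarkRegion(lo, len, true);  // first reference: 1 -> 2
        } else if (!cache && it->second == kIdentity + 1) {
            MarkRegion(lo, len, false); // last reference: 2 -> 1
        }
    }

    if (cache) {
        g_map.add({range, kIdentity});
    } else {
        g_map.subtract({range, kIdentity});
    }
}

int main() {
    // Seed the tracked space with the identity value, as the constructor does.
    g_map.add({Interval::right_open(0, VAddr{1} << 39), kIdentity});

    UpdatePagesCachedCount(0x0000, 2 * kPageSize, true);  // caches both pages
    UpdatePagesCachedCount(0x1000, kPageSize, true);      // overlap: no output
    UpdatePagesCachedCount(0x0000, 2 * kPageSize, false); // uncaches page 0 only
    UpdatePagesCachedCount(0x1000, kPageSize, false);     // uncaches page 1
}

The scoped_lock here stands in for the map_lock taken at the top of the real function, added by the second parent commit ("video_core: lock interval map update") to serialize map mutation against concurrent callers.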