author    ReinUsesLisp    2020-05-16 17:08:33 -0300
committer ReinUsesLisp    2020-05-21 16:02:20 -0300
commit    599274e3f0d97c36b31016ba63dcc300d0cf8f6a (patch)
tree      47f761bdbca2b95b8c48e926c50b4ceb97f44048 /src/video_core/buffer_cache
parent    Merge pull request #3926 from ogniK5377/keyboard-states (diff)
buffer_cache: Minor style changes
Minor style changes, mostly done now so I do not have to make them while working on other changes.
Diffstat (limited to 'src/video_core/buffer_cache')
 src/video_core/buffer_cache/buffer_cache.h  | 100
 src/video_core/buffer_cache/map_interval.h  |  94
 2 files changed, 65 insertions(+), 129 deletions(-)
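
The bulk of the diff below is mechanical: MapIntervalBase loses its trivial getters and setters and becomes a plain struct, so the buffer cache reads and writes the fields directly. As a rough, hypothetical before/after sketch of that call-site pattern (simplified standalone types, not the actual yuzu classes):

    // Before: state hidden behind one-line accessors.
    class IntervalOld {
    public:
        bool IsWritten() const { return is_written; }
        void MarkAsWritten(bool written) { is_written = written; }

    private:
        bool is_written = false;
    };

    // After: a plain struct; call sites touch the flag directly.
    struct IntervalNew {
        bool is_written = false;
    };

    int main() {
        IntervalNew map;
        if (!map.is_written) {     // was: if (!map.IsWritten())
            map.is_written = true; // was: map.MarkAsWritten(true)
        }
    }

The real code reaches these members through the MapInterval handle (map->is_written), as the hunks below show.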
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 56e570994..81134eb1f 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -40,14 +40,12 @@ public:
                             bool is_written = false, bool use_fast_cbuf = false) {
         std::lock_guard lock{mutex};
 
-        const std::optional<VAddr> cpu_addr_opt =
-            system.GPU().MemoryManager().GpuToCpuAddress(gpu_addr);
-
+        const auto& memory_manager = system.GPU().MemoryManager();
+        const std::optional<VAddr> cpu_addr_opt = memory_manager.GpuToCpuAddress(gpu_addr);
         if (!cpu_addr_opt) {
             return {GetEmptyBuffer(size), 0};
         }
-
-        VAddr cpu_addr = *cpu_addr_opt;
+        const VAddr cpu_addr = *cpu_addr_opt;
 
         // Cache management is a big overhead, so only cache entries with a given size.
         // TODO: Figure out which size is the best for given games.
@@ -84,9 +82,9 @@ public:
             if (Settings::IsGPULevelHigh() && Settings::values.use_asynchronous_gpu_emulation) {
                 MarkForAsyncFlush(map);
             }
-            if (!map->IsWritten()) {
-                map->MarkAsWritten(true);
-                MarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1);
+            if (!map->is_written) {
+                map->is_written = true;
+                MarkRegionAsWritten(map->start, map->end - 1);
             }
         }
 
@@ -133,11 +131,11 @@ public:
         std::lock_guard lock{mutex};
 
         std::vector<MapInterval> objects = GetMapsInRange(addr, size);
-        std::sort(objects.begin(), objects.end(), [](const MapInterval& a, const MapInterval& b) {
-            return a->GetModificationTick() < b->GetModificationTick();
-        });
+        std::sort(
+            objects.begin(), objects.end(),
+            [](const MapInterval& lhs, const MapInterval& rhs) { return lhs->ticks < rhs->ticks; });
         for (auto& object : objects) {
-            if (object->IsModified() && object->IsRegistered()) {
+            if (object->is_modified && object->is_registered) {
                 mutex.unlock();
                 FlushMap(object);
                 mutex.lock();
@@ -150,7 +148,7 @@ public:
 
         const std::vector<MapInterval> objects = GetMapsInRange(addr, size);
         return std::any_of(objects.cbegin(), objects.cend(), [](const MapInterval& map) {
-            return map->IsModified() && map->IsRegistered();
+            return map->is_modified && map->is_registered;
         });
     }
 
@@ -160,7 +158,7 @@ public:
 
         std::vector<MapInterval> objects = GetMapsInRange(addr, size);
         for (auto& object : objects) {
-            if (object->IsRegistered()) {
+            if (object->is_registered) {
                 Unregister(object);
             }
         }
@@ -170,9 +168,9 @@ public:
         std::lock_guard lock{mutex};
 
         for (const auto& object : GetMapsInRange(addr, size)) {
-            if (object->IsMemoryMarked() && object->IsRegistered()) {
+            if (object->is_memory_marked && object->is_registered) {
                 UnmarkMemory(object);
-                object->SetSyncPending(true);
+                object->is_sync_pending = true;
                 marked_for_unregister.emplace_back(object);
             }
         }
@@ -182,8 +180,8 @@ public:
         std::lock_guard lock{mutex};
 
         for (const auto& object : marked_for_unregister) {
-            if (object->IsRegistered()) {
-                object->SetSyncPending(false);
+            if (object->is_registered) {
+                object->is_sync_pending = false;
                 Unregister(object);
             }
         }
@@ -194,7 +192,7 @@ public:
         if (uncommitted_flushes) {
             auto commit_list = std::make_shared<std::list<MapInterval>>();
             for (auto& map : *uncommitted_flushes) {
-                if (map->IsRegistered() && map->IsModified()) {
+                if (map->is_registered && map->is_modified) {
                     // TODO(Blinkhawk): Implement backend asynchronous flushing
                     // AsyncFlushMap(map)
                     commit_list->push_back(map);
@@ -229,7 +227,7 @@ public:
             return;
         }
         for (MapInterval& map : *flush_list) {
-            if (map->IsRegistered()) {
+            if (map->is_registered) {
                 // TODO(Blinkhawk): Replace this for reading the asynchronous flush
                 FlushMap(map);
             }
@@ -266,45 +264,45 @@ protected:
 
     /// Register an object into the cache
    void Register(const MapInterval& new_map, bool inherit_written = false) {
-        const VAddr cpu_addr = new_map->GetStart();
+        const VAddr cpu_addr = new_map->start;
         if (!cpu_addr) {
             LOG_CRITICAL(HW_GPU, "Failed to register buffer with unmapped gpu_address 0x{:016x}",
-                         new_map->GetGpuAddress());
+                         new_map->gpu_addr);
             return;
         }
-        const std::size_t size = new_map->GetEnd() - new_map->GetStart();
-        new_map->MarkAsRegistered(true);
-        const IntervalType interval{new_map->GetStart(), new_map->GetEnd()};
+        const std::size_t size = new_map->end - new_map->start;
+        new_map->is_registered = true;
+        const IntervalType interval{new_map->start, new_map->end};
         mapped_addresses.insert({interval, new_map});
         rasterizer.UpdatePagesCachedCount(cpu_addr, size, 1);
-        new_map->SetMemoryMarked(true);
+        new_map->is_memory_marked = true;
         if (inherit_written) {
-            MarkRegionAsWritten(new_map->GetStart(), new_map->GetEnd() - 1);
-            new_map->MarkAsWritten(true);
+            MarkRegionAsWritten(new_map->start, new_map->end - 1);
+            new_map->is_written = true;
         }
     }
 
     void UnmarkMemory(const MapInterval& map) {
-        if (!map->IsMemoryMarked()) {
+        if (!map->is_memory_marked) {
             return;
         }
-        const std::size_t size = map->GetEnd() - map->GetStart();
-        rasterizer.UpdatePagesCachedCount(map->GetStart(), size, -1);
-        map->SetMemoryMarked(false);
+        const std::size_t size = map->end - map->start;
+        rasterizer.UpdatePagesCachedCount(map->start, size, -1);
+        map->is_memory_marked = false;
     }
 
     /// Unregisters an object from the cache
     void Unregister(const MapInterval& map) {
         UnmarkMemory(map);
-        map->MarkAsRegistered(false);
-        if (map->IsSyncPending()) {
+        map->is_registered = false;
+        if (map->is_sync_pending) {
+            map->is_sync_pending = false;
             marked_for_unregister.remove(map);
-            map->SetSyncPending(false);
         }
-        if (map->IsWritten()) {
-            UnmarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1);
+        if (map->is_written) {
+            UnmarkRegionAsWritten(map->start, map->end - 1);
         }
-        const IntervalType delete_interval{map->GetStart(), map->GetEnd()};
+        const IntervalType delete_interval{map->start, map->end};
         mapped_addresses.erase(delete_interval);
     }
 
@@ -345,10 +343,10 @@ private:
         bool modified_inheritance = false;
         // Calculate new buffer parameters
         for (auto& overlap : overlaps) {
-            new_start = std::min(overlap->GetStart(), new_start);
-            new_end = std::max(overlap->GetEnd(), new_end);
-            write_inheritance |= overlap->IsWritten();
-            modified_inheritance |= overlap->IsModified();
+            new_start = std::min(overlap->start, new_start);
+            new_end = std::max(overlap->end, new_end);
+            write_inheritance |= overlap->is_written;
+            modified_inheritance |= overlap->is_modified;
         }
         GPUVAddr new_gpu_addr = gpu_addr + new_start - cpu_addr;
         for (auto& overlap : overlaps) {
@@ -372,7 +370,7 @@ private:
         IntervalSet interval_set{};
         interval_set.add(base_interval);
         for (auto& overlap : overlaps) {
-            const IntervalType subtract{overlap->GetStart(), overlap->GetEnd()};
+            const IntervalType subtract{overlap->start, overlap->end};
             interval_set.subtract(subtract);
         }
         for (auto& interval : interval_set) {
@@ -406,11 +404,11 @@ private:
     }
 
     void FlushMap(MapInterval map) {
-        std::size_t size = map->GetEnd() - map->GetStart();
-        OwnerBuffer block = blocks[map->GetStart() >> block_page_bits];
+        std::size_t size = map->end - map->start;
+        OwnerBuffer block = blocks[map->start >> block_page_bits];
         staging_buffer.resize(size);
-        DownloadBlockData(block, block->GetOffset(map->GetStart()), size, staging_buffer.data());
-        system.Memory().WriteBlockUnsafe(map->GetStart(), staging_buffer.data(), size);
+        DownloadBlockData(block, block->GetOffset(map->start), size, staging_buffer.data());
+        system.Memory().WriteBlockUnsafe(map->start, staging_buffer.data(), size);
         map->MarkAsModified(false, 0);
     }
 
@@ -515,7 +513,7 @@ private:
             } else {
                 written_pages[page_start] = 1;
             }
-            page_start++;
+            ++page_start;
         }
     }
 
@@ -531,7 +529,7 @@ private:
                     written_pages.erase(it);
                 }
             }
-            page_start++;
+            ++page_start;
         }
     }
 
@@ -542,7 +540,7 @@ private:
             if (written_pages.count(page_start) > 0) {
                 return true;
             }
-            page_start++;
+            ++page_start;
         }
         return false;
     }
@@ -585,7 +583,7 @@ private:
     std::vector<u8> staging_buffer;
     std::list<MapInterval> marked_for_unregister;
 
-    std::shared_ptr<std::unordered_set<MapInterval>> uncommitted_flushes{};
+    std::shared_ptr<std::unordered_set<MapInterval>> uncommitted_flushes;
     std::list<std::shared_ptr<std::list<MapInterval>>> committed_flushes;
 
     std::recursive_mutex mutex;
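
A side note on the std::sort hunk above: the cache flushes the overlapping maps in modification-tick order, and the change only reformats the call and reads the tick counter directly instead of going through GetModificationTick(). A small standalone sketch of that ordering, with a simplified interval type standing in for yuzu's pointer-like MapInterval handle:

    #include <algorithm>
    #include <cstdint>
    #include <memory>
    #include <vector>

    // Simplified stand-ins; the real cache calls FlushMap() on each modified, registered entry.
    struct Interval {
        std::uint64_t ticks = 0; // last modification tick
        bool is_modified = false;
        bool is_registered = false;
    };
    using IntervalPtr = std::shared_ptr<Interval>;

    void FlushInOrder(std::vector<IntervalPtr>& objects) {
        // Oldest modifications are written back first, mirroring the comparator in the diff.
        std::sort(objects.begin(), objects.end(),
                  [](const IntervalPtr& lhs, const IntervalPtr& rhs) { return lhs->ticks < rhs->ticks; });
        for (auto& object : objects) {
            if (object->is_modified && object->is_registered) {
                // FlushMap(object) would run here in the real cache.
            }
        }
    }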
diff --git a/src/video_core/buffer_cache/map_interval.h b/src/video_core/buffer_cache/map_interval.h
index 29d8b26f3..1e77012d9 100644
--- a/src/video_core/buffer_cache/map_interval.h
+++ b/src/video_core/buffer_cache/map_interval.h
@@ -9,99 +9,37 @@
 
 namespace VideoCommon {
 
-class MapIntervalBase {
-public:
-    MapIntervalBase(const VAddr start, const VAddr end, const GPUVAddr gpu_addr)
+struct MapIntervalBase {
+    constexpr explicit MapIntervalBase(VAddr start, VAddr end, GPUVAddr gpu_addr) noexcept
         : start{start}, end{end}, gpu_addr{gpu_addr} {}
 
-    void SetCpuAddress(VAddr new_cpu_addr) {
-        cpu_addr = new_cpu_addr;
-    }
-
-    VAddr GetCpuAddress() const {
-        return cpu_addr;
-    }
-
-    GPUVAddr GetGpuAddress() const {
-        return gpu_addr;
-    }
-
-    bool IsInside(const VAddr other_start, const VAddr other_end) const {
+    constexpr bool IsInside(const VAddr other_start, const VAddr other_end) const noexcept {
         return (start <= other_start && other_end <= end);
     }
 
-    bool operator==(const MapIntervalBase& rhs) const {
-        return std::tie(start, end) == std::tie(rhs.start, rhs.end);
-    }
-
-    bool operator!=(const MapIntervalBase& rhs) const {
-        return !operator==(rhs);
-    }
-
-    void MarkAsRegistered(const bool registered) {
-        is_registered = registered;
-    }
-
-    bool IsRegistered() const {
-        return is_registered;
-    }
-
-    void SetMemoryMarked(bool is_memory_marked_) {
-        is_memory_marked = is_memory_marked_;
-    }
-
-    bool IsMemoryMarked() const {
-        return is_memory_marked;
-    }
-
-    void SetSyncPending(bool is_sync_pending_) {
-        is_sync_pending = is_sync_pending_;
-    }
-
-    bool IsSyncPending() const {
-        return is_sync_pending;
-    }
-
-    VAddr GetStart() const {
-        return start;
-    }
-
-    VAddr GetEnd() const {
-        return end;
-    }
-
-    void MarkAsModified(const bool is_modified_, const u64 tick) {
+    constexpr void MarkAsModified(bool is_modified_, u64 ticks_) noexcept {
         is_modified = is_modified_;
-        ticks = tick;
-    }
-
-    bool IsModified() const {
-        return is_modified;
-    }
-
-    u64 GetModificationTick() const {
-        return ticks;
+        ticks = ticks_;
     }
 
-    void MarkAsWritten(const bool is_written_) {
-        is_written = is_written_;
+    constexpr bool operator==(const MapIntervalBase& rhs) const noexcept {
+        return start == rhs.start && end == rhs.end;
     }
 
-    bool IsWritten() const {
-        return is_written;
+    constexpr bool operator!=(const MapIntervalBase& rhs) const noexcept {
+        return !operator==(rhs);
     }
 
-private:
     VAddr start;
     VAddr end;
     GPUVAddr gpu_addr;
-    VAddr cpu_addr{};
-    bool is_written{};
-    bool is_modified{};
-    bool is_registered{};
-    bool is_memory_marked{};
-    bool is_sync_pending{};
-    u64 ticks{};
+    VAddr cpu_addr = 0;
+    u64 ticks = 0;
+    bool is_written = false;
+    bool is_modified = false;
+    bool is_registered = false;
+    bool is_memory_marked = false;
+    bool is_sync_pending = false;
 };
 
 } // namespace VideoCommon
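
With the getters gone, MapIntervalBase is close to an aggregate and can be exercised directly. A minimal, self-contained usage sketch (placeholder type aliases for u64/VAddr/GPUVAddr; the real header gets them from yuzu's common types):

    #include <cstdint>

    using u64 = std::uint64_t;
    using VAddr = std::uint64_t;
    using GPUVAddr = std::uint64_t;

    // Mirrors the struct in the diff above, trimmed to the members used here.
    struct MapIntervalBase {
        constexpr explicit MapIntervalBase(VAddr start, VAddr end, GPUVAddr gpu_addr) noexcept
            : start{start}, end{end}, gpu_addr{gpu_addr} {}

        constexpr bool IsInside(const VAddr other_start, const VAddr other_end) const noexcept {
            return (start <= other_start && other_end <= end);
        }

        constexpr void MarkAsModified(bool is_modified_, u64 ticks_) noexcept {
            is_modified = is_modified_;
            ticks = ticks_;
        }

        VAddr start;
        VAddr end;
        GPUVAddr gpu_addr;
        u64 ticks = 0;
        bool is_modified = false;
        bool is_written = false;
    };

    int main() {
        MapIntervalBase map{0x1000, 0x2000, 0xA000};
        map.MarkAsModified(true, 42); // helper kept by the commit
        map.is_written = true;        // direct member access replaces MarkAsWritten/IsWritten
        return map.IsInside(0x1100, 0x1200) ? 0 : 1;
    }

In the cache itself these objects are reached through the MapInterval handle, so the same accesses appear as map->is_written and map->MarkAsModified(...).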