author     Liam                2024-01-16 23:35:48 -0500
committer  Fernando Sahmkow    2024-01-31 16:38:51 +0100
commit     8f848f43e979ac4049237d3b6a161696dd85372b (patch)
tree       e218fcb939560362b4293ab035669d776d6e0cb0
parent     Texture Cache: make sparse texture table per channel (diff)
smmu: use new range mutex construction for protecting counters
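
This change replaces the single std::mutex guarding the page counters with a
range-based lock: Common::ScopedRangeLock serializes only callers whose
[addr, addr + size) spans intersect, so counter updates on disjoint
device-address ranges proceed in parallel. A minimal sketch of the locking
discipline, assuming the types introduced in src/common/range_mutex.h below
(TouchCounters is a hypothetical caller, not part of the commit):

    #include "common/common_types.h"
    #include "common/range_mutex.h"

    Common::RangeMutex counter_guard;

    void TouchCounters(u64 addr, u64 size) {
        // Blocks only while another thread holds an overlapping
        // [addr, addr + size) range; disjoint ranges do not contend.
        // A zero-size lock is a no-op by construction.
        Common::ScopedRangeLock lk(counter_guard, addr, size);
        // ... update reference counters for the covered pages ...
    }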
Diffstat (limited to '')
-rw-r--r--  src/common/CMakeLists.txt          |  1
-rw-r--r--  src/common/range_mutex.h           | 93
-rw-r--r--  src/core/device_memory_manager.h   |  3
-rw-r--r--  src/core/device_memory_manager.inc | 11
4 files changed, 97 insertions(+), 11 deletions(-)
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index e30fea268..85926fc8f 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -106,6 +106,7 @@ add_library(common STATIC
     precompiled_headers.h
     quaternion.h
     range_map.h
+    range_mutex.h
     reader_writer_queue.h
     ring_buffer.h
     ${CMAKE_CURRENT_BINARY_DIR}/scm_rev.cpp
diff --git a/src/common/range_mutex.h b/src/common/range_mutex.h
new file mode 100644
index 000000000..d6c949811
--- /dev/null
+++ b/src/common/range_mutex.h
@@ -0,0 +1,93 @@
+// SPDX-FileCopyrightText: 2024 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <condition_variable>
+#include <mutex>
+
+#include "common/intrusive_list.h"
+
+namespace Common {
+
+class ScopedRangeLock;
+
+class RangeMutex {
+public:
+    explicit RangeMutex() = default;
+    ~RangeMutex() = default;
+
+private:
+    friend class ScopedRangeLock;
+
+    void Lock(ScopedRangeLock& l);
+    void Unlock(ScopedRangeLock& l);
+    bool HasIntersectionLocked(ScopedRangeLock& l);
+
+private:
+    std::mutex m_mutex;
+    std::condition_variable m_cv;
+
+    using LockList = Common::IntrusiveListBaseTraits<ScopedRangeLock>::ListType;
+    LockList m_list;
+};
+
+class ScopedRangeLock : public Common::IntrusiveListBaseNode<ScopedRangeLock> {
+public:
+    explicit ScopedRangeLock(RangeMutex& mutex, u64 address, u64 size)
+        : m_mutex(mutex), m_address(address), m_size(size) {
+        if (m_size > 0) {
+            m_mutex.Lock(*this);
+        }
+    }
+    ~ScopedRangeLock() {
+        if (m_size > 0) {
+            m_mutex.Unlock(*this);
+        }
+    }
+
+    u64 GetAddress() const {
+        return m_address;
+    }
+
+    u64 GetSize() const {
+        return m_size;
+    }
+
+private:
+    RangeMutex& m_mutex;
+    const u64 m_address{};
+    const u64 m_size{};
+};
+
+inline void RangeMutex::Lock(ScopedRangeLock& l) {
+    std::unique_lock lk{m_mutex};
+    m_cv.wait(lk, [&] { return !HasIntersectionLocked(l); });
+    m_list.push_back(l);
+}
+
+inline void RangeMutex::Unlock(ScopedRangeLock& l) {
+    {
+        std::scoped_lock lk{m_mutex};
+        m_list.erase(m_list.iterator_to(l));
+    }
+    m_cv.notify_all();
+}
+
+inline bool RangeMutex::HasIntersectionLocked(ScopedRangeLock& l) {
+    const auto cur_begin = l.GetAddress();
+    const auto cur_last = l.GetAddress() + l.GetSize() - 1;
+
+    for (const auto& other : m_list) {
+        const auto other_begin = other.GetAddress();
+        const auto other_last = other.GetAddress() + other.GetSize() - 1;
+
+        if (cur_begin <= other_last && other_begin <= cur_last) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+} // namespace Common
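
For reference, HasIntersectionLocked treats ranges as closed intervals
[begin, last] with last = begin + size - 1: two ranges intersect exactly when
each begins at or before the other's last byte. A standalone restatement of
the predicate (Intersects is a hypothetical helper for illustration, not part
of the header):

    #include <cstdint>

    constexpr bool Intersects(std::uint64_t a_begin, std::uint64_t a_size,
                              std::uint64_t b_begin, std::uint64_t b_size) {
        const auto a_last = a_begin + a_size - 1; // inclusive last byte
        const auto b_last = b_begin + b_size - 1;
        return a_begin <= b_last && b_begin <= a_last;
    }

    static_assert(Intersects(0x0000, 0x1000, 0x0FFF, 0x1));     // overlap at the last byte
    static_assert(!Intersects(0x0000, 0x1000, 0x1000, 0x1000)); // adjacent, disjoint

Both sizes are assumed non-zero here, matching the header: ScopedRangeLock
skips locking entirely when size == 0, so zero-length ranges never reach the
intersection test.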
diff --git a/src/core/device_memory_manager.h b/src/core/device_memory_manager.h
index ffeed46cc..63823602c 100644
--- a/src/core/device_memory_manager.h
+++ b/src/core/device_memory_manager.h
@@ -10,6 +10,7 @@
 #include <mutex>
 
 #include "common/common_types.h"
+#include "common/range_mutex.h"
 #include "common/scratch_buffer.h"
 #include "common/virtual_buffer.h"
 
@@ -204,7 +205,7 @@ private:
         (1ULL << (device_virtual_bits - page_bits)) / subentries;
     using CachedPages = std::array<CounterEntry, num_counter_entries>;
     std::unique_ptr<CachedPages> cached_pages;
-    std::mutex counter_guard;
+    Common::RangeMutex counter_guard;
     std::mutex mapping_guard;
 };
 
diff --git a/src/core/device_memory_manager.inc b/src/core/device_memory_manager.inc
index eab8a2731..0a59000aa 100644
--- a/src/core/device_memory_manager.inc
+++ b/src/core/device_memory_manager.inc
@@ -508,12 +508,7 @@ void DeviceMemoryManager<Traits>::UnregisterProcess(Asid asid) {
 
 template <typename Traits>
 void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size, s32 delta) {
-    std::unique_lock<std::mutex> lk(counter_guard, std::defer_lock);
-    const auto Lock = [&] {
-        if (!lk) {
-            lk.lock();
-        }
-    };
+    Common::ScopedRangeLock lk(counter_guard, addr, size);
     u64 uncache_begin = 0;
     u64 cache_begin = 0;
     u64 uncache_bytes = 0;
@@ -548,7 +543,6 @@ void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size
             }
             uncache_bytes += Memory::YUZU_PAGESIZE;
         } else if (uncache_bytes > 0) {
-            Lock();
             MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS,
                               uncache_bytes, false);
             uncache_bytes = 0;
@@ -559,7 +553,6 @@ void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size
             }
             cache_bytes += Memory::YUZU_PAGESIZE;
         } else if (cache_bytes > 0) {
-            Lock();
             MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes,
                               true);
             cache_bytes = 0;
@@ -567,12 +560,10 @@ void DeviceMemoryManager<Traits>::UpdatePagesCachedCount(DAddr addr, size_t size
         vpage++;
     }
     if (uncache_bytes > 0) {
-        Lock();
         MarkRegionCaching(memory_device_inter, uncache_begin << Memory::YUZU_PAGEBITS, uncache_bytes,
                           false);
     }
     if (cache_bytes > 0) {
-        Lock();
         MarkRegionCaching(memory_device_inter, cache_begin << Memory::YUZU_PAGEBITS, cache_bytes,
                           true);
     }
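
Net effect on UpdatePagesCachedCount: the deferred std::unique_lock and the
lazy Lock() helper are gone, and the whole span is locked once on entry.
Because the range lock only conflicts with intersecting spans, counter
updates for disjoint regions no longer serialize on one mutex; a sketch of
the resulting concurrency (Example is a hypothetical driver, assuming the
yuzu common headers are on the include path):

    #include <thread>

    #include "common/range_mutex.h"

    void Example(Common::RangeMutex& counter_guard) {
        // Disjoint spans: both threads hold their range locks at once.
        std::jthread t1([&] {
            Common::ScopedRangeLock lk(counter_guard, 0x00000, 0x4000);
            // ... update counters for [0x00000, 0x04000) ...
        });
        std::jthread t2([&] {
            Common::ScopedRangeLock lk(counter_guard, 0x10000, 0x4000);
            // ... update counters for [0x10000, 0x14000) ...
        });
        // An overlapping locker, e.g. (0x02000, 0x10000), would block in
        // RangeMutex::Lock until t1 and t2 release their ranges.
    }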