author    Fernando Sahmkow    2024-02-04 14:44:38 +0100
committer Fernando Sahmkow    2024-02-04 20:01:50 +0100
commit accccc0cbf54bb080c1180ad47445aada317454c (patch)
tree   652e97ff215283803d64b502818fcd248e94e87b /src/core
parent Common: Introduce Range Sets (diff)
NVDRV: Refactor HeapMapper to use RangeSets
Diffstat (limited to 'src/core')
-rw-r--r-- src/core/hle/service/nvdrv/core/heap_mapper.cpp | 187
1 file changed, 43 insertions(+), 144 deletions(-)
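
Note: Common::RangeSet and Common::SplitRangeSet come from the parent commit ("Common: Introduce Range Sets"). Their real definitions live in common/range_sets.h and common/range_sets.inc; the sketch below only reconstructs the subset this diff exercises, with signatures inferred from the call sites rather than copied from the header, so treat it as an approximation.

    // Approximate shape of the Range Set interface as used by this change
    // (inferred from call sites in the diff below; not the actual header).
    #include <cstddef>

    namespace Common {

    // Plain set of ranges: overlapping additions are merged.
    template <typename AddressType>
    class RangeSet {
    public:
        void Add(AddressType base, size_t size);      // insert [base, base + size)
        void Subtract(AddressType base, size_t size); // remove [base, base + size)
        void Clear();
        template <typename Func>
        void ForEach(Func&& func) const;              // func(start, end)
    };

    // Split variant: keeps a use count per sub-range, splitting at overlap
    // boundaries, much like the boost::icl::split_interval_map it replaces.
    template <typename AddressType>
    class SplitRangeSet {
    public:
        void Add(AddressType base, size_t size);      // raises the use count over the range
        template <typename Func>
        void Subtract(AddressType base, size_t size,
                      Func&& on_delete);              // on_delete(start, end) when a count hits zero
        template <typename Func>
        void ForEach(Func&& func) const;              // func(start, end, s32 count)
        template <typename Func>
        void ForEachInRange(AddressType base, size_t size, Func&& func) const;
    };

    } // namespace Common
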
diff --git a/src/core/hle/service/nvdrv/core/heap_mapper.cpp b/src/core/hle/service/nvdrv/core/heap_mapper.cpp
index 096dc5deb..542125a1c 100644
--- a/src/core/hle/service/nvdrv/core/heap_mapper.cpp
+++ b/src/core/hle/service/nvdrv/core/heap_mapper.cpp
@@ -3,110 +3,21 @@
 
 #include <mutex>
 
-#include <boost/container/small_vector.hpp>
-#define BOOST_NO_MT
-#include <boost/pool/detail/mutex.hpp>
-#undef BOOST_NO_MT
-#include <boost/icl/interval.hpp>
-#include <boost/icl/interval_base_set.hpp>
-#include <boost/icl/interval_set.hpp>
-#include <boost/icl/split_interval_map.hpp>
-#include <boost/pool/pool.hpp>
-#include <boost/pool/pool_alloc.hpp>
-#include <boost/pool/poolfwd.hpp>
-
+#include "common/range_sets.h"
+#include "common/range_sets.inc"
 #include "core/hle/service/nvdrv/core/heap_mapper.h"
 #include "video_core/host1x/host1x.h"
 
-namespace boost {
-template <typename T>
-class fast_pool_allocator<T, default_user_allocator_new_delete, details::pool::null_mutex, 4096, 0>;
-}
-
 namespace Service::Nvidia::NvCore {
 
-using IntervalCompare = std::less<DAddr>;
-using IntervalInstance = boost::icl::interval_type_default<DAddr, std::less>;
-using IntervalAllocator = boost::fast_pool_allocator<DAddr>;
-using IntervalSet = boost::icl::interval_set<DAddr>;
-using IntervalType = typename IntervalSet::interval_type;
-
-template <typename Type>
-struct counter_add_functor : public boost::icl::identity_based_inplace_combine<Type> {
-    // types
-    typedef counter_add_functor<Type> type;
-    typedef boost::icl::identity_based_inplace_combine<Type> base_type;
-
-    // public member functions
-    void operator()(Type& current, const Type& added) const {
-        current += added;
-        if (current < base_type::identity_element()) {
-            current = base_type::identity_element();
-        }
-    }
-
-    // public static functions
-    static void version(Type&){};
-};
-
-using OverlapCombine = counter_add_functor<int>;
-using OverlapSection = boost::icl::inter_section<int>;
-using OverlapCounter = boost::icl::split_interval_map<DAddr, int>;
-
 struct HeapMapper::HeapMapperInternal {
-    HeapMapperInternal(Tegra::Host1x::Host1x& host1x) : device_memory{host1x.MemoryManager()} {}
+    HeapMapperInternal(Tegra::Host1x::Host1x& host1x) : m_device_memory{host1x.MemoryManager()} {}
     ~HeapMapperInternal() = default;
 
-    template <typename Func>
-    void ForEachInOverlapCounter(OverlapCounter& current_range, VAddr cpu_addr, u64 size,
-                                 Func&& func) {
-        const DAddr start_address = cpu_addr;
-        const DAddr end_address = start_address + size;
-        const IntervalType search_interval{start_address, end_address};
-        auto it = current_range.lower_bound(search_interval);
-        if (it == current_range.end()) {
-            return;
-        }
-        auto end_it = current_range.upper_bound(search_interval);
-        for (; it != end_it; it++) {
-            auto& inter = it->first;
-            DAddr inter_addr_end = inter.upper();
-            DAddr inter_addr = inter.lower();
-            if (inter_addr_end > end_address) {
-                inter_addr_end = end_address;
-            }
-            if (inter_addr < start_address) {
-                inter_addr = start_address;
-            }
-            func(inter_addr, inter_addr_end, it->second);
-        }
-    }
-
-    void RemoveEachInOverlapCounter(OverlapCounter& current_range,
-                                    const IntervalType search_interval, int subtract_value) {
-        bool any_removals = false;
-        current_range.add(std::make_pair(search_interval, subtract_value));
-        do {
-            any_removals = false;
-            auto it = current_range.lower_bound(search_interval);
-            if (it == current_range.end()) {
-                return;
-            }
-            auto end_it = current_range.upper_bound(search_interval);
-            for (; it != end_it; it++) {
-                if (it->second <= 0) {
-                    any_removals = true;
-                    current_range.erase(it);
-                    break;
-                }
-            }
-        } while (any_removals);
-    }
-
-    IntervalSet base_set;
-    OverlapCounter mapping_overlaps;
-    Tegra::MaxwellDeviceMemoryManager& device_memory;
-    std::mutex guard;
+    Common::RangeSet<VAddr> m_temporary_set;
+    Common::SplitRangeSet<VAddr> m_mapped_ranges;
+    Tegra::MaxwellDeviceMemoryManager& m_device_memory;
+    std::mutex m_guard;
 };
 
 HeapMapper::HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size, Core::Asid asid,
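
The large deletion above is worth a gloss: the boost::icl machinery implemented a reference-counted split interval map (counter_add_functor clamps counts at zero, and RemoveEachInOverlapCounter decrements counts and erases slices that reach zero), which is exactly the bookkeeping SplitRangeSet now encapsulates. The snippet below is illustrative only, assuming SplitRangeSet mirrors the split_interval_map counting behavior it replaces; the types come from yuzu's common headers.

    // Illustrative only (not part of this commit): the overlap counting the
    // deleted boost::icl code implemented, assumed to carry over to SplitRangeSet.
    #include "common/common_types.h" // VAddr, s32
    #include "common/range_sets.h"
    #include "common/range_sets.inc"

    void OverlapCountingDemo() {
        Common::SplitRangeSet<VAddr> counts;
        counts.Add(0x0000, 0x3000); // [0x0000, 0x3000) -> count 1
        counts.Add(0x0000, 0x3000); // [0x0000, 0x3000) -> count 2
        counts.Add(0x1000, 0x1000); // splits the range at the overlap boundaries
        counts.ForEach([](VAddr start, VAddr end, s32 count) {
            // visits [0x0000, 0x1000)=2, [0x1000, 0x2000)=3, [0x2000, 0x3000)=2
        });
        const auto on_zero = [](VAddr start, VAddr end) { /* sub-range fully released */ };
        counts.Subtract(0x0000, 0x3000, on_zero); // counts drop to 1, 2, 1; nothing deleted
        counts.Subtract(0x0000, 0x3000, on_zero); // outer thirds hit 0 -> on_zero runs for both
    }
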
@@ -116,60 +27,48 @@ HeapMapper::HeapMapper(VAddr start_vaddress, DAddr start_daddress, size_t size,
 }
 
 HeapMapper::~HeapMapper() {
-    m_internal->device_memory.Unmap(m_daddress, m_size);
+    // Unmap whatever has been mapped.
+    m_internal->m_mapped_ranges.ForEach([this](VAddr start_addr, VAddr end_addr, s32 count) {
+        const size_t sub_size = end_addr - start_addr;
+        const size_t offset = start_addr - m_vaddress;
+        m_internal->m_device_memory.Unmap(m_daddress + offset, sub_size);
+    });
 }
 
 DAddr HeapMapper::Map(VAddr start, size_t size) {
-    std::scoped_lock lk(m_internal->guard);
-    m_internal->base_set.clear();
-    const IntervalType interval{start, start + size};
-    m_internal->base_set.insert(interval);
-    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size,
-                                        [this](VAddr start_addr, VAddr end_addr, int) {
-                                            const IntervalType other{start_addr, end_addr};
-                                            m_internal->base_set.subtract(other);
-                                        });
-    if (!m_internal->base_set.empty()) {
-        auto it = m_internal->base_set.begin();
-        auto end_it = m_internal->base_set.end();
-        for (; it != end_it; it++) {
-            const VAddr inter_addr_end = it->upper();
-            const VAddr inter_addr = it->lower();
-            const size_t offset = inter_addr - m_vaddress;
-            const size_t sub_size = inter_addr_end - inter_addr;
-            m_internal->device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size,
-                                          m_asid);
-        }
-    }
-    m_internal->mapping_overlaps += std::make_pair(interval, 1);
-    m_internal->base_set.clear();
-    return m_daddress + (start - m_vaddress);
+    std::scoped_lock lk(m_internal->m_guard);
+    // Add the mapping range to a temporary range set.
+    m_internal->m_temporary_set.Clear();
+    m_internal->m_temporary_set.Add(start, size);
+
+    // Remove anything that's already mapped from the temporary range set.
+    m_internal->m_mapped_ranges.ForEachInRange(
+        start, size, [this](VAddr start_addr, VAddr end_addr, s32) {
+            m_internal->m_temporary_set.Subtract(start_addr, end_addr - start_addr);
+        });
+
+    // Map anything that has not been mapped yet.
+    m_internal->m_temporary_set.ForEach([this](VAddr start_addr, VAddr end_addr) {
+        const size_t sub_size = end_addr - start_addr;
+        const size_t offset = start_addr - m_vaddress;
+        m_internal->m_device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size, m_asid);
+    });
+
+    // Add the mapping range to the split map, to register the map and overlaps.
+    m_internal->m_mapped_ranges.Add(start, size);
+    m_internal->m_temporary_set.Clear();
+    return m_daddress + static_cast<DAddr>(start - m_vaddress);
 }
 
 void HeapMapper::Unmap(VAddr start, size_t size) {
-    std::scoped_lock lk(m_internal->guard);
-    m_internal->base_set.clear();
-    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size,
-                                        [this](VAddr start_addr, VAddr end_addr, int value) {
-                                            if (value <= 1) {
-                                                const IntervalType other{start_addr, end_addr};
-                                                m_internal->base_set.insert(other);
-                                            }
-                                        });
-    if (!m_internal->base_set.empty()) {
-        auto it = m_internal->base_set.begin();
-        auto end_it = m_internal->base_set.end();
-        for (; it != end_it; it++) {
-            const VAddr inter_addr_end = it->upper();
-            const VAddr inter_addr = it->lower();
-            const size_t offset = inter_addr - m_vaddress;
-            const size_t sub_size = inter_addr_end - inter_addr;
-            m_internal->device_memory.Unmap(m_daddress + offset, sub_size);
-        }
-    }
-    const IntervalType to_remove{start, start + size};
-    m_internal->RemoveEachInOverlapCounter(m_internal->mapping_overlaps, to_remove, -1);
-    m_internal->base_set.clear();
+    std::scoped_lock lk(m_internal->m_guard);
+
+    // Just subtract the range and whatever is deleted, unmap it.
+    m_internal->m_mapped_ranges.Subtract(start, size, [this](VAddr start_addr, VAddr end_addr) {
+        const size_t sub_size = end_addr - start_addr;
+        const size_t offset = start_addr - m_vaddress;
+        m_internal->m_device_memory.Unmap(m_daddress + offset, sub_size);
+    });
 }
 
 } // namespace Service::Nvidia::NvCore
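
The refactor re-expresses, rather than changes, the old reference-counted semantics of Map/Unmap: mapping an already-mapped range only bumps use counts, and unmapping touches device memory only for sub-ranges whose count drops to zero. A hypothetical walkthrough (identifiers and the trailing host1x constructor parameter are assumed from the header, not shown in this diff):

    // Hypothetical usage sketch (not from this commit) of the resulting behavior.
    #include "core/hle/service/nvdrv/core/heap_mapper.h"
    #include "video_core/host1x/host1x.h"

    void MapTwiceUnmapTwice(Tegra::Host1x::Host1x& host1x, VAddr vaddr, DAddr daddr,
                            Core::Asid asid) {
        Service::Nvidia::NvCore::HeapMapper mapper(vaddr, daddr, 0x10000, asid, host1x);
        mapper.Map(vaddr, 0x4000);            // device-maps heap offsets [0, 0x4000)
        mapper.Map(vaddr + 0x1000, 0x1000);   // already mapped: only the use count rises
        mapper.Unmap(vaddr, 0x4000);          // device-unmaps offsets [0, 0x1000) and
                                              // [0x2000, 0x4000); the middle page keeps one reference
        mapper.Unmap(vaddr + 0x1000, 0x1000); // last reference: the middle page is unmapped too
    }
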