Diffstat (limited to 'src/common/heap_tracker.cpp')
-rw-r--r--  src/common/heap_tracker.cpp  281
1 file changed, 281 insertions, 0 deletions
diff --git a/src/common/heap_tracker.cpp b/src/common/heap_tracker.cpp
new file mode 100644
index 000000000..683208795
--- /dev/null
+++ b/src/common/heap_tracker.cpp
@@ -0,0 +1,281 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <fstream>
#include <vector>

#include "common/heap_tracker.h"
#include "common/logging/log.h"

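// Overview: separate-heap regions are tracked in m_mappings and made host-resident through
// DeferredMapSeparateHeap(), which also handles faults on regions that were previously
// evicted. When the resident map count exceeds the budget derived from vm.max_map_count,
// RebuildSeparateHeapAddressSpace() unmaps roughly half of the resident set so the process
// stays under the kernel's mapping limit.
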
namespace Common {

namespace {

s64 GetMaxPermissibleResidentMapCount() {
    // Default value, used if the sysctl cannot be read.
    s64 value = 65530;

    // Try to read how many mappings we can make.
    std::ifstream s("/proc/sys/vm/max_map_count");
    if (s64 read_value{}; s >> read_value) {
        value = read_value;
    }

    // Print, for debug.
    LOG_INFO(HW_Memory, "Current maximum map count: {}", value);

    // Allow 20000 maps for other code and to account for split inaccuracy.
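    // For example, with the common kernel default of 65530, this leaves a budget of 45530.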
    return std::max<s64>(value - 20000, 0);
}

} // namespace

HeapTracker::HeapTracker(Common::HostMemory& buffer)
    : m_buffer(buffer), m_max_resident_map_count(GetMaxPermissibleResidentMapCount()) {}
HeapTracker::~HeapTracker() = default;

void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
                      MemoryPermission perm, bool is_separate_heap) {
    // When mapping other memory, map pages immediately.
    if (!is_separate_heap) {
        m_buffer.Map(virtual_offset, host_offset, length, perm, false);
        return;
    }

    {
        // We are mapping part of a separate heap.
        std::scoped_lock lk{m_lock};

        auto* const map = new SeparateHeapMap{
            .vaddr = virtual_offset,
            .paddr = host_offset,
            .size = length,
            .tick = m_tick++,
            .perm = perm,
            .is_resident = false,
        };

        // Insert into mappings.
        m_map_count++;
        m_mappings.insert(*map);
    }

    // Finally, make the new mapping resident through the deferred map path.
    this->DeferredMapSeparateHeap(virtual_offset);
}

void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) {
    // If this is a separate heap...
    if (is_separate_heap) {
        std::scoped_lock lk{m_lock};

        const SeparateHeapMap key{
            .vaddr = virtual_offset,
        };

        // Split at the boundaries of the region we are removing.
        this->SplitHeapMapLocked(virtual_offset);
        this->SplitHeapMapLocked(virtual_offset + size);

        // Erase all mappings in range.
        auto it = m_mappings.find(key);
        while (it != m_mappings.end() && it->vaddr < virtual_offset + size) {
            // Get underlying item.
            auto* const item = std::addressof(*it);

            // If resident, erase from resident map.
            if (item->is_resident) {
                ASSERT(--m_resident_map_count >= 0);
                m_resident_mappings.erase(m_resident_mappings.iterator_to(*item));
            }

            // Erase from map.
            ASSERT(--m_map_count >= 0);
            it = m_mappings.erase(it);

            // Free the item.
            delete item;
        }
    }

    // Unmap pages.
    m_buffer.Unmap(virtual_offset, size, false);
}

void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission perm) {
    // Ensure no rebuild occurs while reprotecting.
    std::shared_lock lk{m_rebuild_lock};

    // Split at the boundaries of the region we are reprotecting.
    this->SplitHeapMap(virtual_offset, size);

    // Declare tracking variables.
    const VAddr end = virtual_offset + size;
    VAddr cur = virtual_offset;

    while (cur < end) {
        VAddr next = cur;
        bool should_protect = false;

        {
            std::scoped_lock lk2{m_lock};

            const SeparateHeapMap key{
                .vaddr = next,
            };

            // Try to get the next mapping corresponding to this address.
            const auto it = m_mappings.nfind(key);

            if (it == m_mappings.end()) {
                // There are no separate heap mappings remaining.
                next = end;
                should_protect = true;
            } else if (it->vaddr == cur) {
                // We are in range.
                // Update permission bits.
                it->perm = perm;

                // Determine next address and whether we should protect.
                next = cur + it->size;
                should_protect = it->is_resident;
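                // Non-resident mappings are skipped here; their updated permissions are
                // applied when they next become resident in DeferredMapSeparateHeap().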
            } else /* if (it->vaddr > cur) */ {
                // We weren't in range, but there is a block coming up that will be.
                next = it->vaddr;
                should_protect = true;
            }
        }

        // Clamp to end.
        next = std::min(next, end);

        // Reprotect, if we need to.
        if (should_protect) {
            m_buffer.Protect(cur, next - cur, perm);
        }

        // Advance.
        cur = next;
    }
}

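// Expected to be called with a faulting host pointer (e.g. from the host access-fault
// handler); translates it to a buffer offset and resolves the fault if it hit a tracked
// separate-heap region.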
bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) {
    if (m_buffer.IsInVirtualRange(fault_address)) {
        return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer());
    }

    return false;
}

bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) {
    bool rebuild_required = false;

    {
        std::scoped_lock lk{m_lock};

        // Check to ensure this was a non-resident separate heap mapping.
        const auto it = this->GetNearestHeapMapLocked(virtual_offset);
        if (it == m_mappings.end() || it->is_resident) {
            return false;
        }

        // Update tick before possible rebuild.
        it->tick = m_tick++;

        // Check if we need to rebuild.
        if (m_resident_map_count > m_max_resident_map_count) {
            rebuild_required = true;
        }

        // Map the area.
        m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);

        // This map is now resident.
        it->is_resident = true;
        m_resident_map_count++;
        m_resident_mappings.insert(*it);
    }

    if (rebuild_required) {
        // A rebuild was required, so perform it now.
        this->RebuildSeparateHeapAddressSpace();
    }

    return true;
}

void HeapTracker::RebuildSeparateHeapAddressSpace() {
    std::scoped_lock lk{m_rebuild_lock, m_lock};

    ASSERT(!m_resident_mappings.empty());

    // Dump half of the mappings.
    //
    // Despite being worse in theory, this has proven to be better in practice than more
    // regularly dumping a smaller amount, because it significantly reduces average case
    // lock contention.
    const size_t desired_count = std::min(m_resident_map_count, m_max_resident_map_count) / 2;
    const size_t evict_count = m_resident_map_count - desired_count;
    auto it = m_resident_mappings.begin();

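    // Note: eviction starts at m_resident_mappings.begin(); with the resident tree expected
    // to be keyed by tick (see the tick update in DeferredMapSeparateHeap), this drops the
    // least recently touched mappings first.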
    for (size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) {
        // Unmark and unmap.
        it->is_resident = false;
        m_buffer.Unmap(it->vaddr, it->size, false);

        // Advance.
        ASSERT(--m_resident_map_count >= 0);
        it = m_resident_mappings.erase(it);
    }
}

void HeapTracker::SplitHeapMap(VAddr offset, size_t size) {
    std::scoped_lock lk{m_lock};

    this->SplitHeapMapLocked(offset);
    this->SplitHeapMapLocked(offset + size);
}

void HeapTracker::SplitHeapMapLocked(VAddr offset) {
    const auto it = this->GetNearestHeapMapLocked(offset);
    if (it == m_mappings.end() || it->vaddr == offset) {
        // Not contained or no split required.
        return;
    }

    // Cache the original values.
    auto* const left = std::addressof(*it);
    const size_t orig_size = left->size;

    // Adjust the left map.
    const size_t left_size = offset - left->vaddr;
    left->size = left_size;

    // Create the new right map.
    auto* const right = new SeparateHeapMap{
        .vaddr = left->vaddr + left_size,
        .paddr = left->paddr + left_size,
        .size = orig_size - left_size,
        .tick = left->tick,
        .perm = left->perm,
        .is_resident = left->is_resident,
    };

    // Insert the new right map.
    m_map_count++;
    m_mappings.insert(*right);

    // If resident, also insert into resident map.
    if (right->is_resident) {
        m_resident_map_count++;
        m_resident_mappings.insert(*right);
    }
}

HeapTracker::AddrTree::iterator HeapTracker::GetNearestHeapMapLocked(VAddr offset) {
    const SeparateHeapMap key{
        .vaddr = offset,
    };

    return m_mappings.find(key);
}

} // namespace Common
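
// For reference, a minimal sketch of the fields this file assumes on SeparateHeapMap. This is
// illustrative only; the real definition lives in common/heap_tracker.h (not shown in this
// change) and additionally carries the intrusive tree hooks used by m_mappings (keyed by
// vaddr) and m_resident_mappings.
//
//   struct SeparateHeapMap {
//       VAddr vaddr{};            // guest offset of the region
//       size_t paddr{};           // offset into the host backing memory
//       size_t size{};            // length of the region in bytes
//       size_t tick{};            // monotonic counter stamped on map/access
//       MemoryPermission perm{};  // current protection of the region
//       bool is_resident{};       // whether the region is currently host-mapped
//   };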