author     Narr the Reg                              2024-01-01 13:56:16 -0600
committer  GitHub                                    2024-01-01 13:56:16 -0600
commit     f0f92edbd0a78abda819251ddc325da4acc14216 (patch)
tree       6a23c1be26148c4137a6f67ebdf926a3f82ce47f /src/common
parent     Merge pull request #12501 from liamwhite/ips (diff)
parent     heap_tracker: use linear-time mapping eviction (diff)
Merge pull request #12466 from liamwhite/sh2
core: track separate heap allocation for linux
Diffstat (limited to 'src/common')
-rw-r--r--   src/common/CMakeLists.txt    |   2
-rw-r--r--   src/common/heap_tracker.cpp  | 281
-rw-r--r--   src/common/heap_tracker.h    |  98
-rw-r--r--   src/common/host_memory.cpp   |  10
-rw-r--r--   src/common/host_memory.h     |  11
5 files changed, 395 insertions, 7 deletions
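
Note: the new HeapTracker sits in front of HostMemory so that, on Linux, pages of the guest's separate heap are mapped lazily on first access rather than eagerly, which keeps the number of live host mappings under the kernel's vm.max_map_count limit. A minimal sketch of how the fault path might be wired up follows; only DeferredMapSeparateHeap comes from this change, while the signal-handler plumbing and globals are assumptions for illustration.

// Hypothetical wiring, not part of this diff: a SIGSEGV handler forwards the
// faulting address to the tracker, which maps the backing pages on demand.
// DeferredMapSeparateHeap(u8*) is the entry point added by this change; the
// handler and the global below are illustrative only.
#include <csignal>
#include <cstdlib>

#include "common/heap_tracker.h"

static Common::HeapTracker* g_heap_tracker = nullptr;

static void FaultHandler(int /*sig*/, siginfo_t* info, void* /*ctx*/) {
    u8* const fault_address = static_cast<u8*>(info->si_addr);

    // If the fault touched a tracked, non-resident separate-heap page, the
    // tracker maps it and the faulting instruction can simply be retried.
    if (g_heap_tracker != nullptr && g_heap_tracker->DeferredMapSeparateHeap(fault_address)) {
        return;
    }

    // Otherwise this is a genuine crash.
    std::abort();
}
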
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index b58a7073f..8c57d47c6 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -64,6 +64,8 @@ add_library(common STATIC
     fs/path_util.cpp
     fs/path_util.h
     hash.h
+    heap_tracker.cpp
+    heap_tracker.h
     hex_util.cpp
     hex_util.h
     host_memory.cpp
diff --git a/src/common/heap_tracker.cpp b/src/common/heap_tracker.cpp
new file mode 100644
index 000000000..683208795
--- /dev/null
+++ b/src/common/heap_tracker.cpp
@@ -0,0 +1,281 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <fstream>
#include <vector>

#include "common/heap_tracker.h"
#include "common/logging/log.h"

namespace Common {

namespace {

s64 GetMaxPermissibleResidentMapCount() {
    // Default value.
    s64 value = 65530;

    // Try to read how many mappings we can make.
    std::ifstream s("/proc/sys/vm/max_map_count");
    s >> value;

    // Print, for debug.
    LOG_INFO(HW_Memory, "Current maximum map count: {}", value);

    // Allow 20000 maps for other code and to account for split inaccuracy.
    return std::max<s64>(value - 20000, 0);
}

} // namespace

HeapTracker::HeapTracker(Common::HostMemory& buffer)
    : m_buffer(buffer), m_max_resident_map_count(GetMaxPermissibleResidentMapCount()) {}
HeapTracker::~HeapTracker() = default;

void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
                      MemoryPermission perm, bool is_separate_heap) {
    // When mapping other memory, map pages immediately.
    if (!is_separate_heap) {
        m_buffer.Map(virtual_offset, host_offset, length, perm, false);
        return;
    }

    {
        // We are mapping part of a separate heap.
        std::scoped_lock lk{m_lock};

        auto* const map = new SeparateHeapMap{
            .vaddr = virtual_offset,
            .paddr = host_offset,
            .size = length,
            .tick = m_tick++,
            .perm = perm,
            .is_resident = false,
        };

        // Insert into mappings.
        m_map_count++;
        m_mappings.insert(*map);
    }

    // Finally, map.
    this->DeferredMapSeparateHeap(virtual_offset);
}

void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) {
    // If this is a separate heap...
    if (is_separate_heap) {
        std::scoped_lock lk{m_lock};

        const SeparateHeapMap key{
            .vaddr = virtual_offset,
        };

        // Split at the boundaries of the region we are removing.
        this->SplitHeapMapLocked(virtual_offset);
        this->SplitHeapMapLocked(virtual_offset + size);

        // Erase all mappings in range.
        auto it = m_mappings.find(key);
        while (it != m_mappings.end() && it->vaddr < virtual_offset + size) {
            // Get underlying item.
            auto* const item = std::addressof(*it);

            // If resident, erase from resident map.
            if (item->is_resident) {
                ASSERT(--m_resident_map_count >= 0);
                m_resident_mappings.erase(m_resident_mappings.iterator_to(*item));
            }

            // Erase from map.
            ASSERT(--m_map_count >= 0);
            it = m_mappings.erase(it);

            // Free the item.
            delete item;
        }
    }

    // Unmap pages.
    m_buffer.Unmap(virtual_offset, size, false);
}

void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission perm) {
    // Ensure no rebuild occurs while reprotecting.
    std::shared_lock lk{m_rebuild_lock};

    // Split at the boundaries of the region we are reprotecting.
    this->SplitHeapMap(virtual_offset, size);

    // Declare tracking variables.
    const VAddr end = virtual_offset + size;
    VAddr cur = virtual_offset;

    while (cur < end) {
        VAddr next = cur;
        bool should_protect = false;

        {
            std::scoped_lock lk2{m_lock};

            const SeparateHeapMap key{
                .vaddr = next,
            };

            // Try to get the next mapping corresponding to this address.
            const auto it = m_mappings.nfind(key);

            if (it == m_mappings.end()) {
                // There are no separate heap mappings remaining.
                next = end;
                should_protect = true;
            } else if (it->vaddr == cur) {
                // We are in range.
                // Update permission bits.
                it->perm = perm;

                // Determine next address and whether we should protect.
                next = cur + it->size;
                should_protect = it->is_resident;
            } else /* if (it->vaddr > cur) */ {
                // We weren't in range, but there is a block coming up that will be.
                next = it->vaddr;
                should_protect = true;
            }
        }

        // Clamp to end.
        next = std::min(next, end);

        // Reprotect, if we need to.
        if (should_protect) {
            m_buffer.Protect(cur, next - cur, perm);
        }

        // Advance.
        cur = next;
    }
}

bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) {
    if (m_buffer.IsInVirtualRange(fault_address)) {
        return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer());
    }

    return false;
}

bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) {
    bool rebuild_required = false;

    {
        std::scoped_lock lk{m_lock};

        // Check to ensure this was a non-resident separate heap mapping.
        const auto it = this->GetNearestHeapMapLocked(virtual_offset);
        if (it == m_mappings.end() || it->is_resident) {
            return false;
        }

        // Update tick before possible rebuild.
        it->tick = m_tick++;

        // Check if we need to rebuild.
        if (m_resident_map_count > m_max_resident_map_count) {
            rebuild_required = true;
        }

        // Map the area.
        m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);

        // This map is now resident.
        it->is_resident = true;
        m_resident_map_count++;
        m_resident_mappings.insert(*it);
    }

    if (rebuild_required) {
        // A rebuild was required, so perform it now.
        this->RebuildSeparateHeapAddressSpace();
    }

    return true;
}

void HeapTracker::RebuildSeparateHeapAddressSpace() {
    std::scoped_lock lk{m_rebuild_lock, m_lock};

    ASSERT(!m_resident_mappings.empty());

    // Dump half of the mappings.
    //
    // Despite being worse in theory, this has proven to be better in practice than more
    // regularly dumping a smaller amount, because it significantly reduces average case
    // lock contention.
    const size_t desired_count = std::min(m_resident_map_count, m_max_resident_map_count) / 2;
    const size_t evict_count = m_resident_map_count - desired_count;
    auto it = m_resident_mappings.begin();

    for (size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) {
        // Unmark and unmap.
        it->is_resident = false;
        m_buffer.Unmap(it->vaddr, it->size, false);

        // Advance.
        ASSERT(--m_resident_map_count >= 0);
        it = m_resident_mappings.erase(it);
    }
}

void HeapTracker::SplitHeapMap(VAddr offset, size_t size) {
    std::scoped_lock lk{m_lock};

    this->SplitHeapMapLocked(offset);
    this->SplitHeapMapLocked(offset + size);
}

void HeapTracker::SplitHeapMapLocked(VAddr offset) {
    const auto it = this->GetNearestHeapMapLocked(offset);
    if (it == m_mappings.end() || it->vaddr == offset) {
        // Not contained or no split required.
        return;
    }

    // Cache the original values.
    auto* const left = std::addressof(*it);
    const size_t orig_size = left->size;

    // Adjust the left map.
    const size_t left_size = offset - left->vaddr;
    left->size = left_size;

    // Create the new right map.
    auto* const right = new SeparateHeapMap{
        .vaddr = left->vaddr + left_size,
        .paddr = left->paddr + left_size,
        .size = orig_size - left_size,
        .tick = left->tick,
        .perm = left->perm,
        .is_resident = left->is_resident,
    };

    // Insert the new right map.
    m_map_count++;
    m_mappings.insert(*right);

    // If resident, also insert into resident map.
    if (right->is_resident) {
        m_resident_map_count++;
        m_resident_mappings.insert(*right);
    }
}

HeapTracker::AddrTree::iterator HeapTracker::GetNearestHeapMapLocked(VAddr offset) {
    const SeparateHeapMap key{
        .vaddr = offset,
    };

    return m_mappings.find(key);
}

} // namespace Common
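
Both Unmap and Protect above start by splitting any mapping that straddles the boundaries of the affected region, so the loops that follow only ever see whole entries. The same idea on a plain interval map, as a standalone sketch (std::map stands in for the intrusive address tree; all names below are illustrative, not from this patch):

// Illustrative only: split the span containing `offset` into [start, offset)
// and [offset, end), mirroring what SplitHeapMapLocked does on the real tree.
#include <cstddef>
#include <cstdint>
#include <map>

struct Span {
    std::size_t size{};
    bool is_resident{};
};

// Key is the start address of each span.
using IntervalMap = std::map<std::uint64_t, Span>;

void SplitAt(IntervalMap& map, std::uint64_t offset) {
    auto it = map.upper_bound(offset);
    if (it == map.begin()) {
        return; // Nothing starts at or before `offset`.
    }
    --it;

    const std::uint64_t start = it->first;
    const std::uint64_t end = start + it->second.size;
    if (offset <= start || offset >= end) {
        return; // Not contained, or no split required.
    }

    // Shrink the left span and insert the right remainder.
    const Span right{end - offset, it->second.is_resident};
    it->second.size = offset - start;
    map.emplace(offset, right);
}
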
diff --git a/src/common/heap_tracker.h b/src/common/heap_tracker.h
new file mode 100644
index 000000000..ee5b0bf43
--- /dev/null
+++ b/src/common/heap_tracker.h
@@ -0,0 +1,98 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <atomic>
#include <mutex>
#include <set>
#include <shared_mutex>

#include "common/host_memory.h"
#include "common/intrusive_red_black_tree.h"

namespace Common {

struct SeparateHeapMap {
    Common::IntrusiveRedBlackTreeNode addr_node{};
    Common::IntrusiveRedBlackTreeNode tick_node{};
    VAddr vaddr{};
    PAddr paddr{};
    size_t size{};
    size_t tick{};
    MemoryPermission perm{};
    bool is_resident{};
};

struct SeparateHeapMapAddrComparator {
    static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
        if (lhs.vaddr < rhs.vaddr) {
            return -1;
        } else if (lhs.vaddr <= (rhs.vaddr + rhs.size - 1)) {
            return 0;
        } else {
            return 1;
        }
    }
};

struct SeparateHeapMapTickComparator {
    static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
        if (lhs.tick < rhs.tick) {
            return -1;
        } else if (lhs.tick > rhs.tick) {
            return 1;
        } else {
            return SeparateHeapMapAddrComparator::Compare(lhs, rhs);
        }
    }
};

class HeapTracker {
public:
    explicit HeapTracker(Common::HostMemory& buffer);
    ~HeapTracker();

    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm,
             bool is_separate_heap);
    void Unmap(size_t virtual_offset, size_t size, bool is_separate_heap);
    void Protect(size_t virtual_offset, size_t length, MemoryPermission perm);
    u8* VirtualBasePointer() {
        return m_buffer.VirtualBasePointer();
    }

    bool DeferredMapSeparateHeap(u8* fault_address);
    bool DeferredMapSeparateHeap(size_t virtual_offset);

private:
    using AddrTreeTraits =
        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::addr_node>;
    using AddrTree = AddrTreeTraits::TreeType<SeparateHeapMapAddrComparator>;

    using TickTreeTraits =
        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::tick_node>;
    using TickTree = TickTreeTraits::TreeType<SeparateHeapMapTickComparator>;

    AddrTree m_mappings{};
    TickTree m_resident_mappings{};

private:
    void SplitHeapMap(VAddr offset, size_t size);
    void SplitHeapMapLocked(VAddr offset);

    AddrTree::iterator GetNearestHeapMapLocked(VAddr offset);

    void RebuildSeparateHeapAddressSpace();

private:
    Common::HostMemory& m_buffer;
    const s64 m_max_resident_map_count;

    std::shared_mutex m_rebuild_lock{};
    std::mutex m_lock{};
    s64 m_map_count{};
    s64 m_resident_map_count{};
    size_t m_tick{};
};

} // namespace Common
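
Each SeparateHeapMap node is linked into two trees at once: m_mappings is ordered by virtual address for lookup and splitting, while m_resident_mappings is ordered by tick so that RebuildSeparateHeapAddressSpace can walk it from the front and evict the least recently touched mappings first. A simplified sketch of that eviction order, with std::set standing in for the intrusive tick tree (illustrative only):

// Stand-in for a resident mapping, ordered by the tick it was last touched.
#include <algorithm>
#include <cstddef>
#include <set>

struct Entry {
    std::size_t tick{};
    std::size_t vaddr{};
    bool operator<(const Entry& other) const {
        return tick != other.tick ? tick < other.tick : vaddr < other.vaddr;
    }
};

// Evict down to half of the allowed resident count, oldest first, mirroring
// the "dump half of the mappings" policy of RebuildSeparateHeapAddressSpace.
void EvictOldestHalf(std::set<Entry>& resident, std::size_t max_resident) {
    const std::size_t desired = std::min(resident.size(), max_resident) / 2;
    const std::size_t evict = resident.size() - desired;

    auto it = resident.begin();
    for (std::size_t i = 0; i < evict && it != resident.end(); ++i) {
        // The real tracker unmaps the pages here before erasing the node.
        it = resident.erase(it);
    }
}
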
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
index e540375b8..860c39e6a 100644
--- a/src/common/host_memory.cpp
+++ b/src/common/host_memory.cpp
@@ -679,7 +679,7 @@ HostMemory::HostMemory(HostMemory&&) noexcept = default;
 HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default;

 void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
-                     MemoryPermission perms) {
+                     MemoryPermission perms, bool separate_heap) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(host_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
@@ -691,7 +691,7 @@ void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
     impl->Map(virtual_offset + virtual_base_offset, host_offset, length, perms);
 }

-void HostMemory::Unmap(size_t virtual_offset, size_t length) {
+void HostMemory::Unmap(size_t virtual_offset, size_t length, bool separate_heap) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
     ASSERT(virtual_offset + length <= virtual_size);
@@ -701,14 +701,16 @@ void HostMemory::Unmap(size_t virtual_offset, size_t length) {
     impl->Unmap(virtual_offset + virtual_base_offset, length);
 }

-void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool write,
-                         bool execute) {
+void HostMemory::Protect(size_t virtual_offset, size_t length, MemoryPermission perm) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
     ASSERT(virtual_offset + length <= virtual_size);
     if (length == 0 || !virtual_base || !impl) {
         return;
     }
+    const bool read = True(perm & MemoryPermission::Read);
+    const bool write = True(perm & MemoryPermission::Write);
+    const bool execute = True(perm & MemoryPermission::Execute);
     impl->Protect(virtual_offset + virtual_base_offset, length, read, write, execute);
 }

diff --git a/src/common/host_memory.h b/src/common/host_memory.h
index 747c5850c..72fbb05af 100644
--- a/src/common/host_memory.h
+++ b/src/common/host_memory.h
@@ -40,11 +40,12 @@ public:
     HostMemory(HostMemory&& other) noexcept;
     HostMemory& operator=(HostMemory&& other) noexcept;

-    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms);
+    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms,
+             bool separate_heap);

-    void Unmap(size_t virtual_offset, size_t length);
+    void Unmap(size_t virtual_offset, size_t length, bool separate_heap);

-    void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute = false);
+    void Protect(size_t virtual_offset, size_t length, MemoryPermission perms);

     void EnableDirectMappedAddress();

@@ -64,6 +65,10 @@ public:
         return virtual_base;
     }

+    bool IsInVirtualRange(void* address) const noexcept {
+        return address >= virtual_base && address < virtual_base + virtual_size;
+    }
+
 private:
     size_t backing_size{};
     size_t virtual_size{};
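
For callers the interface change is mechanical: Map and Unmap gain a separate_heap flag, and Protect now takes a MemoryPermission mask instead of three booleans. A hedged sketch of the new call shapes; the offsets and lengths below are invented for the example, and only enumerators referenced in this diff are used:

// Illustrative call sites only; offsets and sizes are made up.
#include <cstddef>

#include "common/host_memory.h"

void ExampleUsage(Common::HostMemory& memory) {
    constexpr std::size_t kPage = 0x1000;

    // Non-heap memory is still mapped immediately; the new flag just tags it.
    memory.Map(/*virtual_offset=*/0, /*host_offset=*/0, /*length=*/16 * kPage,
               Common::MemoryPermission::Read, /*separate_heap=*/false);

    // Reprotection now passes the permission mask straight through.
    memory.Protect(/*virtual_offset=*/0, /*length=*/16 * kPage,
                   Common::MemoryPermission::Read);

    // Unmapping likewise carries the separate_heap tag.
    memory.Unmap(/*virtual_offset=*/0, /*length=*/16 * kPage, /*separate_heap=*/false);
}
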