Diffstat (limited to 'src/common')
-rw-r--r--   src/common/CMakeLists.txt     2
-rw-r--r--   src/common/heap_tracker.cpp 263
-rw-r--r--   src/common/heap_tracker.h    97
-rw-r--r--   src/common/host_memory.cpp   10
-rw-r--r--   src/common/host_memory.h     11
5 files changed, 376 insertions, 7 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index b58a7073f..8c57d47c6 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -64,6 +64,8 @@ add_library(common STATIC
     fs/path_util.cpp
     fs/path_util.h
     hash.h
+    heap_tracker.cpp
+    heap_tracker.h
     hex_util.cpp
     hex_util.h
     host_memory.cpp
diff --git a/src/common/heap_tracker.cpp b/src/common/heap_tracker.cpp
new file mode 100644
index 000000000..95dc8aa1e
--- /dev/null
+++ b/src/common/heap_tracker.cpp
@@ -0,0 +1,263 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <algorithm>
+#include <vector>
+
+#include "common/heap_tracker.h"
+#include "common/logging/log.h"
+
+namespace Common {
+
+namespace {
+
+constexpr s64 MaxResidentMapCount = 0x8000;
+
+} // namespace
+
+HeapTracker::HeapTracker(Common::HostMemory& buffer) : m_buffer(buffer) {}
+HeapTracker::~HeapTracker() = default;
+
+void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
+                      MemoryPermission perm, bool is_separate_heap) {
+    // When mapping other memory, map pages immediately.
+    if (!is_separate_heap) {
+        m_buffer.Map(virtual_offset, host_offset, length, perm, false);
+        return;
+    }
+
+    {
+        // We are mapping part of a separate heap.
+        std::scoped_lock lk{m_lock};
+
+        auto* const map = new SeparateHeapMap{
+            .vaddr = virtual_offset,
+            .paddr = host_offset,
+            .size = length,
+            .tick = m_tick++,
+            .perm = perm,
+            .is_resident = false,
+        };
+
+        // Insert into mappings.
+        m_map_count++;
+        m_mappings.insert(*map);
+    }
+
+    // Finally, map.
+    this->DeferredMapSeparateHeap(virtual_offset);
+}
+
+void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) {
+    // If this is a separate heap...
+    if (is_separate_heap) {
+        std::scoped_lock lk{m_lock};
+
+        const SeparateHeapMap key{
+            .vaddr = virtual_offset,
+        };
+
+        // Split at the boundaries of the region we are removing.
+        this->SplitHeapMapLocked(virtual_offset);
+        this->SplitHeapMapLocked(virtual_offset + size);
+
+        // Erase all mappings in range.
+        auto it = m_mappings.find(key);
+        while (it != m_mappings.end() && it->vaddr < virtual_offset + size) {
+            // Get underlying item.
+            auto* const item = std::addressof(*it);
+
+            // If resident, erase from resident map.
+            if (item->is_resident) {
+                ASSERT(--m_resident_map_count >= 0);
+                m_resident_mappings.erase(m_resident_mappings.iterator_to(*item));
+            }
+
+            // Erase from map.
+            it = m_mappings.erase(it);
+            ASSERT(--m_map_count >= 0);
+
+            // Free the item.
+            delete item;
+        }
+    }
+
+    // Unmap pages.
+    m_buffer.Unmap(virtual_offset, size, false);
+}
+
+void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission perm) {
+    // Ensure no rebuild occurs while reprotecting.
+    std::shared_lock lk{m_rebuild_lock};
+
+    // Split at the boundaries of the region we are reprotecting.
+    this->SplitHeapMap(virtual_offset, size);
+
+    // Declare tracking variables.
+    VAddr cur = virtual_offset;
+    VAddr end = virtual_offset + size;
+
+    while (cur < end) {
+        VAddr next = cur;
+        bool should_protect = false;
+
+        {
+            std::scoped_lock lk2{m_lock};
+
+            const SeparateHeapMap key{
+                .vaddr = next,
+            };
+
+            // Try to get the next mapping corresponding to this address.
+            const auto it = m_mappings.nfind(key);
+
+            if (it == m_mappings.end()) {
+                // There are no separate heap mappings remaining.
+                next = end;
+                should_protect = true;
+            } else if (it->vaddr == cur) {
+                // We are in range.
+                // Update permission bits.
+                it->perm = perm;
+
+                // Determine next address and whether we should protect.
+                next = cur + it->size;
+                should_protect = it->is_resident;
+            } else /* if (it->vaddr > cur) */ {
+                // We weren't in range, but there is a block coming up that will be.
+                next = it->vaddr;
+                should_protect = true;
+            }
+        }
+
+        // Clamp to end.
+        next = std::min(next, end);
+
+        // Reprotect, if we need to.
+        if (should_protect) {
+            m_buffer.Protect(cur, next - cur, perm);
+        }
+
+        // Advance.
+        cur = next;
+    }
+}
+
+bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) {
+    if (m_buffer.IsInVirtualRange(fault_address)) {
+        return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer());
+    }
+
+    return false;
+}
+
+bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) {
+    bool rebuild_required = false;
+
+    {
+        std::scoped_lock lk{m_lock};
+
+        // Check to ensure this was a non-resident separate heap mapping.
+        const auto it = this->GetNearestHeapMapLocked(virtual_offset);
+        if (it == m_mappings.end() || it->is_resident) {
+            return false;
+        }
+
+        // Update tick before possible rebuild.
+        it->tick = m_tick++;
+
+        // Check if we need to rebuild.
+        if (m_resident_map_count > MaxResidentMapCount) {
+            rebuild_required = true;
+        }
+
+        // Map the area.
+        m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);
+
+        // This map is now resident.
+        it->is_resident = true;
+        m_resident_map_count++;
+        m_resident_mappings.insert(*it);
+    }
+
+    if (rebuild_required) {
+        // A rebuild was required, so perform it now.
+        this->RebuildSeparateHeapAddressSpace();
+    }
+
+    return true;
+}
+
+void HeapTracker::RebuildSeparateHeapAddressSpace() {
+    std::scoped_lock lk{m_rebuild_lock, m_lock};
+
+    ASSERT(!m_resident_mappings.empty());
+
+    // Unmap so we have at least 4 maps available.
+    const size_t desired_count = std::min(m_resident_map_count, MaxResidentMapCount - 4);
+    const size_t evict_count = m_resident_map_count - desired_count;
+    auto it = m_resident_mappings.begin();
+
+    for (size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) {
+        // Unmark and unmap.
+        it->is_resident = false;
+        m_buffer.Unmap(it->vaddr, it->size, false);
+
+        // Advance.
+        ASSERT(--m_resident_map_count >= 0);
+        it = m_resident_mappings.erase(it);
+    }
+}
+
+void HeapTracker::SplitHeapMap(VAddr offset, size_t size) {
+    std::scoped_lock lk{m_lock};
+
+    this->SplitHeapMapLocked(offset);
+    this->SplitHeapMapLocked(offset + size);
+}
+
+void HeapTracker::SplitHeapMapLocked(VAddr offset) {
+    const auto it = this->GetNearestHeapMapLocked(offset);
+    if (it == m_mappings.end() || it->vaddr == offset) {
+        // Not contained or no split required.
+        return;
+    }
+
+    // Cache the original values.
+    auto* const left = std::addressof(*it);
+    const size_t orig_size = left->size;
+
+    // Adjust the left map.
+    const size_t left_size = offset - left->vaddr;
+    left->size = left_size;
+
+    // Create the new right map.
+    auto* const right = new SeparateHeapMap{
+        .vaddr = left->vaddr + left_size,
+        .paddr = left->paddr + left_size,
+        .size = orig_size - left_size,
+        .tick = left->tick,
+        .perm = left->perm,
+        .is_resident = left->is_resident,
+    };
+
+    // Insert the new right map.
+    m_map_count++;
+    m_mappings.insert(*right);
+
+    // If resident, also insert into resident map.
+    if (right->is_resident) {
+        m_resident_mappings.insert(*right);
+        m_resident_map_count++;
+    }
+}
+
+HeapTracker::AddrTree::iterator HeapTracker::GetNearestHeapMapLocked(VAddr offset) {
+    const SeparateHeapMap key{
+        .vaddr = offset,
+    };
+
+    return m_mappings.find(key);
+}
+
+} // namespace Common
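
DeferredMapSeparateHeap(u8*) is designed to be driven from a host fault handler: the first touch of a non-resident separate-heap page faults, the handler maps the page, and returning retries the faulting instruction. The handler itself is not part of this diff; the following is a minimal POSIX sketch under that assumption, with g_heap_tracker and InstallHeapFaultHandler as hypothetical names.

// Hypothetical fault-handler wiring; not part of this commit.
// Assumes a single global HeapTracker and POSIX signal semantics.
#include <signal.h>

#include "common/heap_tracker.h"

static Common::HeapTracker* g_heap_tracker = nullptr;

static void SegfaultHandler(int sig, siginfo_t* info, void*) {
    // si_addr holds the faulting host address.
    u8* const fault_address = static_cast<u8*>(info->si_addr);

    // If this touched a non-resident separate-heap page, it is now mapped,
    // and returning from the handler retries the faulting instruction.
    if (g_heap_tracker && g_heap_tracker->DeferredMapSeparateHeap(fault_address)) {
        return;
    }

    // Otherwise, restore default handling so the crash is reported normally.
    signal(sig, SIG_DFL);
}

void InstallHeapFaultHandler(Common::HeapTracker& tracker) {
    g_heap_tracker = &tracker;

    struct sigaction action{};
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = SegfaultHandler;
    sigaction(SIGSEGV, &action, nullptr);
}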
diff --git a/src/common/heap_tracker.h b/src/common/heap_tracker.h
new file mode 100644
index 000000000..cc16041d9
--- /dev/null
+++ b/src/common/heap_tracker.h
@@ -0,0 +1,97 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <atomic>
+#include <mutex>
+#include <set>
+#include <shared_mutex>
+
+#include "common/host_memory.h"
+#include "common/intrusive_red_black_tree.h"
+
+namespace Common {
+
+struct SeparateHeapMap {
+    Common::IntrusiveRedBlackTreeNode addr_node{};
+    Common::IntrusiveRedBlackTreeNode tick_node{};
+    VAddr vaddr{};
+    PAddr paddr{};
+    size_t size{};
+    size_t tick{};
+    MemoryPermission perm{};
+    bool is_resident{};
+};
+
+struct SeparateHeapMapAddrComparator {
+    static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
+        if (lhs.vaddr < rhs.vaddr) {
+            return -1;
+        } else if (lhs.vaddr <= (rhs.vaddr + rhs.size - 1)) {
+            return 0;
+        } else {
+            return 1;
+        }
+    }
+};
+
+struct SeparateHeapMapTickComparator {
+    static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
+        if (lhs.tick < rhs.tick) {
+            return -1;
+        } else if (lhs.tick > rhs.tick) {
+            return 1;
+        } else {
+            return SeparateHeapMapAddrComparator::Compare(lhs, rhs);
+        }
+    }
+};
+
+class HeapTracker {
+public:
+    explicit HeapTracker(Common::HostMemory& buffer);
+    ~HeapTracker();
+
+    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm,
+             bool is_separate_heap);
+    void Unmap(size_t virtual_offset, size_t size, bool is_separate_heap);
+    void Protect(size_t virtual_offset, size_t length, MemoryPermission perm);
+    u8* VirtualBasePointer() {
+        return m_buffer.VirtualBasePointer();
+    }
+
+    bool DeferredMapSeparateHeap(u8* fault_address);
+    bool DeferredMapSeparateHeap(size_t virtual_offset);
+
+private:
+    using AddrTreeTraits =
+        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::addr_node>;
+    using AddrTree = AddrTreeTraits::TreeType<SeparateHeapMapAddrComparator>;
+
+    using TickTreeTraits =
+        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::tick_node>;
+    using TickTree = TickTreeTraits::TreeType<SeparateHeapMapTickComparator>;
+
+    AddrTree m_mappings{};
+    TickTree m_resident_mappings{};
+
+private:
+    void SplitHeapMap(VAddr offset, size_t size);
+    void SplitHeapMapLocked(VAddr offset);
+
+    AddrTree::iterator GetNearestHeapMapLocked(VAddr offset);
+
+    void RebuildSeparateHeapAddressSpace();
+
+private:
+    Common::HostMemory& m_buffer;
+
+    std::shared_mutex m_rebuild_lock{};
+    std::mutex m_lock{};
+    s64 m_map_count{};
+    s64 m_resident_map_count{};
+    size_t m_tick{};
+};
+
+} // namespace Common
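
SeparateHeapMapAddrComparator compares a bare address key against the interval [vaddr, vaddr + size), so m_mappings.find() on a key with only vaddr set acts as a containment lookup rather than an exact match. A self-contained illustration of that Compare logic follows; Interval and the constants are stand-ins for illustration, not types or values from this diff.

#include <cstddef>
#include <cstdint>

using VAddr = std::uint64_t;

// Illustrative stand-in with the same fields the comparator reads.
struct Interval {
    VAddr vaddr;
    std::size_t size;
};

// Same shape as SeparateHeapMapAddrComparator::Compare: a key with
// size 0 compares equal to any mapping whose range contains it.
constexpr int Compare(const Interval& lhs, const Interval& rhs) {
    if (lhs.vaddr < rhs.vaddr) {
        return -1;
    } else if (lhs.vaddr <= (rhs.vaddr + rhs.size - 1)) {
        return 0; // lhs.vaddr falls inside [rhs.vaddr, rhs.vaddr + rhs.size)
    } else {
        return 1;
    }
}

static_assert(Compare({0x2000, 0}, {0x1000, 0x2000}) == 0);  // contained
static_assert(Compare({0x0800, 0}, {0x1000, 0x2000}) == -1); // below the range
static_assert(Compare({0x3000, 0}, {0x1000, 0x2000}) == 1);  // above (range ends at 0x2FFF)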
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
index e540375b8..860c39e6a 100644
--- a/src/common/host_memory.cpp
+++ b/src/common/host_memory.cpp
@@ -679,7 +679,7 @@ HostMemory::HostMemory(HostMemory&&) noexcept = default;
 HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default;
 
 void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
-                     MemoryPermission perms) {
+                     MemoryPermission perms, bool separate_heap) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(host_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
@@ -691,7 +691,7 @@ void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
     impl->Map(virtual_offset + virtual_base_offset, host_offset, length, perms);
 }
 
-void HostMemory::Unmap(size_t virtual_offset, size_t length) {
+void HostMemory::Unmap(size_t virtual_offset, size_t length, bool separate_heap) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
     ASSERT(virtual_offset + length <= virtual_size);
@@ -701,14 +701,16 @@ void HostMemory::Unmap(size_t virtual_offset, size_t length) {
     impl->Unmap(virtual_offset + virtual_base_offset, length);
 }
 
-void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool write,
-                         bool execute) {
+void HostMemory::Protect(size_t virtual_offset, size_t length, MemoryPermission perm) {
     ASSERT(virtual_offset % PageAlignment == 0);
     ASSERT(length % PageAlignment == 0);
     ASSERT(virtual_offset + length <= virtual_size);
     if (length == 0 || !virtual_base || !impl) {
         return;
     }
+    const bool read = True(perm & MemoryPermission::Read);
+    const bool write = True(perm & MemoryPermission::Write);
+    const bool execute = True(perm & MemoryPermission::Execute);
     impl->Protect(virtual_offset + virtual_base_offset, length, read, write, execute);
 }
 
diff --git a/src/common/host_memory.h b/src/common/host_memory.h
index 747c5850c..72fbb05af 100644
--- a/src/common/host_memory.h
+++ b/src/common/host_memory.h
@@ -40,11 +40,12 @@ public:
     HostMemory(HostMemory&& other) noexcept;
     HostMemory& operator=(HostMemory&& other) noexcept;
 
-    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms);
+    void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms,
+             bool separate_heap);
 
-    void Unmap(size_t virtual_offset, size_t length);
+    void Unmap(size_t virtual_offset, size_t length, bool separate_heap);
 
-    void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute = false);
+    void Protect(size_t virtual_offset, size_t length, MemoryPermission perms);
 
     void EnableDirectMappedAddress();
 
@@ -64,6 +65,10 @@ public:
         return virtual_base;
     }
 
+    bool IsInVirtualRange(void* address) const noexcept {
+        return address >= virtual_base && address < virtual_base + virtual_size;
+    }
+
 private:
     size_t backing_size{};
     size_t virtual_size{};
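
Callers of HostMemory have to be updated for the new signatures: Map and Unmap take a trailing separate_heap flag, and Protect takes MemoryPermission flags in place of three booleans. A caller-side sketch follows; RemapExample and the offsets are illustrative, and the flag operators on MemoryPermission are assumed to be declared alongside the enum in host_memory.h.

// Illustrative caller update for the new HostMemory API; the names and
// constants here are examples, not code from this commit.
#include "common/host_memory.h"

void RemapExample(Common::HostMemory& memory) {
    constexpr size_t virtual_offset = 0x10000;
    constexpr size_t host_offset = 0x20000;
    constexpr size_t length = 0x4000;

    // Map/Unmap now take a trailing separate_heap flag; false keeps the
    // old eager-mapping behavior for non-heap memory.
    memory.Map(virtual_offset, host_offset, length,
               Common::MemoryPermission::Read | Common::MemoryPermission::Write, false);

    // Protect now takes permission flags instead of (read, write, execute).
    memory.Protect(virtual_offset, length, Common::MemoryPermission::Read);

    memory.Unmap(virtual_offset, length, false);
}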